#Create a program that will play the “cows and bulls” game with the user. The game works like this:
#Randomly generate a 4-digit number. Ask the user to guess a 4-digit number. For every digit that the user
# guessed correctly in the correct place, they have a “cow”. For every digit the user guessed correctly but in
# the wrong place, they have a “bull”. Every time the user makes a guess, tell them how many “cows” and “bulls” they have.
# Once the user guesses the correct number, the game is over. Keep track of the number of guesses the user makes
# throughout the game and tell the user at the end.
#Say the number generated by the computer is 1038. An example interaction could look like this:
# Welcome to the Cows and Bulls Game!
# Enter a number:
# >>> 1234
# 2 cows, 0 bulls
# >>> 1256
# 1 cow, 0 bulls
# ...
#Until the user guesses the number.
import random
def main():
    print("Welcome to the Cows and Bulls Game!")
    number = str(random.randint(1000, 9999))
    # print("The password is:", number)  # debug only: reveals the secret number
    guess = ""
    count = 0
    while guess != number:
        count += 1
        # assumes the user enters a four-digit number, no error handling
        guess = input("Enter a number: ")
        cows = 0
        bulls = 0
        for i in range(4):
            if guess[i] == number[i]:
                cows += 1
            elif guess[i] in number:
                bulls += 1
        if guess == number:
            print("You got it!!! It took you", count, "guesses to get it right")
        else:
            print(cows, "cow(s),", bulls, "bull(s)")
main()
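# A minimal, hypothetical helper (not part of the exercise solution above) that
# isolates the counting rule used in main() so it can be checked in isolation:
# a "cow" is a correct digit in the correct place, a "bull" is a correct digit
# in the wrong place.
def count_cows_and_bulls(secret, guess):
    cows = sum(1 for s, g in zip(secret, guess) if s == g)
    bulls = sum(1 for s, g in zip(secret, guess) if s != g and g in secret)
    return cows, bulls
# e.g. count_cows_and_bulls("1038", "1234") == (2, 0), matching the example above.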
|
print("The following are the safety measures we should take against the new COVID-19 virus: ")
print("We all should wash our hands frequently")
print("We all should maintain social distancing")
print("We all should avoid touching nose, eyes and mouth")
print("Seek medical care urgently if you have fever, cough and difficulty in breathing")
|
import pytest
from plenum.test.helper import checkViewNoForNodes, waitForViewChange, sdk_send_random_and_check
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.node_request.helper import sdk_ensure_pool_functional
from plenum.test.test_node import ensureElectionsDone
from sovtoken.constants import ADDRESS, AMOUNT
from sovtoken.test.helpers.helper_general import utxo_from_addr_and_seq_no
from sovtokenfees.constants import FEES
from sovtokenfees.test.catchup.helper import scenario_txns_during_catchup
from sovtokenfees.test.constants import NYM_FEES_ALIAS, NODE_FEES_ALIAS, XFER_PUBLIC_FEES_ALIAS, alias_to_txn_type
from sovtokenfees.test.helper import get_amount_from_token_txn, send_and_check_nym_with_fees, send_and_check_transfer, \
add_fees_request_with_address
from plenum.test.pool_transactions.helper import prepare_node_request, disconnect_node_and_ensure_disconnected
from plenum.common.util import hexToFriendly
from sovtokenfees.test.helpers import HelperNode
from indy_node.test.helper import start_stopped_node
from plenum.common.constants import KeyValueStorageType
nodeCount = 6
whitelist = ['Consistency verification of merkle tree from hash store failed']
@pytest.fixture(scope="module")
def tconf(tconf):
old_b_size = tconf.Max3PCBatchSize
tconf.Max3PCBatchSize = 1
yield tconf
tconf.Max3PCBatchSize = old_b_size
@pytest.fixture()
def addresses(helpers):
return helpers.wallet.create_new_addresses(2)
@pytest.fixture()
def mint_tokens(helpers, addresses):
outputs = [{ADDRESS: addresses[0], AMOUNT: 1000}]
return helpers.general.do_mint(outputs)
@pytest.fixture(
scope='module',
params=[
{NYM_FEES_ALIAS: 4,
XFER_PUBLIC_FEES_ALIAS: 4}, # with fees
], ids=lambda x: 'fees'
)
def fees(request):
return request.param
def demote_node(helpers, wallet, node):
req = helpers.sdk.sdk_build_demote_txn(node, wallet)
helpers.sdk.send_request_objects([req], wallet)
def promote_node(helpers, wallet, node):
req = helpers.sdk.sdk_build_promote_txn(node, wallet)
helpers.sdk.send_request_objects([req], wallet)
def restart_node(restarted_node, pool, looper, tconf, tdir, allPluginsPath, do_post_node_creation, fees):
node_idx = pool.index(restarted_node)
restarted_node.cleanupOnStopping = False
disconnect_node_and_ensure_disconnected(looper,
pool,
restarted_node.name,
stopNode=True)
looper.removeProdable(name=restarted_node.name)
restarted_node = start_stopped_node(
restarted_node,
looper,
tconf,
tdir,
allPluginsPath,
start=False,
)
do_post_node_creation(restarted_node)
for fee_alias, amount in fees.items():
HelperNode.fill_auth_map_for_node(restarted_node, alias_to_txn_type[fee_alias])
pool[node_idx] = restarted_node
looper.add(restarted_node)
def test_demote_promote_restart_after_promotion(nodeSetWithIntegratedTokenPlugin,
looper,
sdk_pool_handle,
sdk_wallet_trustee,
tdir,
tconf,
allPluginsPath,
mint_tokens,
address_main,
helpers,
fees_set, addresses, fees,
do_post_node_creation):
pool = nodeSetWithIntegratedTokenPlugin
current_amount = get_amount_from_token_txn(mint_tokens)
seq_no = 1
demoted_node = pool[-1]
from_a_to_b = [addresses[0], addresses[1]]
current_amount, seq_no, _ = send_and_check_transfer(helpers, from_a_to_b, fees, looper,
current_amount, seq_no)
rest_nodes = [n for n in pool if n != demoted_node]
starting_view_no = checkViewNoForNodes(pool)
    # Step 1. Demote node Zeta (the last node in the pool)
demote_node(helpers, sdk_wallet_trustee, demoted_node)
    # Step 2. Wait for a view change after the node count changes
waitForViewChange(looper, rest_nodes, expectedViewNo=starting_view_no + 1)
ensureElectionsDone(looper, rest_nodes)
ensure_all_nodes_have_same_data(looper, rest_nodes, exclude_from_check='check_seqno_db_equality')
current_amount, seq_no, _ = send_and_check_nym_with_fees(helpers, fees_set, seq_no, looper, addresses,
current_amount)
current_amount, seq_no, _ = send_and_check_transfer(helpers, from_a_to_b, fees, looper,
current_amount, seq_no)
starting_view_no = checkViewNoForNodes(rest_nodes)
    # Step 3. Promote the node back and wait for a view change
promote_node(helpers, sdk_wallet_trustee, demoted_node)
waitForViewChange(looper, rest_nodes, expectedViewNo=starting_view_no + 1)
ensureElectionsDone(looper, rest_nodes)
    # Step 4. Restart the promoted node only after the NODE txn has been ordered
restart_node(demoted_node, pool, looper, tconf, tdir, allPluginsPath, do_post_node_creation, fees)
ensure_all_nodes_have_same_data(looper, pool, custom_timeout=60, exclude_from_check='check_seqno_db_equality')
ensureElectionsDone(looper, pool)
    # Step 5. Make sure that the pool works fine
current_amount, seq_no, _ = send_and_check_transfer(helpers, from_a_to_b, fees, looper,
current_amount, seq_no)
current_amount, seq_no, _ = send_and_check_nym_with_fees(helpers, fees_set, seq_no, looper, addresses,
current_amount)
ensure_all_nodes_have_same_data(looper, pool, exclude_from_check='check_seqno_db_equality')
# sdk_ensure_pool_functional(looper, pool, sdk_wallet_steward, sdk_pool_handle)
|
from scipy import stats
from sklearn.svm import SVR
from sklearn.linear_model import LinearRegression
import os
import random
import sys
import csv
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import transformers
from transformers import AutoConfig, AutoModel, AutoTokenizer
from transformers import AdamW
from torch.cuda.amp import autocast
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import time
import tensorflow as tf
from sklearn.kernel_ridge import KernelRidge
start = time.time()
torch.cuda.empty_cache()
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
tf.random.set_seed(seed_val)
class BERT_Arch(nn.Module):
def __init__(self, bert):
super(BERT_Arch, self).__init__()
self.bert = bert
# dropout layer
self.dropout = nn.Dropout(0.1)
# relu activation function
self.relu = nn.ReLU()
self.leakyrelu = nn.LeakyReLU()
self.elu = nn.ELU()
self.tanh = nn.Tanh()
self.zeros=0
self.totals=0
# dense layer 1
self.fc1 = nn.Linear(768,600)
# dense layer 2 (Output layer)
self.fc2 = nn.Linear(600,1)
self.fc3 = nn.Linear(1,1)
#LSTM
self.hidden_dim = 768 #300
self.emb_dim = 768
self.encoder = nn.LSTM(self.emb_dim, self.hidden_dim, num_layers=1, bidirectional=True, dropout=0.1)
#Define Attention Network
def attnetwork(self, encoder_out, final_hidden):
hidden = final_hidden.squeeze(0)
attn_weights = torch.bmm(encoder_out, hidden.unsqueeze(2)).squeeze(2)
soft_attn_weights = F.softmax(attn_weights, 1)
new_hidden = torch.bmm(encoder_out.transpose(1,2), soft_attn_weights.unsqueeze(2)).squeeze(2)
return new_hidden, soft_attn_weights
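    # Shape note: as called from forward() below, encoder_out is (batch, seq_len,
    # hidden) and final_hidden is (1, batch, hidden); attn_weights comes out as
    # (batch, seq_len) and new_hidden as (batch, hidden), i.e. an attention-weighted
    # sum of the per-chunk encoder outputs.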
#define the forward pass
def forward(self, sent_id, mask, hist):
cls_vec = []
chunk_max_weights = []
for i in range(len(sent_id)):
if i < 5:
#print("chunk i: ", i)
ip_id = torch.tensor(sent_id[i]).unsqueeze(0).to(device)
attn_mask = torch.tensor(mask[i]).unsqueeze(0).to(device)
#pass the inputs to the model
model_outputs = self.bert(input_ids=ip_id, attention_mask=attn_mask)
cls_hs=model_outputs[1]
atten=model_outputs[2]
cls_vec.append(cls_hs)
del ip_id
del attn_mask
'''
col_sum = np.sort(atten[0][0][11].sum(0)[1:-1].detach().cpu().numpy())
col_sum = col_sum[::-1]
max_col_sum = max(col_sum)
top_word_mean = col_sum[:5].mean()
chunk_max_weights.append(top_word_mean)
'''
#cls_vec_ = torch.mean(torch.stack(cls_vec, dim=0), dim=0)
cls_vec = torch.stack(cls_vec, dim=0)
cls_vec = cls_vec.to(torch.float32) #LSTM
#print("cls_vec shape: ", cls_vec.shape, type(cls_vec), cls_vec.dtype)
'''
x = self.fc1(cls_vec_)
x = self.relu(x)
x = self.dropout(x)
chunk_weights = (torch.tensor(chunk_max_weights)).unsqueeze(0)
chunk_weights = chunk_weights.cuda()
prod1 = torch.bmm(cls_vec.transpose(1,2), chunk_weights.transpose(0,1).unsqueeze(1))
prod1 = prod1.transpose(1,2)
prod1 = prod1.to(torch.float32)
'''
emb_input = cls_vec
inputx = self.dropout(emb_input)
output, (hn, cn) = self.encoder(inputx) #emb_input)
fbout = output[:, :, :self.hidden_dim]+ output[:, :, self.hidden_dim:] #sum bidir outputs F+B
fbout = fbout.permute(1,0,2)
fbhn = (hn[-2,:,:]+hn[-1,:,:]).unsqueeze(0)
attn_out, attn_weights = self.attnetwork(fbout, fbhn)
'''
chunk_weights = (torch.tensor(chunk_max_weights)).unsqueeze(0)
chunk_weights = chunk_weights.cuda()
prod1 = torch.bmm(cls_vec.transpose(1,2), chunk_weights.transpose(0,1).unsqueeze(1))
'''
prod = torch.bmm(cls_vec.transpose(1,2), attn_weights.transpose(0,1).unsqueeze(1))
prod_sum = torch.mean(prod, 0).transpose(0,1)
x = prod_sum #attn_out
x = self.fc1(x)
x =self.leakyrelu(x)
x = self.dropout(x)
#hist = hist.unsqueeze(0)
#hist = self.fc3(hist)
#x = torch.cat((x, hist.unsqueeze(0)), dim=1)
#x = self.dropout(x)
# output layer
y = self.fc2(x)
y = self.leakyrelu(y)
return x, y
# function to train the model
def train(epoch):
memory_file = open('memory_longformer_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(epoch)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm_hist.txt', 'a+')
model.train()
total_loss, total_accuracy = 0, 0
# empty list to save model predictions
total_preds = []
total_hist = []
xs = []
# iterate over list of documents
for i in range(len(train_seq)):
memory_file.write("doc num: "+str(i)+" before train: "+str(int(torch.cuda.memory_allocated()/1024/1024))+' mem alloced\n')
memory_file.write("doc num: "+str(i)+" before train: "+str(int(torch.cuda.memory_reserved()/1024/1024))+' mem reserved\n')
sent_id = train_seq[i]
mask = train_mask[i]
hist = train_hist[i]
labels = train_y[i].unsqueeze(0).unsqueeze(0)
# clear previously calculated gradients
model.zero_grad()
memory_file.write("doc num: "+str(i)+" len(sent_id): "+str(len(sent_id))+" \n")
with autocast():
# get model predictions for the current batch
x, preds = model(sent_id, mask, hist)
# compute the loss between actual and predicted values
#loss = huber_loss(preds, labels)
loss = mse_loss(preds, labels)
# model predictions are stored on GPU. So, push it to CPU
preds = preds.detach().cpu().numpy()
x = x.detach().cpu().numpy().ravel()
# add on to the total loss
total_loss = total_loss + loss.item()
xs.append(x)
# backward pass to calculate the gradients
loss.backward()
        # clip the gradients to 1.0; this helps prevent the exploding gradient problem
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
# update parameters
optimizer.step()
# append the model predictions
total_preds.append(preds)
loss.detach().cpu()
memory_file.write("doc num: "+str(i)+" after train: "+str(int(torch.cuda.memory_allocated()/1024/1024))+' mem alloced\n')
memory_file.write("doc num: "+str(i)+" after train: "+str(int(torch.cuda.memory_reserved()/1024/1024))+' mem reserved\n')
memory_file.flush()
# compute the training loss of the epoch
avg_loss = total_loss / len(train_seq)
xs = np.array(xs)
# predictions are in the form of (no. of batches, size of batch, no. of classes).
# reshape the predictions in form of (number of samples, no. of classes)
total_preds = np.concatenate(total_preds, axis=0)
#total_hist = np.concatenate(total_hist, axis=0)
memory_file.close()
#returns the loss and predictions
return avg_loss, total_preds , xs
# function for evaluating the model
def evaluate():
print("\nEvaluating...")
# deactivate dropout layers
model.eval()
total_loss, total_accuracy = 0.0, 0.0
# empty list to save the model predictions
total_preds = []
total_xs = []
# iterate over list of documents
for i in range(len(valid_seq)):
sent_id = valid_seq[i]
mask = valid_mask[i]
hist = valid_hist[i]
labels = valid_y[i].unsqueeze(0).unsqueeze(0)
# deactivate autograd
with torch.no_grad():
with autocast():
# model predictions
x, preds = model(sent_id, mask, hist)
# compute the validation loss between actual and predicted values
loss = mse_loss(preds,labels)
total_loss = total_loss + loss.item()
preds = preds.detach().cpu().numpy()
total_preds.append(preds)
x = x.detach().cpu().numpy().ravel()
total_xs.append(x)
loss.detach().cpu()
# compute the validation loss of the epoch
avg_loss = total_loss / len(valid_seq)
total_xs = np.array(total_xs)
# reshape the predictions in form of (number of samples, no. of classes)
total_preds = np.concatenate(total_preds, axis=0)
return avg_loss, total_preds, total_xs
def test():
# empty list to save the model predictions
total_xs = []
total_preds=[]
for i in range(len(test_seq)):
sent_id = test_seq[i]
mask = test_mask[i]
hist = test_hist[i]
#labels = test_y[i].unsqueeze(0).unsqueeze(0)
with torch.no_grad():
with autocast():
x, preds = model(sent_id, mask, hist)
preds = preds.detach().cpu().numpy()
total_preds.append(preds)
x = x.detach().cpu().numpy().ravel()
total_xs.append(x)
# reshape the predictions in form of (number of samples, no. of classes)
total_xs = np.array(total_xs)
total_preds = np.concatenate(total_preds, axis=0)
return total_xs, total_preds
def train_x():
# empty list to save the model predictions
total_xs = []
total_preds=[]
for i in range(len(train_seq)):
sent_id = train_seq[i]
mask = train_mask[i]
hist = train_hist[i]
#labels = test_y[i].unsqueeze(0).unsqueeze(0)
with torch.no_grad():
with autocast():
x, preds = model(sent_id, mask, hist)
preds = preds.detach().cpu().numpy()
total_preds.append(preds)
x = x.detach().cpu().numpy().ravel()
total_xs.append(x)
# reshape the predictions in form of (number of samples, no. of classes)
total_xs = np.array(total_xs)
total_preds = np.concatenate(total_preds, axis=0)
return total_xs, total_preds
# specify GPU
device = torch.device("cuda")
max_length = int(sys.argv[1])  # chunk length; [CLS] and [SEP] are appended to each chunk (e.g. 510 -> 512)
sec = sys.argv[2]
bv = sys.argv[3]
fname = "sorted_"+ sec + ".csv"
#end_year = int(sys.argv[1])
#train_years_list = list(range(end_year-5, end_year))
#print("train_years: ", train_years_list)
df = pd.read_csv(fname)
#df = df[:10]
train_text, rem_text, train_hist, rem_hist, train_labels, rem_labels = train_test_split(df['mda'],
df['prev_'+bv],
df[bv],
shuffle=False,
train_size=0.8)
valid_text, test_text, valid_hist, test_hist, valid_labels, test_labels = train_test_split(
rem_text,
rem_hist,
rem_labels,
shuffle=False,
test_size=0.5
)
'''
val_text, test_text, val_hist, test_hist, val_labels, test_labels = train_test_split(temp_text,
temp_hist,
temp_labels,
shuffle=False,
test_size=0.2)
val_text = val_text.astype(str)
'''
train_text = train_text.astype(str)
valid_text = valid_text.astype(str)
test_text = test_text.astype(str)
'''
df_train = pd.DataFrame()
df_test = pd.DataFrame()
for y in train_years_list:
df_train = pd.concat([df_train, pd.read_csv(str(y) + "_tok.csv")])
'''
#bert_path = "/gpfs/u/home/HPDM/HPDMrawt/scratch/npl_env/sdm21-exps/long_document_fin/"
bert_path = "/gpfs/u/home/DLTM/DLTMboxi/scratch/env/longformer-base-4096/"
config = AutoConfig.from_pretrained(bert_path, output_attentions=True)
# load the pretrained Longformer model
bert = AutoModel.from_pretrained(bert_path, config=config)
# load the matching tokenizer
tokenizer = AutoTokenizer.from_pretrained(bert_path)
#TRAIN
# tokenize and encode sequences in the training set
tokens_train = tokenizer.batch_encode_plus(
train_text.tolist(),
add_special_tokens=False
)
#Extract input ids
train_seq_ = tokens_train['input_ids']
#Split each document into chunks of max_length tokens
train_seq = [[train_seq_[j][i:i + max_length] for i in range(0, len(train_seq_[j]), max_length)] for j in range(len(train_seq_))]
#print(train_seq[0][0])
#Add [CLS], [SEP] and [PAD] tokens
train_seq = [[[tokenizer.cls_token_id] + train_seq[j][i] + [tokenizer.sep_token_id] if len(train_seq[j][i]) == max_length else [tokenizer.cls_token_id] + train_seq[j][i] +[tokenizer.sep_token_id] + [tokenizer.pad_token_id] * (max_length-len(train_seq[j][i])) for i in range(len(train_seq[j]))] for j in range(len(train_seq))]
#print(train_seq[0][0])
#df_train_seq=pd.DataFrame()
#df_train_seq["train_seq"]=train_seq
#df_train_seq.to_csv(sec+ "-train_seq.csv")
#Extract attention masks
train_mask_ = tokens_train['attention_mask']
#Split each document into chunks of max_length tokens
train_mask = [[train_mask_[j][i:i + max_length] for i in range(0, len(train_mask_[j]), max_length)] for j in range(len(train_mask_))]
#Add [1] for attention and [0] for [PAD]
train_mask = [[[1] + train_mask[j][i] + [1] if len(train_mask[j][i]) == max_length else [1]+train_mask[j][i]+[1] + [0] * (max_length-len(train_mask[j][i])) for i in range(len(train_mask[j]))] for j in range(len(train_mask))]
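# A minimal, hypothetical helper equivalent to the comprehensions above for a single
# document (illustrative only, not called elsewhere): split the token ids into
# windows of chunk_len, wrap each window with [CLS]/[SEP], right-pad the last window
# with [PAD], and build the matching attention mask (1 for real tokens, 0 for pads;
# the tokenizer's attention_mask is all ones here because no padding was added at
# encode time).
def _chunk_and_pad(ids, chunk_len, cls_id, sep_id, pad_id):
    chunks, masks = [], []
    for start in range(0, len(ids), chunk_len):
        piece = ids[start:start + chunk_len]
        pad = chunk_len - len(piece)
        chunks.append([cls_id] + piece + [sep_id] + [pad_id] * pad)
        masks.append([1] * (len(piece) + 2) + [0] * pad)
    return chunks, masks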
#VALID
# tokenize and encode sequences in the validation set
tokens_valid = tokenizer.batch_encode_plus(
valid_text.tolist(),
add_special_tokens=False
)
#Extract input ids
valid_seq_ = tokens_valid['input_ids']
#Split each document into chunks of max_length tokens
valid_seq = [[valid_seq_[j][i:i + max_length] for i in range(0, len(valid_seq_[j]), max_length)] for j in range(len(valid_seq_))]
#print(valid_seq[0][0])
#Add [CLS], [SEP] and [PAD] tokens
valid_seq = [[[tokenizer.cls_token_id] + valid_seq[j][i] + [tokenizer.sep_token_id] if len(valid_seq[j][i]) == max_length else [tokenizer.cls_token_id] + valid_seq[j][i] +[tokenizer.sep_token_id] + [tokenizer.pad_token_id] * (max_length-len(valid_seq[j][i])) for i in range(len(valid_seq[j]))] for j in range(len(valid_seq))]
#print(valid_seq[0][0])
#df_valid_seq=pd.DataFrame()
#df_valid_seq["valid_seq"]=valid_seq
#df_valid_seq.to_csv(sec+ "-valid_seq.csv")
#Extract attention masks
valid_mask_ = tokens_valid['attention_mask']
#Split each document into chunks of max_length tokens
valid_mask = [[valid_mask_[j][i:i + max_length] for i in range(0, len(valid_mask_[j]), max_length)] for j in range(len(valid_mask_))]
#Add [1] for attention and [0] for [PAD]
valid_mask = [[[1] + valid_mask[j][i] + [1] if len(valid_mask[j][i]) == max_length else [1]+valid_mask[j][i]+[1] + [0] * (max_length-len(valid_mask[j][i])) for i in range(len(valid_mask[j]))] for j in range(len(valid_mask))]
#TEST
# tokenize and encode sequences in the test set
tokens_test = tokenizer.batch_encode_plus(
test_text.tolist(),
add_special_tokens=False
)
#Extract input ids
test_seq_ = tokens_test['input_ids']
#Split each document into chunks of max_length tokens
test_seq = [[test_seq_[j][i:i + max_length] for i in range(0, len(test_seq_[j]), max_length)] for j in range(len(test_seq_))]
#Add [CLS], [SEP] and [PAD] tokens
test_seq = [[[tokenizer.cls_token_id] + test_seq[j][i] + [tokenizer.sep_token_id] if len(test_seq[j][i]) == max_length else [tokenizer.cls_token_id]+test_seq[j][i] + [tokenizer.sep_token_id]+ [tokenizer.pad_token_id] * (max_length-len(test_seq[j][i])) for i in range(len(test_seq[j]))] for j in range(len(test_seq))]
#Extract attention masks
test_mask_ = tokens_test['attention_mask']
#Split each document into chunks of max_length tokens
test_mask = [[test_mask_[j][i:i + max_length] for i in range(0, len(test_mask_[j]), max_length)] for j in range(len(test_mask_))]
#Add [1] for attention and [0] for [PAD]
test_mask = [[[1] + test_mask[j][i] + [1] if len(test_mask[j][i]) == max_length else [1]+test_mask[j][i]+[1] + [0] * (max_length-len(test_mask[j][i])) for i in range(len(test_mask[j]))] for j in range(len(test_mask))]
train_hist = torch.tensor(train_hist.tolist()).to(device)
train_y = torch.tensor(train_labels.tolist()).to(device)
valid_hist = torch.tensor(valid_hist.tolist()).to(device)
valid_y = torch.tensor(valid_labels.tolist()).to(device)
test_hist = torch.tensor(test_hist.tolist()).to(device)
test_y = torch.tensor(test_labels.tolist()).to(device)
#val_hist = torch.tensor(val_hist.tolist()).to(device)
#val_y = torch.tensor(val_labels.tolist()).to(device)
# freeze only the last encoder layer and the pooler; all other parameters stay trainable
for name, param in bert.named_parameters():
if "encoder.layer.11" in name or "pooler" in name:
param.requires_grad = False #True
# pass the pre-trained model to the architecture defined above
model = BERT_Arch(bert)
# push the model to GPU
model = model.to(device)
# define the loss functions
mse_loss = nn.MSELoss()
huber_loss = nn.L1Loss()  # note: nn.L1Loss is MAE; a true Huber loss would be nn.SmoothL1Loss
# number of training epochs
total_epochs = int(sys.argv[4])
start_epoch = int(sys.argv[5])
end_epoch = int(sys.argv[6])
epochs = end_epoch - start_epoch + 1
#plus = int(sys.argv[5])
# different learning rates
learning_rate = float(sys.argv[7])
# initialize the best validation loss
best_valid_loss = float('inf')
best_epoch = 0
# empty lists to store training and validation loss of each epoch
train_losses=[]
valid_losses=[]
#for each epoch
for epoch in range(epochs):
#print('\n Epoch {:} / {:}'.format(epoch + 1, epochs))
torch.cuda.empty_cache()
# define the optimizer
optimizer = AdamW(model.parameters(),
lr = learning_rate, eps = 1e-8) # learning rate
#train model
train_loss, _ , xs_final= train(start_epoch+epoch)
#evaluate model
valid_loss, _ , _ = evaluate()
#save the best model
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
best_epoch = start_epoch + epoch
#print(f'\nTraining Loss: {train_loss:.3f}')
#xs_train = xs_final
model_to_save = model.module if hasattr(model, 'module') else model
torch.save(model_to_save.state_dict(), 'saved_weights_longformer_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm.pt')
#torch.save(model_to_save.state_dict(), 'saved_weights_longformer_'+str(max_length)+'_'+sec+'_'+bv+'_epoch'+str(start_epoch+epoch)+'_of_'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm.pt')
# append training and validation loss
train_losses.append(train_loss)
valid_losses.append(valid_loss)
print(f'\nTraining Loss: {train_loss:.10f}')
print(f'Validation Loss: {valid_loss:.10f}')
valid_loss_file = open('best_valid_loss_longformer_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm.txt', 'w')
valid_loss_file.write(str(best_valid_loss)+"\n")
valid_loss_file.write(str(best_epoch))
valid_loss_file.close()
'''
# pass the pre-trained BERT to our define architecture
model = BERT_Arch(bert)
# push the model to GPU
model = model.to(device)
#load weights of best model
path = 'saved_weights_longformer_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm.pt'
model.load_state_dict(torch.load(path))
xs_train , _ = train_x()
# get predictions for test data
valid_mses = []
test_mses = []
methods = ["bare", "svr", "kr", "lr"]
_ , preds, xs_valid = evaluate()
preds = np.asarray(preds)
valid_y = valid_y.cpu().data.numpy()
valid_mse = mean_squared_error(valid_y, preds)
valid_mses.append(valid_mse)
xs_test, preds = test()
preds = np.asarray(preds)
test_y = test_y.cpu().data.numpy()
test_mse = mean_squared_error(test_y, preds)
test_mses.append(test_mse)
print("bert mse: ",test_mse)
lr = LinearRegression()
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
svr = SVR(kernel='rbf', C=0.1, epsilon=0.0001) #linear')
models_list = [svr, kr, lr]
for m in models_list:
m.fit(xs_train, train_labels.to_numpy())
preds = m.predict(xs_valid)
valid_mse = mean_squared_error(valid_labels.to_numpy(), preds)
valid_mses.append(valid_mse)
preds = m.predict(xs_test)
test_mse = mean_squared_error(test_labels.to_numpy(), preds)
test_mses.append(test_mse)
print(m, test_mse,'---',valid_mse)
mse = str(test_mses[valid_mses.index(min(valid_mses))])+"---"+methods[valid_mses.index(min(valid_mses))]+"---"+str(min(valid_mses))
spearmanr = (stats.spearmanr(preds, test_y))[0]
kendallr = (stats.kendalltau(preds, test_y))[0]
print("longformer mse: ", mse)
mse_file = open('mse_longformer_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm.txt', "w")
mse_file.write(mse + "\n")
mse_file.write(str(best_valid_loss)+"\n")
mse_file.write(str(spearmanr) + "\n")
mse_file.write(str(kendallr) + "\n")
#mse_file.close()
test_error = pd.DataFrame()
test_error['cik_year'] = test_cik.tolist()
test_error['test_y'] = test_y.tolist()
test_error['preds'] = [p[0] for p in preds.tolist()]
test_error['error'] = test_error['test_y'] - test_error['preds']
test_error.to_csv('error_longformer_'+str(max_length)+'_'+sec+'_'+bv+'_mean_hist.csv', index=False)
#Linear Baseline
lr = LinearRegression().fit(train_hist.cpu().data.numpy().reshape(-1, 1),
train_y.cpu().data.numpy().reshape(-1, 1))
preds = lr.predict(test_hist.cpu().data.numpy().reshape(-1, 1))
lr_mse = mean_squared_error(test_y.reshape(-1, 1), preds)
print("LR mse", lr_mse)
mse_file.write("Linear mse: " + str(lr_mse))
mse_file.close()
'''
print("Total execution time: ", time.time() - start)
|
#!/usr/bin/env python
import os
import json
import codecs
import base64
from copy import copy
from lulu import config
from lulu.util import fs
from lulu.extractor import SimpleExtractor
from lulu.common import (
r1,
match1,
url_info,
print_info,
get_content,
post_content,
get_location,
get_filename,
download_urls,
get_decoded_html,
playlist_not_supported,
print_more_compatible as print
)
__all__ = ['netease_download']
site_info = '163.com'
header = copy(config.FAKE_HEADERS)
header.update({
'Referer': 'http://music.163.com/',
'Host': 'music.163.com',
})
class Netease(SimpleExtractor):
def __init__(self):
super().__init__()
self.site_info = site_info
self.enc_sec_key = self.rsa_encrypt(
config.NETEASE_MUSIC_SECKEY, config.NETEASE_MUSIC_PUBKEY,
config.NETEASE_MUSIC_COMMENT_MODULE
)
def rsa_encrypt(self, text, pub_key, modulus):
text = text[::-1]
rs = int(
codecs.encode(bytes(text, encoding='utf8'), 'hex'), 16
)**int(pub_key, 16) % int(modulus, 16)
return format(rs, 'x').zfill(256)
def aes_encrypt(self, text, sec_key):
from cryptography.hazmat.primitives.ciphers import (
Cipher, algorithms, modes
)
from cryptography.hazmat.backends import default_backend
backend = default_backend()
pad = 16 - len(text) % 16
text = text + pad * chr(pad)
cipher = Cipher(
algorithms.AES(sec_key.encode('utf-8')),
modes.CBC(b'0102030405060708'),
backend=backend
)
encryptor = cipher.encryptor()
ciphertext = encryptor.update(text.encode('utf-8')) \
+ encryptor.finalize()
ciphertext = base64.b64encode(ciphertext)
return ciphertext
def create_params(self, song_id):
text = '{{"ids":[{}], br:"320000", csrf_token:"csrf"}}'.format(song_id)
nonce = '0CoJUm6Qyw8W8jud'
nonce2 = 16 * 'F'
enc_text = self.aes_encrypt(
self.aes_encrypt(text, nonce).decode('utf-8'), nonce2
)
return enc_text
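    # Note on the request encryption (inferred from the methods above): the JSON
    # payload is AES-128-CBC encrypted twice and base64-encoded -- first with the
    # fixed key '0CoJUm6Qyw8W8jud', then with a second 16-char key (here the
    # constant 'F' * 16, presumably the same value as NETEASE_MUSIC_SECKEY) -- and
    # sent as 'params'; 'encSecKey' carries that second key, reversed and encrypted
    # with textbook RSA (m**e mod n, no padding) against the configured public
    # exponent and modulus.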
def get_mp3_link(self, song_id):
data = {
'params': self.create_params(song_id),
'encSecKey': self.enc_sec_key,
}
url = config.NETEASE_MP3_URL
req = post_content(
url, headers=header,
post_data=data, decoded=False
)
data = json.loads(req.decode('utf-8'))
return data['data'][0]['url']
def extract(self, url, **kwargs):
if '163.fm' in url:
url = get_location(url)
if 'music.163.com' in url:
self.need_download = False
self.netease_cloud_music_download(url, **kwargs)
else:
html = get_decoded_html(url)
title = r1('movieDescription=\'([^\']+)\'', html) or \
r1('<title>(.+)</title>', html)
if title[0] == ' ':
title = title[1:]
src = r1(r'<source src="([^"]+)"', html) or \
r1(r'<source type="[^"]+" src="([^"]+)"', html)
if src:
url = src
_, ext, size = url_info(src)
else:
url = (
r1(r'["\'](.+)-list.m3u8["\']', html) or
r1(r'["\'](.+).m3u8["\']', html)
) + '.mp4'
_, _, size = url_info(url)
ext = 'mp4'
return {
'urls': [url],
'title': title,
'file_format': ext,
'size': size,
}
def netease_cloud_music_download(
self, url, output_dir='.', info_only=False, **kwargs
):
rid = match1(url, r'\Wid=(.*)')
if rid is None:
rid = match1(url, r'/(\d+)/?')
if 'album' in url:
j = json.loads(get_content(
'http://music.163.com/api/album/{}?id={}&csrf_token='.format(
rid, rid
),
headers=header
))
artist_name = j['album']['artists'][0]['name']
album_name = j['album']['name'].strip()
new_dir = output_dir + '/' + fs.legitimize(
'{} - {}'.format(artist_name, album_name)
)
if not info_only:
if not os.path.exists(new_dir):
os.mkdir(new_dir)
cover_url = j['album']['picUrl']
download_urls([cover_url], 'cover', 'jpg', 0, new_dir)
for song in j['album']['songs']:
self.netease_song_download(
song, output_dir=new_dir, info_only=info_only
)
# download lyrics
self.netease_lyric_download(
song, output_dir=new_dir, info_only=info_only, **kwargs
)
elif 'playlist' in url:
j = json.loads(get_content(
'http://music.163.com/api/playlist/detail?'
'id={}&csrf_token='.format(rid),
headers=header
))
new_dir = output_dir + '/' + fs.legitimize(j['result']['name'])
if not info_only:
if not os.path.exists(new_dir):
os.mkdir(new_dir)
cover_url = j['result']['coverImgUrl']
download_urls([cover_url], 'cover', 'jpg', 0, new_dir)
prefix_width = len(str(len(j['result']['tracks'])))
for n, song in enumerate(j['result']['tracks']):
playlist_prefix = '{:0>{}d}_'.format(n, prefix_width)
self.netease_song_download(
song, output_dir=new_dir, info_only=info_only,
playlist_prefix=playlist_prefix
)
# download lyrics
self.netease_lyric_download(
song, output_dir=new_dir,
info_only=info_only, playlist_prefix=playlist_prefix,
**kwargs
)
elif 'song' in url:
j = json.loads(get_content(
'http://music.163.com/api/song/detail/?'
'id={}&ids=[{}]&csrf_token='.format(rid, rid),
headers=header
))
song = j['songs'][0]
self.netease_song_download(
song, output_dir=output_dir, info_only=info_only
)
# download lyrics
self.netease_lyric_download(
song, output_dir=output_dir, info_only=info_only, **kwargs
)
elif 'program' in url:
j = json.loads(get_content(
'http://music.163.com/api/dj/program/detail/?'
'id={}&ids=[{}]&csrf_token='.format(rid, rid),
headers=header
))
self.netease_song_download(
j['program']['mainSong'], output_dir=output_dir,
info_only=info_only
)
elif 'radio' in url:
j = json.loads(get_content(
'http://music.163.com/api/dj/program/byradio/?'
'radioId={}&ids=[{}]&csrf_token='.format(rid, rid),
headers=header
))
for i in j['programs']:
self.netease_song_download(
i['mainSong'], output_dir=output_dir, info_only=info_only
)
elif 'mv' in url:
j = json.loads(get_content(
'http://music.163.com/api/mv/detail/?'
'id={}&ids=[{}]&csrf_token='.format(rid, rid),
headers=header
))
self.netease_video_download(
j['data'], output_dir=output_dir, info_only=info_only
)
def netease_lyric_download(
self, song, output_dir, info_only, playlist_prefix='', **kwargs
):
if info_only or not kwargs.get('caption'):
return
data = json.loads(get_content(
'http://music.163.com/api/song/lyric/?'
'id={}&lv=-1&csrf_token='.format(song['id']),
headers={'Referer': 'http://music.163.com/'}
))
title = '{}{}. {}'.format(
playlist_prefix, song['position'], song['name']
)
filename = '{}.lrc'.format(get_filename(title))
print('Saving {} ...'.format(filename), end='', flush=True)
with open(
os.path.join(output_dir, filename), 'w', encoding='utf-8'
) as x:
x.write(data['lrc']['lyric'])
print('Done.')
def netease_video_download(self, vinfo, output_dir, info_only):
title = '{} - {}'.format(vinfo['name'], vinfo['artistName'])
url_best = sorted(
vinfo['brs'].items(), reverse=True, key=lambda x: int(x[0])
)[0][1]
self.netease_download_common(
title, url_best, output_dir=output_dir, info_only=info_only
)
def netease_song_download(
self, song, output_dir, info_only, playlist_prefix=''
):
title = '{}{}. {}'.format(
playlist_prefix, song['position'], song['name']
)
url_best = self.get_mp3_link(song['id'])
self.netease_download_common(
title, url_best, output_dir=output_dir, info_only=info_only
)
def netease_download_common(self, title, url_best, output_dir, info_only):
songtype, ext, size = url_info(url_best)
print_info(site_info, title, songtype, size)
if not info_only:
download_urls([url_best], title, ext, size, output_dir)
download = netease_download = Netease()
download_playlist = playlist_not_supported(site_info)
|
import copy
from gtfspy.routing.label import compute_pareto_front
from gtfspy.routing.node_profile_analyzer_time import NodeProfileAnalyzerTime
from gtfspy.routing.profile_block_analyzer import ProfileBlockAnalyzer
from gtfspy.routing.profile_block import ProfileBlock
class FastestPathAnalyzer:
def __init__(self, labels, start_time_dep, end_time_dep, walk_duration=float('inf'), label_props_to_consider=None, **kwargs):
"""
Parameters
----------
labels: list
List of labels (each label should at least have attributes "departure_time" and "arrival_time")
walk_duration: float
What is the maximum duration for a journey to be considered.
label_props_to_consider: list
"""
for label in labels:
assert (hasattr(label, "departure_time"))
assert (hasattr(label, "arrival_time_target"))
self.start_time_dep = start_time_dep
self.end_time_dep = end_time_dep
self.walk_duration = walk_duration
if label_props_to_consider is None:
self.label_props = []
else:
self.label_props = label_props_to_consider
self._fastest_path_labels = self._compute_fastest_path_labels(labels)
# assert each label has the required properties
for label in self._fastest_path_labels:
for prop in self.label_props:
assert (hasattr(label, prop))
self.kwargs = kwargs
def _compute_fastest_path_labels(self, labels):
relevant_labels = [label.get_copy() for label in labels if (self.start_time_dep < label.departure_time <= self.end_time_dep)]
        if len(relevant_labels) == 0 or relevant_labels[-1].departure_time < self.end_time_dep:
# add an after label
smallest_arr_time_after_end_time = float('inf')
smallest_arr_time_label = None
for label in labels:
if self.end_time_dep < label.departure_time and (label.arrival_time_target < smallest_arr_time_after_end_time):
smallest_arr_time_after_end_time = label.arrival_time_target
smallest_arr_time_label = label
if smallest_arr_time_label is not None:
relevant_labels.append(smallest_arr_time_label.get_copy())
for label in relevant_labels:
if hasattr(label, "first_leg_is_walk"):
label.first_leg_is_walk = False
fp_labels = list(reversed(compute_pareto_front(relevant_labels, ignore_n_boardings=True)))
# assert ordered:
for i in range(len(fp_labels) - 1):
try:
assert (fp_labels[i].arrival_time_target <= fp_labels[i + 1].arrival_time_target)
assert (fp_labels[i].departure_time < fp_labels[i + 1].departure_time)
except AssertionError as e:
for fp_label in fp_labels:
print(fp_label)
print(fp_labels[i].arrival_time_target, fp_labels[i + 1].arrival_time_target)
print(fp_labels[i].departure_time, fp_labels[i + 1].departure_time)
raise e
return fp_labels
def get_fastest_path_labels(self, include_next_label_outside_interval=False):
if include_next_label_outside_interval or not self._fastest_path_labels:
return self._fastest_path_labels
else:
if self._fastest_path_labels[-1].departure_time == self.end_time_dep:
return self._fastest_path_labels
else:
return self._fastest_path_labels[:-1]
def calculate_pre_journey_waiting_times_ignoring_direct_walk(self):
previous_label = None
for label in self._fastest_path_labels:
if previous_label:
label.pre_journey_wait_fp = label.departure_time - previous_label.departure_time
else:
label.pre_journey_wait_fp = label.departure_time - self.start_time_dep
previous_label = label
def get_fastest_path_temporal_distance_blocks(self):
"""
Returns
-------
blocks: list[ProfileBlock]
"""
def _label_to_prop_dict(label):
return {prop: getattr(label, prop) for prop in self.label_props}
labels = self._fastest_path_labels
for i in range(len(labels) - 1):
assert (labels[i].departure_time < labels[i + 1].departure_time)
previous_dep_time = self.start_time_dep
blocks = []
for label in labels:
if previous_dep_time >= self.end_time_dep:
break
end_time = min(label.departure_time, self.end_time_dep)
assert (end_time >= previous_dep_time)
temporal_distance_start = label.duration() + (label.departure_time - previous_dep_time)
if temporal_distance_start > self.walk_duration:
split_point_x_computed = label.departure_time - (self.walk_duration - label.duration())
split_point_x = min(split_point_x_computed, end_time)
if previous_dep_time < split_point_x:
# add walk block, only if it is required
walk_block = ProfileBlock(previous_dep_time,
split_point_x,
self.walk_duration,
self.walk_duration,
**_label_to_prop_dict(label))
blocks.append(walk_block)
if split_point_x < end_time:
trip_block = ProfileBlock(split_point_x, end_time,
label.duration() + (end_time - split_point_x),
label.duration(),
**_label_to_prop_dict(label))
blocks.append(trip_block)
else:
journey_block = ProfileBlock(
previous_dep_time,
end_time,
temporal_distance_start,
temporal_distance_start - (end_time - previous_dep_time),
**_label_to_prop_dict(label))
blocks.append(journey_block)
previous_dep_time = blocks[-1].end_time
if previous_dep_time < self.end_time_dep:
last_block = ProfileBlock(previous_dep_time,
self.end_time_dep,
self.walk_duration,
self.walk_duration)
blocks.append(last_block)
return blocks
def get_time_analyzer(self):
"""
Returns
-------
NodeProfileAnalyzerTime
"""
return NodeProfileAnalyzerTime(self._fastest_path_labels,
self.walk_duration,
self.start_time_dep,
self.end_time_dep)
def get_props(self):
return list(self.label_props)
def get_temporal_distance_analyzer(self):
kwargs = self.kwargs
return ProfileBlockAnalyzer(self.get_fastest_path_temporal_distance_blocks(), **kwargs)
def get_prop_analyzer_for_pre_journey_wait(self):
kwargs = self.kwargs
prop_blocks = []
fp_blocks = self.get_fastest_path_temporal_distance_blocks()
for b in fp_blocks:
if b.distance_end == float("inf"):
prop_block = b
elif b.is_flat():
prop_block = ProfileBlock(b.start_time, b.end_time, 0, 0)
else:
prop_block = ProfileBlock(b.start_time, b.end_time, b.width(), 0)
prop_blocks.append(prop_block)
return ProfileBlockAnalyzer(prop_blocks, **kwargs)
def get_prop_analyzer_flat(self, property, value_no_next_journey, value_cutoff):
"""
        Get a journey property analyzer, where each journey's property value is weighted
        by the duration for which that journey is the fastest option.
Parameters
----------
property: string
Name of the property, needs to be one of label_props given on initialization.
value_no_next_journey:
Value of the profile, when there is no next journey available.
        value_cutoff: number
            Value of the property when the walk-duration cutoff (direct walk) is the fastest option.
Returns
-------
ProfileBlockAnalyzer
"""
kwargs = self.kwargs
fp_blocks = self.get_fastest_path_temporal_distance_blocks()
prop_blocks = []
for b in fp_blocks:
if b.is_flat():
if b.distance_end == self.walk_duration and b.distance_end != float('inf'):
prop_value = value_cutoff
else:
prop_value = value_no_next_journey
else:
prop_value = b[property]
prop_block = ProfileBlock(b.start_time, b.end_time, prop_value, prop_value)
prop_blocks.append(prop_block)
return ProfileBlockAnalyzer(prop_blocks, **kwargs)
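# A minimal, hypothetical usage sketch (the label list is assumed to come from one
# of gtfspy's profile routers; only names defined in this module are real):
#
#     fpa = FastestPathAnalyzer(labels,
#                               start_time_dep=7 * 3600,
#                               end_time_dep=9 * 3600,
#                               walk_duration=30 * 60)
#     blocks = fpa.get_fastest_path_temporal_distance_blocks()
#     block_analyzer = fpa.get_temporal_distance_analyzer()  # ProfileBlockAnalyzer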
|
import numpy as nm
from sfepy.linalg import dot_sequences
from sfepy.homogenization.utils import iter_sym
from sfepy.terms.terms import Term, terms
from sfepy.terms.terms_th import THTerm, ETHTerm
## expr = """
## e = 1/2 * (grad( vec( u ) ) + grad( vec( u ) ).T)
## D = map( D_sym )
## s = D * e
## div( s )
## """
## """
## e[i,j] = 1/2 * (der[j]( u[i] ) + der[i]( u[j] ))
## map =
## D[i,j,k,l]
## s[i,j] = D[i,j,k,l] * e[k,l]
## """
class LinearElasticTerm(Term):
r"""
General linear elasticity term, with :math:`D_{ijkl}` given in
the usual matrix form exploiting symmetry: in 3D it is :math:`6\times6`
with the indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in 2D it is
:math:`3\times3` with the indices ordered as :math:`[11, 22, 12]`. Can be
evaluated. Can use derivatives.
:Definition:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
:Arguments 1:
- material : :math:`D_{ijkl}`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
:Arguments 2:
- material : :math:`D_{ijkl}`
- parameter_1 : :math:`\ul{w}`
- parameter_2 : :math:`\ul{u}`
"""
name = 'dw_lin_elastic'
arg_types = (('material', 'virtual', 'state'),
('material', 'parameter_1', 'parameter_2'))
arg_shapes = {'material' : 'S, S', 'virtual' : ('D', 'state'),
'state' : 'D', 'parameter_1' : 'D', 'parameter_2' : 'D'}
modes = ('weak', 'eval')
## symbolic = {'expression': expr,
## 'map' : {'u' : 'state', 'D_sym' : 'material'}}
def get_fargs(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
if mode == 'weak':
if diff_var is None:
strain = self.get(state, 'cauchy_strain')
fmode = 0
else:
strain = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return 1.0, strain, mat, vg, fmode
elif mode == 'eval':
strain1 = self.get(virtual, 'cauchy_strain')
strain2 = self.get(state, 'cauchy_strain')
return 1.0, strain1, strain2, mat, vg
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
return (n_el, 1, 1, 1), state.dtype
def set_arg_types(self):
if self.mode == 'weak':
self.function = terms.dw_lin_elastic
else:
self.function = terms.d_lin_elastic
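# A hypothetical problem-description sketch using the term above (integral name,
# region and variable names are assumptions, not taken from this file):
#
#     from sfepy.mechanics.matcoefs import stiffness_from_lame
#     materials = {
#         'solid': ({'D': stiffness_from_lame(3, lam=1e1, mu=1e0)},),
#     }
#     equations = {
#         'balance': """dw_lin_elastic.i.Omega(solid.D, v, u) = 0""",
#     }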
class LinearElasticIsotropicTerm(LinearElasticTerm):
r"""
Isotropic linear elasticity term.
:Definition:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u}) \mbox{ with }
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
    :Arguments 1:
        - material_1 : :math:`\lambda`
        - material_2 : :math:`\mu`
        - virtual : :math:`\ul{v}`
        - state : :math:`\ul{u}`
    :Arguments 2:
        - material_1 : :math:`\lambda`
        - material_2 : :math:`\mu`
        - parameter_1 : :math:`\ul{w}`
        - parameter_2 : :math:`\ul{u}`
"""
name = 'dw_lin_elastic_iso'
arg_types = (('material_1', 'material_2', 'virtual', 'state'),
('material_1', 'material_2', 'parameter_1', 'parameter_2'))
arg_shapes = {'material_1' : '1, 1', 'material_2' : '1, 1',
'virtual' : ('D', 'state'), 'state' : 'D',
'parameter_1' : 'D', 'parameter_2' : 'D'}
geometries = ['2_3', '2_4', '3_4', '3_8']
def get_fargs(self, lam, mu, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
from sfepy.mechanics.matcoefs import stiffness_from_lame
mat = stiffness_from_lame(self.region.dim, lam, mu)[:, :, 0, 0, :, :]
return LinearElasticTerm.get_fargs(self, mat, virtual, state,
mode=mode, term_mode=term_mode,
diff_var=diff_var, **kwargs)
def get_eval_shape(self, mat1, mat2, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
return LinearElasticTerm.get_eval_shape(self, None, None, state)
class SDLinearElasticTerm(Term):
r"""
Sensitivity analysis of the linear elastic term.
:Definition:
.. math::
\int_{\Omega} \hat{D}_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
.. math::
\hat{D}_{ijkl} = D_{ijkl}(\nabla \cdot \ul{\Vcal})
- D_{ijkq}{\partial \Vcal_l \over \partial x_q}
- D_{iqkl}{\partial \Vcal_j \over \partial x_q}
:Arguments:
- material : :math:`D_{ijkl}`
- parameter_w : :math:`\ul{w}`
- parameter_u : :math:`\ul{u}`
- parameter_mesh_velocity : :math:`\ul{\Vcal}`
"""
name = 'd_sd_lin_elastic'
arg_types = ('material', 'parameter_w', 'parameter_u',
'parameter_mesh_velocity')
arg_shapes = {'material' : 'S, S',
'parameter_w' : 'D', 'parameter_u' : 'D',
'parameter_mesh_velocity' : 'D'}
geometries = ['2_3', '2_4', '3_4', '3_8']
function = terms.d_sd_lin_elastic
def get_fargs(self, mat, par_w, par_u, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(par_u)
grad_w = self.get(par_w, 'grad')
grad_u = self.get(par_u, 'grad')
grad_mv = self.get(par_mv, 'grad')
return 1.0, grad_w, grad_u, grad_mv, mat, vg
def get_eval_shape(self, mat, par_w, par_u, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(par_u)
return (n_el, 1, 1, 1), par_u.dtype
class LinearElasticTHTerm(THTerm):
r"""
Fading memory linear elastic (viscous) term. Can use derivatives.
:Definition:
.. math::
\int_{\Omega} \left [\int_0^t
\Hcal_{ijkl}(t-\tau)\,e_{kl}(\ul{u}(\tau)) \difd{\tau}
\right]\,e_{ij}(\ul{v})
:Arguments:
- ts : :class:`TimeStepper` instance
- material : :math:`\Hcal_{ijkl}(\tau)`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_lin_elastic_th'
arg_types = ('ts', 'material', 'virtual', 'state')
arg_shapes = {'material' : '.: N, S, S',
'virtual' : ('D', 'state'), 'state' : 'D'}
function = staticmethod(terms.dw_lin_elastic)
def get_fargs(self, ts, mats, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
if mode == 'weak':
if diff_var is None:
def iter_kernel():
for ii, mat in enumerate(mats):
strain = self.get(state, 'cauchy_strain',
step=-ii)
mat = nm.tile(mat, (n_el, n_qp, 1, 1))
yield ii, (ts.dt, strain, mat, vg, 0)
fargs = iter_kernel
else:
strain = nm.array([0], ndmin=4, dtype=nm.float64)
mat = nm.tile(mats[0], (n_el, n_qp, 1, 1))
fargs = ts.dt, strain, mat, vg, 1
return fargs
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
class LinearElasticETHTerm(ETHTerm):
r"""
This term has the same definition as dw_lin_elastic_th, but assumes an
exponential approximation of the convolution kernel resulting in much
higher efficiency. Can use derivatives.
:Definition:
.. math::
\int_{\Omega} \left [\int_0^t
\Hcal_{ijkl}(t-\tau)\,e_{kl}(\ul{u}(\tau)) \difd{\tau}
\right]\,e_{ij}(\ul{v})
:Arguments:
- ts : :class:`TimeStepper` instance
- material_0 : :math:`\Hcal_{ijkl}(0)`
- material_1 : :math:`\exp(-\lambda \Delta t)` (decay at :math:`t_1`)
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_lin_elastic_eth'
arg_types = ('ts', 'material_0', 'material_1', 'virtual', 'state')
arg_shapes = {'material_0' : 'S, S', 'material_1' : '1, 1',
'virtual' : ('D', 'state'), 'state' : 'D'}
function = staticmethod(terms.dw_lin_elastic)
def get_fargs(self, ts, mat0, mat1, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _, key = self.get_mapping(state, return_key=True)
if diff_var is None:
strain = self.get(state, 'cauchy_strain')
key += tuple(self.arg_names[ii] for ii in [1, 2, 4])
data = self.get_eth_data(key, state, mat1, strain)
fargs = (ts.dt, data.history + data.values, mat0, vg, 0)
else:
aux = nm.array([0], ndmin=4, dtype=nm.float64)
fargs = (ts.dt, aux, mat0, vg, 1)
return fargs
class LinearPrestressTerm(Term):
r"""
Linear prestress term, with the prestress :math:`\sigma_{ij}` given either
in the usual vector form exploiting symmetry: in 3D it has 6 components
with the indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in 2D it has
3 components with the indices ordered as :math:`[11, 22, 12]`, or in the
matrix (possibly non-symmetric) form. Can be evaluated.
:Definition:
.. math::
\int_{\Omega} \sigma_{ij} e_{ij}(\ul{v})
:Arguments 1:
- material : :math:`\sigma_{ij}`
- virtual : :math:`\ul{v}`
:Arguments 2:
- material : :math:`\sigma_{ij}`
- parameter : :math:`\ul{u}`
"""
name = 'dw_lin_prestress'
arg_types = (('material', 'virtual'),
('material', 'parameter'))
arg_shapes = [{'material' : 'S, 1', 'virtual' : ('D', None),
'parameter' : 'D'},
{'material' : 'D, D'}]
modes = ('weak', 'eval')
def get_fargs(self, mat, virtual,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(virtual)
sh = mat.shape
is_nonsym = sh[2] == sh[3] == vg.dim and not(vg.dim == 1)
if is_nonsym:
mat = mat.reshape(sh[:2] + (vg.dim**2, 1))
if mode == 'weak':
return mat, vg
else:
if is_nonsym:
strain = self.get(virtual, 'grad').transpose((0,1,3,2))
nel, nqp, nr, nc = strain.shape
strain = strain.reshape((nel, nqp, nr*nc, 1))
else:
strain = self.get(virtual, 'cauchy_strain')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return strain, mat, vg, fmode
def get_eval_shape(self, mat, virtual,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(virtual)
if mode != 'qp':
n_qp = 1
return (n_el, n_qp, 1, 1), virtual.dtype
def d_lin_prestress(self, out, strain, mat, vg, fmode):
aux = dot_sequences(mat, strain, mode='ATB')
if fmode == 2:
out[:] = aux
status = 0
else:
status = vg.integrate(out, aux, fmode)
return status
def set_arg_types(self):
if self.mode == 'weak':
self.function = terms.dw_lin_prestress
else:
self.function = self.d_lin_prestress
class LinearStrainFiberTerm(Term):
r"""
Linear (pre)strain fiber term with the unit direction vector
:math:`\ul{d}`.
:Definition:
.. math::
\int_{\Omega} D_{ijkl} e_{ij}(\ul{v}) \left(d_k d_l\right)
:Arguments:
- material_1 : :math:`D_{ijkl}`
- material_2 : :math:`\ul{d}`
- virtual : :math:`\ul{v}`
"""
name = 'dw_lin_strain_fib'
arg_types = ('material_1', 'material_2', 'virtual')
arg_shapes = {'material_1' : 'S, S', 'material_2' : 'D, 1',
'virtual' : ('D', None)}
function = staticmethod(terms.dw_lin_strain_fib)
def get_fargs(self, mat1, mat2, virtual,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(virtual)
omega = nm.empty(mat1.shape[:3] + (1,), dtype=nm.float64)
for ii, (ir, ic) in enumerate(iter_sym(mat2.shape[2])):
omega[..., ii, 0] = mat2[..., ir, 0] * mat2[..., ic, 0]
return mat1, omega, vg
class CauchyStrainTerm(Term):
r"""
Evaluate Cauchy strain tensor.
It is given in the usual vector form exploiting symmetry: in 3D it has 6
components with the indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in
2D it has 3 components with the indices ordered as :math:`[11, 22,
12]`. The last three (non-diagonal) components are doubled so that it is
energetically conjugate to the Cauchy stress tensor with the same storage.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_{\Omega} \ull{e}(\ul{w})
.. math::
\mbox{vector for } K \from \Ical_h: \int_{T_K} \ull{e}(\ul{w}) /
\int_{T_K} 1
.. math::
\ull{e}(\ul{w})|_{qp}
:Arguments:
- parameter : :math:`\ul{w}`
"""
name = 'ev_cauchy_strain'
arg_types = ('parameter',)
arg_shapes = {'parameter' : 'D'}
@staticmethod
def function(out, strain, vg, fmode):
if fmode == 2:
out[:] = strain
status = 0
else:
status = terms.de_cauchy_strain(out, strain, vg, fmode)
return status
def get_fargs(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(parameter)
strain = self.get(parameter, 'cauchy_strain')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return strain, vg, fmode
def get_eval_shape(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
if mode != 'qp':
n_qp = 1
return (n_el, n_qp, dim * (dim + 1) // 2, 1), parameter.dtype
class CauchyStrainSTerm(CauchyStrainTerm):
r"""
Evaluate Cauchy strain tensor on a surface region.
See :class:`CauchyStrainTerm`.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_{\Gamma} \ull{e}(\ul{w})
.. math::
\mbox{vector for } K \from \Ical_h: \int_{T_K} \ull{e}(\ul{w}) /
\int_{T_K} 1
.. math::
\ull{e}(\ul{w})|_{qp}
:Arguments:
- parameter : :math:`\ul{w}`
"""
name = 'ev_cauchy_strain_s'
arg_types = ('parameter',)
integration = 'surface_extra'
class CauchyStressTerm(Term):
r"""
Evaluate Cauchy stress tensor.
It is given in the usual vector form exploiting symmetry: in 3D it has 6
components with the indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in
2D it has 3 components with the indices ordered as :math:`[11, 22, 12]`.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_{\Omega} D_{ijkl} e_{kl}(\ul{w})
.. math::
\mbox{vector for } K \from \Ical_h:
\int_{T_K} D_{ijkl} e_{kl}(\ul{w}) / \int_{T_K} 1
.. math::
D_{ijkl} e_{kl}(\ul{w})|_{qp}
:Arguments:
- material : :math:`D_{ijkl}`
- parameter : :math:`\ul{w}`
"""
name = 'ev_cauchy_stress'
arg_types = ('material', 'parameter')
arg_shapes = {'material' : 'S, S', 'parameter' : 'D'}
@staticmethod
def function(out, coef, strain, mat, vg, fmode):
if fmode == 2:
out[:] = dot_sequences(mat, strain)
status = 0
else:
status = terms.de_cauchy_stress(out, strain, mat, vg, fmode)
if coef is not None:
out *= coef
return status
def get_fargs(self, mat, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(parameter)
strain = self.get(parameter, 'cauchy_strain')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return None, strain, mat, vg, fmode
def get_eval_shape(self, mat, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
if mode != 'qp':
n_qp = 1
return (n_el, n_qp, dim * (dim + 1) // 2, 1), parameter.dtype
class CauchyStressTHTerm(CauchyStressTerm, THTerm):
r"""
Evaluate fading memory Cauchy stress tensor.
It is given in the usual vector form exploiting symmetry: in 3D it has 6
components with the indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in
2D it has 3 components with the indices ordered as :math:`[11, 22, 12]`.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_{\Omega} \int_0^t \Hcal_{ijkl}(t-\tau)\,e_{kl}(\ul{w}(\tau))
\difd{\tau}
.. math::
\mbox{vector for } K \from \Ical_h:
\int_{T_K} \int_0^t \Hcal_{ijkl}(t-\tau)\,e_{kl}(\ul{w}(\tau))
\difd{\tau} / \int_{T_K} 1
.. math::
\int_0^t \Hcal_{ijkl}(t-\tau)\,e_{kl}(\ul{w}(\tau)) \difd{\tau}|_{qp}
:Arguments:
- ts : :class:`TimeStepper` instance
- material : :math:`\Hcal_{ijkl}(\tau)`
- parameter : :math:`\ul{w}`
"""
name = 'ev_cauchy_stress_th'
arg_types = ('ts', 'material', 'parameter')
arg_shapes = {'material' : '.: N, S, S', 'parameter' : 'D'}
def get_fargs(self, ts, mats, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
def iter_kernel():
for ii, mat in enumerate(mats):
strain = self.get(state, 'cauchy_strain',
step=-ii)
mat = nm.tile(mat, (n_el, n_qp, 1, 1))
yield ii, (ts.dt, strain, mat, vg, fmode)
return iter_kernel
def get_eval_shape(self, ts, mats, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
out = CauchyStressTerm.get_eval_shape(self, mats, parameter, mode,
term_mode, diff_var, **kwargs)
return out
class CauchyStressETHTerm(CauchyStressTerm, ETHTerm):
r"""
Evaluate fading memory Cauchy stress tensor.
It is given in the usual vector form exploiting symmetry: in 3D it has 6
components with the indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in
2D it has 3 components with the indices ordered as :math:`[11, 22, 12]`.
Assumes an exponential approximation of the convolution kernel resulting in
much higher efficiency.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_{\Omega} \int_0^t \Hcal_{ijkl}(t-\tau)\,e_{kl}(\ul{w}(\tau))
\difd{\tau}
.. math::
\mbox{vector for } K \from \Ical_h:
\int_{T_K} \int_0^t \Hcal_{ijkl}(t-\tau)\,e_{kl}(\ul{w}(\tau))
\difd{\tau} / \int_{T_K} 1
.. math::
\int_0^t \Hcal_{ijkl}(t-\tau)\,e_{kl}(\ul{w}(\tau)) \difd{\tau}|_{qp}
:Arguments:
- ts : :class:`TimeStepper` instance
- material_0 : :math:`\Hcal_{ijkl}(0)`
- material_1 : :math:`\exp(-\lambda \Delta t)` (decay at :math:`t_1`)
- parameter : :math:`\ul{w}`
"""
name = 'ev_cauchy_stress_eth'
arg_types = ('ts', 'material_0', 'material_1', 'parameter')
arg_shapes = {'material_0' : 'S, S', 'material_1' : '1, 1',
'parameter' : 'D'}
def get_fargs(self, ts, mat0, mat1, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _, key = self.get_mapping(state, return_key=True)
strain = self.get(state, 'cauchy_strain')
key += tuple(self.arg_names[1:])
data = self.get_eth_data(key, state, mat1, strain)
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return ts.dt, data.history + data.values, mat0, vg, fmode
def get_eval_shape(self, ts, mat0, mat1, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
out = CauchyStressTerm.get_eval_shape(self, mat0, parameter, mode,
term_mode, diff_var, **kwargs)
return out
class NonsymElasticTerm(Term):
r"""
Elasticity term with non-symmetric gradient. The indices of matrix
:math:`D_{ijkl}` are ordered as
:math:`[11, 12, 13, 21, 22, 23, 31, 32, 33]` in 3D and as
:math:`[11, 12, 21, 22]` in 2D.
:Definition:
.. math::
\int_{\Omega} \ull{D} \nabla\ul{u} : \nabla\ul{v}
:Arguments 1:
- material : :math:`\ull{D}`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
:Arguments 2:
- material : :math:`\ull{D}`
- parameter_1 : :math:`\ul{w}`
- parameter_2 : :math:`\ul{u}`
"""
name = 'dw_nonsym_elastic'
arg_types = (('material', 'virtual', 'state'),
('material', 'parameter_1', 'parameter_2'))
arg_shapes = {'material' : 'D2, D2', 'virtual' : ('D', 'state'),
'state' : 'D', 'parameter_1' : 'D', 'parameter_2' : 'D'}
modes = ('weak', 'eval')
geometries = ['2_3', '2_4', '3_4', '3_8']
def get_fargs(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
if mode == 'weak':
if diff_var is None:
grad = self.get(state, 'grad').transpose((0,1,3,2))
nel, nqp, nr, nc = grad.shape
grad = grad.reshape((nel,nqp,nr*nc,1))
fmode = 0
else:
grad = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return grad, mat, vg, fmode
elif mode == 'eval':
grad1 = self.get(virtual, 'grad').transpose((0,1,3,2))
grad2 = self.get(state, 'grad').transpose((0,1,3,2))
nel, nqp, nr, nc = grad1.shape
return 1.0,\
grad1.reshape((nel,nqp,nr*nc,1)),\
grad2.reshape((nel,nqp,nr*nc,1)),\
mat, vg
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
return (n_el, 1, 1, 1), state.dtype
def set_arg_types(self):
if self.mode == 'weak':
self.function = terms.dw_nonsym_elastic
else:
self.function = terms.d_lin_elastic
def _build_wave_strain_op(vec, bf):
dim = len(vec)
if dim == 2:
n0, n1 = vec
nmat = nm.array([[n0, 0],
[0, n1],
[n1, n0]], dtype=nm.float64)
else:
n0, n1, n2 = vec
nmat = nm.array([[n0, 0, 0],
[0, n1, 0],
[0, 0, n2],
[n1, n0, 0],
[n2, 0, n0],
[0, n2, n1]], dtype=nm.float64)
out = nm.einsum('ik,cqkj->cqij', nmat, bf)
return out
from sfepy.base.compat import block
def _build_cauchy_strain_op(bfg):
dim = bfg.shape[2]
if dim == 2:
g1, g2 = bfg[..., 0:1, :], bfg[..., 1:2, :]
zz = nm.zeros_like(g1)
out = block([[g1, zz],
[zz, g2],
[g2, g1]])
else:
g1, g2, g3 = bfg[..., 0:1, :], bfg[..., 1:2, :], bfg[..., 2:3, :]
zz = nm.zeros_like(g1)
out = block([[g1, zz, zz],
[zz, g2, zz],
[zz, zz, g3],
[g2, g1, zz],
[g3, zz, g1],
[zz, g3, g2]])
return out
class ElasticWaveTerm(Term):
r"""
Elastic dispersion term involving the wave strain :math:`g_{ij}`,
:math:`g_{ij}(\ul{u}) = \frac{1}{2}(u_i \kappa_j + \kappa_i u_j)`, with the
wave vector :math:`\ul{\kappa}`. :math:`D_{ijkl}` is given in the usual
matrix form exploiting symmetry: in 3D it is :math:`6\times6` with the
indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in 2D it is
:math:`3\times3` with the indices ordered as :math:`[11, 22, 12]`.
:Definition:
.. math::
\int_{\Omega} D_{ijkl}\ g_{ij}(\ul{v}) g_{kl}(\ul{u})
:Arguments:
- material_1 : :math:`D_{ijkl}`
- material_2 : :math:`\ul{\kappa}`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_elastic_wave'
arg_types = ('material_1', 'material_2', 'virtual', 'state')
arg_shapes = {'material_1' : 'S, S', 'material_2' : '.: D',
'virtual' : ('D', 'state'), 'state' : 'D'}
geometries = ['2_3', '2_4', '3_4', '3_8']
@staticmethod
def function(out, out_qp, geo, fmode):
status = geo.integrate(out, out_qp)
return status
def get_fargs(self, mat, kappa, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
from sfepy.discrete.variables import create_adof_conn, expand_basis
geo, _ = self.get_mapping(state)
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(virtual)
ebf = expand_basis(geo.bf, dim)
mat = Term.tile_mat(mat, n_el)
gmat = _build_wave_strain_op(kappa, ebf)
if diff_var is None:
econn = state.field.get_econn('volume', self.region)
adc = create_adof_conn(nm.arange(state.n_dof, dtype=nm.int32),
econn, n_c, 0)
vals = state()[adc]
# Same as nm.einsum('qij,cj->cqi', gmat[0], vals)[..., None]
aux = dot_sequences(gmat, vals[:, None, :, None])
out_qp = dot_sequences(gmat, dot_sequences(mat, aux), 'ATB')
fmode = 0
else:
out_qp = dot_sequences(gmat, dot_sequences(mat, gmat), 'ATB')
fmode = 1
return out_qp, geo, fmode
class ElasticWaveCauchyTerm(Term):
r"""
Elastic dispersion term involving the wave strain :math:`g_{ij}`,
:math:`g_{ij}(\ul{u}) = \frac{1}{2}(u_i \kappa_j + \kappa_i u_j)`, with the
wave vector :math:`\ul{\kappa}` and the elastic strain :math:`e_{ij}`.
:math:`D_{ijkl}` is given in the usual matrix form exploiting symmetry: in
3D it is :math:`6\times6` with the indices ordered as :math:`[11, 22, 33,
12, 13, 23]`, in 2D it is :math:`3\times3` with the indices ordered as
:math:`[11, 22, 12]`.
:Definition:
.. math::
\int_{\Omega} D_{ijkl}\ g_{ij}(\ul{v}) e_{kl}(\ul{u}) \;,
\int_{\Omega} D_{ijkl}\ g_{ij}(\ul{u}) e_{kl}(\ul{v})
:Arguments 1:
- material_1 : :math:`D_{ijkl}`
- material_2 : :math:`\ul{\kappa}`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
:Arguments 2:
- material_1 : :math:`D_{ijkl}`
- material_2 : :math:`\ul{\kappa}`
- state : :math:`\ul{u}`
- virtual : :math:`\ul{v}`
"""
name = 'dw_elastic_wave_cauchy'
arg_types = (('material_1', 'material_2', 'virtual', 'state'),
('material_1', 'material_2', 'state', 'virtual'))
arg_shapes = {'material_1' : 'S, S', 'material_2' : '.: D',
'virtual' : ('D', 'state'), 'state' : 'D'}
geometries = ['2_3', '2_4', '3_4', '3_8']
modes = ('ge', 'eg')
@staticmethod
def function(out, out_qp, geo, fmode):
status = geo.integrate(out, out_qp)
return status
def get_fargs(self, mat, kappa, gvar, evar,
mode=None, term_mode=None, diff_var=None, **kwargs):
from sfepy.discrete.variables import create_adof_conn, expand_basis
geo, _ = self.get_mapping(evar)
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(gvar)
ebf = expand_basis(geo.bf, dim)
mat = Term.tile_mat(mat, n_el)
gmat = _build_wave_strain_op(kappa, ebf)
emat = _build_cauchy_strain_op(geo.bfg)
if diff_var is None:
avar = evar if self.mode == 'ge' else gvar
econn = avar.field.get_econn('volume', self.region)
adc = create_adof_conn(nm.arange(avar.n_dof, dtype=nm.int32),
econn, n_c, 0)
vals = avar()[adc]
if self.mode == 'ge':
# Same as aux = self.get(avar, 'cauchy_strain'),
aux = dot_sequences(emat, vals[:, None, :, None])
out_qp = dot_sequences(gmat, dot_sequences(mat, aux), 'ATB')
else:
aux = dot_sequences(gmat, vals[:, None, :, None])
out_qp = dot_sequences(emat, dot_sequences(mat, aux), 'ATB')
fmode = 0
else:
if self.mode == 'ge':
out_qp = dot_sequences(gmat, dot_sequences(mat, emat), 'ATB')
else:
out_qp = dot_sequences(emat, dot_sequences(mat, gmat), 'ATB')
fmode = 1
return out_qp, geo, fmode
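# --- Hedged illustration (not part of sfepy): a standalone numpy check of the shapes
# produced by _build_wave_strain_op. The basis array below is a dummy stand-in for the
# expanded vector basis (expand_basis(geo.bf, dim)); only its shape matters here.
if __name__ == '__main__':
    _kappa = [1.0, 0.0]                              # 2D wave vector
    _bf = nm.ones((4, 2, 2, 6), dtype=nm.float64)    # (n_cell, n_qp, dim, dim * n_en)
    _gmat = _build_wave_strain_op(_kappa, _bf)
    # One (S, dim * n_en) wave-strain block per cell and quadrature point.
    assert _gmat.shape == (4, 2, 3, 6)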
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Registry for visualizations."""
from __future__ import annotations
import inspect
from extensions.visualizations import models
class Registry:
"""Registry of all visualizations."""
# Dict mapping visualization class names to their classes.
visualizations_dict = {}
@classmethod
def _refresh_registry(cls):
"""Clears and adds new visualization instances to the registry."""
cls.visualizations_dict.clear()
# Add new visualization instances to the registry.
for name, clazz in inspect.getmembers(
models, predicate=inspect.isclass):
if name.endswith('_test') or name == 'BaseVisualization':
continue
ancestor_names = [
base_class.__name__ for base_class in inspect.getmro(clazz)]
if 'BaseVisualization' in ancestor_names:
cls.visualizations_dict[clazz.__name__] = clazz
@classmethod
def get_visualization_class(cls, visualization_id):
"""Gets a visualization class by its id (which is also its class name).
The registry will refresh if the desired class is not found. If it's
still not found after the refresh, this method will throw an error.
"""
if visualization_id not in cls.visualizations_dict:
cls._refresh_registry()
if visualization_id not in cls.visualizations_dict:
raise TypeError(
'\'%s\' is not a valid visualization id.' % visualization_id)
return cls.visualizations_dict[visualization_id]
@classmethod
def get_all_visualization_ids(cls):
"""Gets a visualization class by its id
(which is also its class name).
"""
if not cls.visualizations_dict:
cls._refresh_registry()
return list(cls.visualizations_dict.keys())
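# --- Hedged usage sketch: how callers might query the registry. 'BarChart' is only an
# assumed example id; the real ids depend on the classes defined in
# extensions.visualizations.models.
# all_ids = Registry.get_all_visualization_ids()
# bar_chart_cls = Registry.get_visualization_class('BarChart')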
|
class EventLogPermissionAttribute(CodeAccessSecurityAttribute,_Attribute):
"""
    Allows declarative permission checks for event logging.
EventLogPermissionAttribute(action: SecurityAction)
"""
def CreatePermission(self):
"""
CreatePermission(self: EventLogPermissionAttribute) -> IPermission
Creates the permission based on the
System.Diagnostics.EventLogPermissionAttribute.MachineName property and the
requested access levels that are set through the
System.Diagnostics.EventLogPermissionAttribute.PermissionAccess property on the
attribute.
Returns: An System.Security.IPermission that represents the created permission.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,action):
""" __new__(cls: type,action: SecurityAction) """
pass
def __reduce_ex__(self,*args):
pass
MachineName=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the name of the computer on which events might be read.
Get: MachineName(self: EventLogPermissionAttribute) -> str
Set: MachineName(self: EventLogPermissionAttribute)=value
"""
PermissionAccess=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the access levels used in the permissions request.
Get: PermissionAccess(self: EventLogPermissionAttribute) -> EventLogPermissionAccess
Set: PermissionAccess(self: EventLogPermissionAttribute)=value
"""
|
"""
ProjetPythonFirstYear - Petit jeu de labyrinthe python/turtle
Auteur: Alexandre T.
Date: 18/05/2021
Rôle : main.py est le programme principal qui lance le jeu
Entrée: Import du sous programme s'occupant des déplacement du personnage
"""
from Deplacement import *
listen()  # Waits for the user's keyboard input
# The next 4 calls bind the keys allowed in the game, here the keyboard arrow keys.
onkeypress(deplacer_gauche, "Left")
onkeypress(deplacer_droite, "Right")
onkeypress(deplacer_haut, "Up")
onkeypress(deplacer_bas, "Down")
mainloop()
|
import inspect
from abc import ABCMeta, abstractmethod
from logging import Logger
from typing import Callable, Optional, Any, Dict
from slack_bolt.kwargs_injection.utils import build_required_kwargs
from slack_bolt.request.request import BoltRequest
from slack_bolt.response.response import BoltResponse
class MiddlewareErrorHandler(metaclass=ABCMeta):
@abstractmethod
def handle(
self,
error: Exception,
request: BoltRequest,
response: Optional[BoltResponse],
) -> None:
"""Handles an unhandled exception.
Args:
error: The raised exception.
request: The request.
response: The response.
"""
raise NotImplementedError()
class CustomMiddlewareErrorHandler(MiddlewareErrorHandler):
def __init__(self, logger: Logger, func: Callable[..., Optional[BoltResponse]]):
self.func = func
self.logger = logger
self.arg_names = inspect.getfullargspec(func).args
def handle(
self,
error: Exception,
request: BoltRequest,
response: Optional[BoltResponse],
):
kwargs: Dict[str, Any] = build_required_kwargs(
required_arg_names=self.arg_names,
logger=self.logger,
error=error,
request=request,
response=response,
next_keys_required=False,
)
returned_response = self.func(**kwargs)
if returned_response is not None and isinstance(
returned_response, BoltResponse
):
response.status = returned_response.status
response.headers = returned_response.headers
response.body = returned_response.body
class DefaultMiddlewareErrorHandler(MiddlewareErrorHandler):
def __init__(self, logger: Logger):
self.logger = logger
def handle(
self,
error: Exception,
request: BoltRequest,
response: Optional[BoltResponse],
):
message = f"Failed to run a middleware middleware (error: {error})"
self.logger.exception(message)
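# --- Hedged usage sketch: wrapping a user-supplied callback. The callback and logger
# names below are assumptions; only keyword arguments recognised by
# build_required_kwargs (e.g. error, request, response, logger) are injected.
#
# def notify_on_middleware_error(error: Exception, logger: Logger) -> BoltResponse:
#     logger.warning(f"middleware raised: {error}")
#     return BoltResponse(status=500, body="internal error")
#
# handler = CustomMiddlewareErrorHandler(logger=app_logger, func=notify_on_middleware_error)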
|
import insightconnect_plugin_runtime
from .schema import CalculateInput, CalculateOutput, Input, Output, Component
# Custom imports below
from insightconnect_plugin_runtime.exceptions import PluginException
import ipcalc
import validators
class Calculate(insightconnect_plugin_runtime.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="calculate",
description=Component.DESCRIPTION,
input=CalculateInput(),
output=CalculateOutput(),
)
def run(self, params={}):
cidr = params.get(Input.CIDR)
# Test for correct input
if validators.ipv4_cidr(cidr):
subnet = ipcalc.Network(cidr)
else:
raise PluginException(
cause=f"Provided network {cidr} is not in CIDR notation.",
assistance="Please check that the provided network is correct and try again.",
)
# Extract first octet from input
address = cidr.split("/", 1)
separate = cidr.split(".", 1)
octet = int(separate[0])
        # Test if IP is within class A, B, or C
if octet < 128:
bits = 8
ip_class = "A"
elif octet < 192:
bits = 16
ip_class = "B"
elif octet < 224:
bits = 24
ip_class = "C"
else:
raise PluginException(
cause=f"IP address {address[0]} resides in reserved range.",
assistance="Please provide an IP address outside the reserved range.",
)
# Error if an invalid mask is provided for the network class
if int(subnet.subnet()) < bits:
raise PluginException(
cause="Invalid mask for network class.",
assistance="Please provide a valid mask for the network class.",
)
hosts = max(int(subnet.size() - 2), 0)
host_range = "" if not hosts else f"{str(subnet.host_first())} - {str(subnet.host_last())}"
netmask = str(subnet.netmask())
netmask_split = netmask.split(".", 4)
# Calculate wildcard mask
wildcard = []
for i in netmask_split:
wildcard.append(str(255 - int(i)))
wildcard = ".".join(wildcard)
# Calculate number of subnets
borrowed = int(subnet.subnet()) - bits
subnets = 2 ** borrowed
# Subnets should never return zero
if subnets == 0:
subnets = 1
return {
Output.IP: address[0],
Output.NETMASK: netmask,
Output.WILDCARD: wildcard,
Output.CIDR: f"/{address[1]}",
Output.BINARY_NETMASK: subnet.netmask().bin(),
Output.IP_CLASS: ip_class,
Output.SUBNETS: subnets,
Output.HOSTS: hosts,
Output.SUBNET_ID: str(subnet.network()),
Output.HOST_RANGE: host_range,
Output.BROADCAST: str(subnet.broadcast()),
}
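# --- Hedged worked example (assumes ipcalc is installed, as imported above): for
# 10.0.0.0/12 the class-A default mask is 8 bits, so 12 - 8 = 4 borrowed bits and
# 2 ** 4 = 16 subnets; the wildcard mask is the per-octet complement of the netmask.
if __name__ == "__main__":
    demo = ipcalc.Network("10.0.0.0/12")
    demo_netmask = str(demo.netmask())  # '255.240.0.0'
    demo_wildcard = ".".join(str(255 - int(octet)) for octet in demo_netmask.split("."))
    print(demo_netmask, demo_wildcard, 2 ** (int(demo.subnet()) - 8))  # 16 subnets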
|
import importlib.machinery
import inspect
import linecache
import os
import pathlib
import sys
import traceback
from typing import AnyStr, Any, Callable, Tuple, TypeVar, Union
import nbformat
from ..config import load_config
from ..exporter import LiteraryPythonExporter
class NotebookLoader(importlib.machinery.SourcelessFileLoader):
"""Sourceless Jupyter Notebook loader"""
def __init__(self, fullname: str, path: str, config):
super().__init__(fullname, path)
self._config = config
def _update_linecache(self, path: str, source: str):
linecache.cache[path] = (
len(source),
None,
source.splitlines(keepends=True),
path,
)
def get_code(self, fullname: str):
path = self.get_filename(fullname)
body = self.get_transpiled_source(path)
# Ensure that generated source is available for tracebacks
self._update_linecache(path, body)
return compile(body, path, "exec")
def get_transpiled_source(self, path: str):
nb = nbformat.read(path, as_version=nbformat.NO_CONVERT)
exporter = LiteraryPythonExporter(config=self._config)
body, resources = exporter.from_notebook_node(nb)
return body
def determine_package_name(path: pathlib.Path, package_root_path: pathlib.Path) -> str:
"""Determine the corresponding importable name for a package directory given by
a particular file path
:param path: path to package
:param package_root_path: root path containing notebook package directory
:return:
"""
relative_path = path.relative_to(package_root_path)
return ".".join(relative_path.parts)
def _get_loader_details(hook) -> tuple:
"""Return the loader_details for a given FileFinder closure
:param hook: FileFinder closure
:returns: loader_details tuple
"""
try:
namespace = inspect.getclosurevars(hook)
except TypeError as err:
raise ValueError from err
try:
return namespace.nonlocals["loader_details"]
except KeyError as err:
raise ValueError from err
def _find_file_finder(path_hooks: list) -> Tuple[int, Any]:
"""Find the FileFinder closure in a list of path hooks
:param path_hooks: path hooks
:returns: index of hook and the hook itself
"""
for i, hook in enumerate(path_hooks):
try:
_get_loader_details(hook)
except ValueError:
continue
return i, hook
raise ValueError
T = TypeVar("T")
def _extend_file_finder(finder: T, *loader_details) -> T:
"""Extend an existing file finder with new loader details
:param finder: existing FileFinder instance
:param loader_details:
:return:
"""
return importlib.machinery.FileFinder.path_hook(
*_get_loader_details(finder), *loader_details
)
def _inject_notebook_loader(
path_hooks: list, loader_factory: Callable[[str, str], NotebookLoader]
):
"""Inject a NotebookLoader into a list of path hooks
:param path_hooks: list of path hooks
    :param loader_factory: factory used to create NotebookLoader instances
:return:
"""
i, finder = _find_file_finder(path_hooks)
new_finder = _extend_file_finder(finder, (loader_factory, [".ipynb"]))
path_hooks[i] = new_finder
def install_hook(
package_root_path: Union[AnyStr, os.PathLike], set_except_hook: bool = True
):
"""Install notebook import hook
Don't allow the user to specify a custom search path, because we also need this to
interoperate with the default Python module importers which use sys.path
:param package_root_path: root path containing notebook package directory
:param set_except_hook: overwrite `sys.excepthook` to correctly display tracebacks
inside notebooks
:return:
"""
# Load config for project
config = load_config(package_root_path)
# Make notebook packages importable by adding package root path to sys.path
sys.path.append(str(package_root_path))
# Create notebook loader factory
def create_notebook_loader(fullname, path):
return NotebookLoader(fullname, path, config)
# Inject notebook loader into path_hooks
_inject_notebook_loader(sys.path_hooks, create_notebook_loader)
# Python's C-level traceback reporting doesn't call `linecache`, and so retrieves
# the underlying notebook source instead of the generated Python code
if set_except_hook:
sys.excepthook = traceback.print_exception
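# --- Hedged usage sketch: a hypothetical layout in which notebook packages live under
# ./src; once the hook is installed, .ipynb modules import like ordinary Python modules.
# install_hook(pathlib.Path("src"))
# import analysis_package.plots  # hypothetical notebook package resolved by the hook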
|
import pytest
from graphene import Node
from saleor.checkout import calculations
from saleor.checkout.utils import add_variant_to_checkout
from saleor.payment import ChargeStatus, TransactionKind
from saleor.payment.models import Payment
from tests.api.utils import get_graphql_content
@pytest.fixture()
def checkout_with_variant(checkout, stock):
variant = stock.product_variant
add_variant_to_checkout(checkout, variant, 1)
checkout.save()
return checkout
@pytest.fixture()
def checkout_with_shipping_method(checkout_with_variant, shipping_method):
checkout = checkout_with_variant
checkout.shipping_method = shipping_method
checkout.save()
return checkout
@pytest.fixture()
def checkout_with_billing_address(checkout_with_shipping_method, address):
checkout = checkout_with_shipping_method
checkout.billing_address = address
checkout.save()
return checkout
@pytest.fixture()
def checkout_with_charged_payment(checkout_with_billing_address):
checkout = checkout_with_billing_address
taxed_total = calculations.checkout_total(checkout)
payment = Payment.objects.create(
gateway="Dummy", is_active=True, total=taxed_total.gross.amount, currency="USD"
)
payment.charge_status = ChargeStatus.FULLY_CHARGED
payment.captured_amount = payment.total
payment.checkout = checkout_with_billing_address
payment.save()
payment.transactions.create(
amount=payment.total,
kind=TransactionKind.CAPTURE,
gateway_response={},
is_success=True,
)
return checkout
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_create_checkout(api_client, graphql_address_data, variant, count_queries):
query = """
fragment Price on TaxedMoney {
gross {
amount
localized
}
currency
}
fragment ProductVariant on ProductVariant {
id
name
pricing {
discountLocalCurrency {
currency
gross {
amount
localized
}
}
price {
currency
gross {
amount
localized
}
}
priceUndiscounted {
currency
gross {
amount
localized
}
}
priceLocalCurrency {
currency
gross {
amount
localized
}
}
}
product {
id
name
thumbnail {
url
alt
}
thumbnail2x: thumbnail(size: 510) {
url
}
}
}
fragment CheckoutLine on CheckoutLine {
id
quantity
totalPrice {
...Price
}
variant {
...ProductVariant
}
quantity
}
fragment Address on Address {
id
firstName
lastName
companyName
streetAddress1
streetAddress2
city
postalCode
country {
code
country
}
countryArea
phone
}
fragment ShippingMethod on ShippingMethod {
id
name
price {
currency
amount
localized
}
}
fragment Checkout on Checkout {
token
id
user {
email
}
totalPrice {
...Price
}
subtotalPrice {
...Price
}
billingAddress {
...Address
}
shippingAddress {
...Address
}
email
availableShippingMethods {
...ShippingMethod
}
shippingMethod {
...ShippingMethod
}
shippingPrice {
...Price
}
lines {
...CheckoutLine
}
}
mutation createCheckout($checkoutInput: CheckoutCreateInput!) {
checkoutCreate(input: $checkoutInput) {
errors {
field
message
}
checkout {
...Checkout
}
}
}
"""
variables = {
"checkoutInput": {
"email": "test@example.com",
"shippingAddress": graphql_address_data,
"lines": [
{
"quantity": 1,
"variantId": Node.to_global_id("ProductVariant", variant.pk),
}
],
}
}
get_graphql_content(api_client.post_graphql(query, variables))
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_add_shipping_to_checkout(
api_client,
graphql_address_data,
variant,
checkout_with_variant,
shipping_method,
count_queries,
):
query = """
fragment Price on TaxedMoney {
gross {
amount
localized
}
currency
}
fragment ProductVariant on ProductVariant {
id
name
pricing {
discountLocalCurrency {
currency
gross {
amount
localized
}
}
price {
currency
gross {
amount
localized
}
}
priceUndiscounted {
currency
gross {
amount
localized
}
}
priceLocalCurrency {
currency
gross {
amount
localized
}
}
}
product {
id
name
thumbnail {
url
alt
}
thumbnail2x: thumbnail(size: 510) {
url
}
}
}
fragment CheckoutLine on CheckoutLine {
id
quantity
totalPrice {
...Price
}
variant {
...ProductVariant
}
quantity
}
fragment Address on Address {
id
firstName
lastName
companyName
streetAddress1
streetAddress2
city
postalCode
country {
code
country
}
countryArea
phone
}
fragment ShippingMethod on ShippingMethod {
id
name
price {
currency
amount
localized
}
}
fragment Checkout on Checkout {
token
id
user {
email
}
totalPrice {
...Price
}
subtotalPrice {
...Price
}
billingAddress {
...Address
}
shippingAddress {
...Address
}
email
availableShippingMethods {
...ShippingMethod
}
shippingMethod {
...ShippingMethod
}
shippingPrice {
...Price
}
lines {
...CheckoutLine
}
}
mutation updateCheckoutShippingOptions(
$checkoutId: ID!
$shippingMethodId: ID!
) {
checkoutShippingMethodUpdate(
checkoutId: $checkoutId
shippingMethodId: $shippingMethodId
) {
errors {
field
message
}
checkout {
...Checkout
}
}
}
"""
variables = {
"checkoutId": Node.to_global_id("Checkout", checkout_with_variant.pk),
"shippingMethodId": Node.to_global_id("ShippingMethod", shipping_method.pk),
}
get_graphql_content(api_client.post_graphql(query, variables))
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_add_billing_address_to_checkout(
api_client, graphql_address_data, checkout_with_shipping_method, count_queries
):
query = """
fragment Price on TaxedMoney {
gross {
amount
localized
}
currency
}
fragment ProductVariant on ProductVariant {
id
name
pricing {
discountLocalCurrency {
currency
gross {
amount
localized
}
}
price {
currency
gross {
amount
localized
}
}
priceUndiscounted {
currency
gross {
amount
localized
}
}
priceLocalCurrency {
currency
gross {
amount
localized
}
}
}
product {
id
name
thumbnail {
url
alt
}
thumbnail2x: thumbnail(size: 510) {
url
}
}
}
fragment CheckoutLine on CheckoutLine {
id
quantity
totalPrice {
...Price
}
variant {
...ProductVariant
}
quantity
}
fragment Address on Address {
id
firstName
lastName
companyName
streetAddress1
streetAddress2
city
postalCode
country {
code
country
}
countryArea
phone
}
fragment ShippingMethod on ShippingMethod {
id
name
price {
currency
amount
localized
}
}
fragment Checkout on Checkout {
token
id
user {
email
}
totalPrice {
...Price
}
subtotalPrice {
...Price
}
billingAddress {
...Address
}
shippingAddress {
...Address
}
email
availableShippingMethods {
...ShippingMethod
}
shippingMethod {
...ShippingMethod
}
shippingPrice {
...Price
}
lines {
...CheckoutLine
}
}
mutation updateCheckoutBillingAddress(
$checkoutId: ID!
$billingAddress: AddressInput!
) {
checkoutBillingAddressUpdate(
checkoutId: $checkoutId
billingAddress: $billingAddress
) {
errors {
field
message
}
checkout {
...Checkout
}
}
}
"""
variables = {
"checkoutId": Node.to_global_id("Checkout", checkout_with_shipping_method.pk),
"billingAddress": graphql_address_data,
}
get_graphql_content(api_client.post_graphql(query, variables))
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_checkout_payment_charge(
api_client, graphql_address_data, checkout_with_billing_address, count_queries
):
query = """
mutation createPayment($input: PaymentInput!, $checkoutId: ID!) {
checkoutPaymentCreate(input: $input, checkoutId: $checkoutId) {
errors {
field
message
}
}
}
"""
variables = {
"checkoutId": Node.to_global_id("Checkout", checkout_with_billing_address.pk),
"input": {
"billingAddress": graphql_address_data,
"amount": 1000, # 10.00 USD * 100
"gateway": "Dummy",
"token": "charged",
},
}
get_graphql_content(api_client.post_graphql(query, variables))
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_complete_checkout(api_client, checkout_with_charged_payment, count_queries):
query = """
mutation completeCheckout($checkoutId: ID!, $redirectUrl: String) {
checkoutComplete(checkoutId: $checkoutId, redirectUrl: $redirectUrl) {
errors {
field
message
}
order {
id
token
}
}
}
"""
variables = {
"checkoutId": Node.to_global_id("Checkout", checkout_with_charged_payment.pk),
"redirectUrl": "https://www.example.com",
}
get_graphql_content(api_client.post_graphql(query, variables))
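# --- Hedged illustration: the relay global IDs used throughout these tests are just
# base64-encoded "<Type>:<pk>" strings, e.g. Node.to_global_id("Checkout", 1) yields
# 'Q2hlY2tvdXQ6MQ==', which decodes back to "Checkout:1".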
|
#!/usr/bin/env python
"""
Find alternative team names for all the teams in the 2018/19 FPL.
"""
import json
from fuzzywuzzy import fuzz
from airsenal.framework.data_fetcher import FPLDataFetcher
def find_best_match(fpl_teams, team):
"""
    Use fuzzy matching to find the FPL team name that best matches the given
    historical team name.
"""
best_ratio = 0.0
best_match = None
for t in fpl_teams:
if fuzz.partial_ratio(t, team) > best_ratio:
best_ratio = fuzz.partial_ratio(t, team)
best_match = t
print("Best match {}/{}, score {}".format(best_match, team, best_ratio))
return best_match, best_ratio
if __name__ == "__main__":
# get the team names as used in FPL
df = FPLDataFetcher()
teamdata = df.get_current_team_data()
teamdict = {
teamdata[k]["name"]: [teamdata[k]["short_name"]] for k in teamdata.keys()
}
# teamdicts = [{teamdata[k]['name']:[teamdata[k]['short_name']]} \
# for k in teamdata.keys()]
fpl_teams = list(teamdict.keys())
# get the team names from the results csv
missing = set()
matched = set()
history_teams = set()
for season in ["1415", "1516", "1617", "1718"]:
filename = "../data/results_{}.csv".format(season)
for line in open(filename).readlines()[1:]:
history_teams.add(line.split(",")[1])
history_teams.add(line.split(",")[2])
for team in history_teams:
if team in fpl_teams:
matched.add(team)
else:
t, score = find_best_match(fpl_teams, team)
if score == 100:
teamdict[t].append(team)
matched.add(team)
# ugh, ok, do the last few by hand
elif team == "Manchester United":
teamdict["Man Utd"].append(team)
matched.add(team)
elif team == "Manchester City":
teamdict["Man City"].append(team)
matched.add(team)
elif team == "Tottenham Hotspur":
teamdict["Spurs"].append(team)
matched.add(team)
else:
missing.add(team)
# matched teams should be all except promoted ones that haven't
# been in the prem recently
print("Num matched: {}".format(len(matched)))
    # print missing teams (these should be the relegated ones)
    print("Teams not in this season's FPL: {}".format(missing))
with open("../data/alternative_team_names.json", "w") as outfile:
outfile.write(json.dumps(teamdict))
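# --- Hedged illustration (assumes fuzzywuzzy is installed, as imported above):
# partial_ratio scores 100 whenever one name is an exact substring of the other, which
# is why e.g. "West Ham" / "West Ham United" match automatically, while
# "Man Utd" / "Manchester United" falls below 100 and needs the manual fallback above.
# fuzz.partial_ratio("West Ham", "West Ham United")  # 100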
|
import os
import pytest
from mp_api.routes.charge_density.client import ChargeDensityRester
import inspect
import typing
resters = [ChargeDensityRester()]
excluded_params = [
"sort_fields",
"chunk_size",
"num_chunks",
"all_fields",
"fields",
]
sub_doc_fields = [] # type: list
alt_name_dict = {} # type: dict
custom_field_tests = {} # type: dict
@pytest.mark.skipif(os.environ.get("MP_API_KEY", None) is None, reason="No API key found.")
@pytest.mark.parametrize("rester", resters)
def test_client(rester):
# Get specific search method
search_method = None
for entry in inspect.getmembers(rester, predicate=inspect.ismethod):
if "search" in entry[0] and entry[0] != "search":
search_method = entry[1]
if search_method is not None:
# Get list of parameters
param_tuples = list(typing.get_type_hints(search_method).items())
        # Query API for each numeric and boolean parameter and check that data is returned
for entry in param_tuples:
param = entry[0]
if param not in excluded_params:
param_type = entry[1].__args__[0]
q = None
if param_type is typing.Tuple[int, int]:
project_field = alt_name_dict.get(param, None)
q = {
param: (-100, 100),
"chunk_size": 1,
"num_chunks": 1,
}
elif param_type is typing.Tuple[float, float]:
project_field = alt_name_dict.get(param, None)
q = {
param: (0, 100.12),
"chunk_size": 1,
"num_chunks": 1,
}
elif param_type is bool:
project_field = alt_name_dict.get(param, None)
q = {
param: False,
"chunk_size": 1,
"num_chunks": 1,
}
elif param in custom_field_tests:
project_field = alt_name_dict.get(param, None)
q = {
param: custom_field_tests[param],
"chunk_size": 1,
"num_chunks": 1,
}
doc = search_method(**q)[0].dict()
for sub_field in sub_doc_fields:
if sub_field in doc:
doc = doc[sub_field]
assert doc[project_field if project_field is not None else param] is not None
def test_download_for_task_ids(tmpdir):
rester = resters[0]
n = rester.download_for_task_ids(
task_ids=["mp-655585", "mp-1057373", "mp-1059589", "mp-1440634", "mp-1791788"], path=tmpdir,
)
files = [f for f in os.listdir(tmpdir)]
assert "mp-1791788.json.gz" in files
|
from six import iteritems
import json
import os
import multiprocessing
import numpy as np
import random
class file_data_loader:
def __next__(self):
raise NotImplementedError
def next(self):
return self.__next__()
def next_batch(self, batch_size):
raise NotImplementedError
class npy_data_loader(file_data_loader):
MODE_INSTANCE = 0 # One batch contains batch_size instances.
MODE_ENTPAIR_BAG = 1 # One batch contains batch_size bags, instances in which have the same entity pair (usually for testing).
MODE_RELFACT_BAG = 2 # One batch contains batch size bags, instances in which have the same relation fact. (usually for training).
def __iter__(self):
return self
def __init__(self, data_dir, prefix, mode, word_vec_npy='vec.npy', shuffle=True, max_length=120, batch_size=160):
if not os.path.isdir(data_dir):
raise Exception("[ERROR] Data dir doesn't exist!")
self.mode = mode
self.shuffle = shuffle
self.max_length = max_length
self.batch_size = batch_size
self.word_vec_mat = np.load(os.path.join(data_dir, word_vec_npy))
self.data_word = np.load(os.path.join(data_dir, prefix + "_word.npy"))
self.data_pos1 = np.load(os.path.join(data_dir, prefix + "_pos1.npy"))
self.data_pos2 = np.load(os.path.join(data_dir, prefix + "_pos2.npy"))
self.data_mask = np.load(os.path.join(data_dir, prefix + "_mask.npy"))
self.data_rel = np.load(os.path.join(data_dir, prefix + "_label.npy"))
self.data_length = np.load(os.path.join(data_dir, prefix + "_len.npy"))
self.scope = np.load(os.path.join(data_dir, prefix + "_instance_scope.npy"))
self.triple = np.load(os.path.join(data_dir, prefix + "_instance_triple.npy"))
self.relfact_tot = len(self.triple)
for i in range(self.scope.shape[0]):
self.scope[i][1] += 1
self.instance_tot = self.data_word.shape[0]
self.rel_tot = 53
if self.mode == self.MODE_INSTANCE:
self.order = list(range(self.instance_tot))
else:
self.order = list(range(len(self.scope)))
self.idx = 0
if self.shuffle:
random.shuffle(self.order)
print("Total relation fact: %d" % (self.relfact_tot))
def __next__(self):
return self.next_batch(self.batch_size)
def next_batch(self, batch_size):
if self.idx >= len(self.order):
self.idx = 0
if self.shuffle:
random.shuffle(self.order)
raise StopIteration
batch_data = {}
if self.mode == self.MODE_INSTANCE:
idx0 = self.idx
idx1 = self.idx + batch_size
if idx1 > len(self.order):
self.idx = 0
if self.shuffle:
random.shuffle(self.order)
raise StopIteration
self.idx = idx1
batch_data['word'] = self.data_word[idx0:idx1]
batch_data['pos1'] = self.data_pos1[idx0:idx1]
batch_data['pos2'] = self.data_pos2[idx0:idx1]
batch_data['rel'] = self.data_rel[idx0:idx1]
batch_data['length'] = self.data_length[idx0:idx1]
batch_data['scope'] = np.stack([list(range(idx1 - idx0)), list(range(1, idx1 - idx0 + 1))], axis=1)
elif self.mode == self.MODE_ENTPAIR_BAG or self.mode == self.MODE_RELFACT_BAG:
idx0 = self.idx
idx1 = self.idx + batch_size
if idx1 > len(self.order):
self.idx = 0
if self.shuffle:
random.shuffle(self.order)
raise StopIteration
self.idx = idx1
_word = []
_pos1 = []
_pos2 = []
_rel = []
_ins_rel = []
_multi_rel = []
_length = []
_scope = []
_mask = []
cur_pos = 0
for i in range(idx0, idx1):
_word.append(self.data_word[self.scope[self.order[i]][0]:self.scope[self.order[i]][1]])
_pos1.append(self.data_pos1[self.scope[self.order[i]][0]:self.scope[self.order[i]][1]])
_pos2.append(self.data_pos2[self.scope[self.order[i]][0]:self.scope[self.order[i]][1]])
_rel.append(self.data_rel[self.scope[self.order[i]][0]])
_ins_rel.append(self.data_rel[self.scope[self.order[i]][0]:self.scope[self.order[i]][1]])
_length.append(self.data_length[self.scope[self.order[i]][0]:self.scope[self.order[i]][1]])
_mask.append(self.data_mask[self.scope[self.order[i]][0]:self.scope[self.order[i]][1]])
bag_size = self.scope[self.order[i]][1] - self.scope[self.order[i]][0]
_scope.append([cur_pos, cur_pos + bag_size])
cur_pos = cur_pos + bag_size
if self.mode == self.MODE_ENTPAIR_BAG:
_one_multi_rel = np.zeros((self.rel_tot), dtype=np.int32)
for j in range(self.scope[self.order[i]][0], self.scope[self.order[i]][1]):
_one_multi_rel[self.data_rel[j]] = 1
_multi_rel.append(_one_multi_rel)
batch_data['word'] = np.concatenate(_word)
batch_data['pos1'] = np.concatenate(_pos1)
batch_data['pos2'] = np.concatenate(_pos2)
batch_data['rel'] = np.stack(_rel)
batch_data['ins_rel'] = np.concatenate(_ins_rel)
if self.mode == self.MODE_ENTPAIR_BAG:
batch_data['multi_rel'] = np.stack(_multi_rel)
batch_data['length'] = np.concatenate(_length)
batch_data['scope'] = np.stack(_scope)
batch_data['mask'] = np.concatenate(_mask)
return batch_data
class json_file_data_loader(file_data_loader):
MODE_INSTANCE = 0 # One batch contains batch_size instances.
MODE_ENTPAIR_BAG = 1 # One batch contains batch_size bags, instances in which have the same entity pair (usually for testing).
MODE_RELFACT_BAG = 2 # One batch contains batch size bags, instances in which have the same relation fact. (usually for training).
def _load_preprocessed_file(self):
name_prefix = '.'.join(self.file_name.split('/')[-1].split('.')[:-1])
word_vec_name_prefix = '.'.join(self.word_vec_file_name.split('/')[-1].split('.')[:-1])
processed_data_dir = '_processed_data'
if not os.path.isdir(processed_data_dir):
return False
word_npy_file_name = os.path.join(processed_data_dir, name_prefix + '_word.npy')
pos1_npy_file_name = os.path.join(processed_data_dir, name_prefix + '_pos1.npy')
pos2_npy_file_name = os.path.join(processed_data_dir, name_prefix + '_pos2.npy')
rel_npy_file_name = os.path.join(processed_data_dir, name_prefix + '_rel.npy')
mask_npy_file_name = os.path.join(processed_data_dir, name_prefix + '_mask.npy')
length_npy_file_name = os.path.join(processed_data_dir, name_prefix + '_length.npy')
entpair2scope_file_name = os.path.join(processed_data_dir, name_prefix + '_entpair2scope.json')
relfact2scope_file_name = os.path.join(processed_data_dir, name_prefix + '_relfact2scope.json')
word_vec_mat_file_name = os.path.join(processed_data_dir, word_vec_name_prefix + '_mat.npy')
word2id_file_name = os.path.join(processed_data_dir, word_vec_name_prefix + '_word2id.json')
if not os.path.exists(word_npy_file_name) or \
not os.path.exists(pos1_npy_file_name) or \
not os.path.exists(pos2_npy_file_name) or \
not os.path.exists(rel_npy_file_name) or \
not os.path.exists(mask_npy_file_name) or \
not os.path.exists(length_npy_file_name) or \
not os.path.exists(entpair2scope_file_name) or \
not os.path.exists(relfact2scope_file_name) or \
not os.path.exists(word_vec_mat_file_name) or \
not os.path.exists(word2id_file_name):
return False
print("Pre-processed files exist. Loading them...")
self.data_word = np.load(word_npy_file_name)
self.data_pos1 = np.load(pos1_npy_file_name)
self.data_pos2 = np.load(pos2_npy_file_name)
self.data_rel = np.load(rel_npy_file_name)
self.data_mask = np.load(mask_npy_file_name)
self.data_length = np.load(length_npy_file_name)
self.entpair2scope = json.load(open(entpair2scope_file_name))
self.relfact2scope = json.load(open(relfact2scope_file_name))
self.word_vec_mat = np.load(word_vec_mat_file_name)
self.word2id = json.load(open(word2id_file_name))
if self.data_word.shape[1] != self.max_length:
print("Pre-processed files don't match current settings. Reprocessing...")
return False
print("Finish loading")
return True
def __init__(self, file_name, word_vec_file_name, rel2id_file_name, mode, shuffle=True, max_length=120, case_sensitive=False, reprocess=False, batch_size=160):
'''
file_name: Json file storing the data in the following format
[
{
'sentence': 'Bill Gates is the founder of Microsoft .',
'head': {'word': 'Bill Gates', ...(other information)},
'tail': {'word': 'Microsoft', ...(other information)},
'relation': 'founder'
},
...
]
word_vec_file_name: Json file storing word vectors in the following format
[
{'word': 'the', 'vec': [0.418, 0.24968, ...]},
{'word': ',', 'vec': [0.013441, 0.23682, ...]},
...
]
rel2id_file_name: Json file storing relation-to-id diction in the following format
{
'NA': 0
'founder': 1
...
}
**IMPORTANT**: make sure the id of NA is 0!
mode: Specify how to get a batch of data. See MODE_* constants for details.
shuffle: Whether to shuffle the data, default as True. You should use shuffle when training.
        max_length: The length that all the sentences will be padded or truncated to, default as 120.
        case_sensitive: Whether the data processing is case-sensitive, default as False.
        reprocess: Redo the pre-processing even if pre-processed files already exist, default as False.
batch_size: The size of each batch, default as 160.
'''
self.file_name = file_name
self.word_vec_file_name = word_vec_file_name
self.case_sensitive = case_sensitive
self.max_length = max_length
self.mode = mode
self.shuffle = shuffle
self.batch_size = batch_size
self.rel2id = json.load(open(rel2id_file_name))
if reprocess or not self._load_preprocessed_file(): # Try to load pre-processed files:
# Check files
if file_name is None or not os.path.isfile(file_name):
raise Exception("[ERROR] Data file doesn't exist")
if word_vec_file_name is None or not os.path.isfile(word_vec_file_name):
raise Exception("[ERROR] Word vector file doesn't exist")
# Load files
print("Loading data file...")
self.ori_data = json.load(open(self.file_name, "r"))
print("Finish loading")
print("Loading word vector file...")
self.ori_word_vec = json.load(open(self.word_vec_file_name, "r"))
print("Finish loading")
            # Eliminate case sensitivity
if not case_sensitive:
print("Elimiating case sensitive problem...")
for i in range(len(self.ori_data)):
self.ori_data[i]['sentence'] = self.ori_data[i]['sentence'].lower()
self.ori_data[i]['head']['word'] = self.ori_data[i]['head']['word'].lower()
self.ori_data[i]['tail']['word'] = self.ori_data[i]['tail']['word'].lower()
print("Finish eliminating")
# Sort data by entities and relations
print("Sort data...")
self.ori_data.sort(key=lambda a: a['head']['id'] + '#' + a['tail']['id'] + '#' + a['relation'])
print("Finish sorting")
# Pre-process word vec
self.word2id = {}
self.word_vec_tot = len(self.ori_word_vec)
UNK = self.word_vec_tot
BLANK = self.word_vec_tot + 1
self.word_vec_dim = len(self.ori_word_vec[0]['vec'])
print("Got {} words of {} dims".format(self.word_vec_tot, self.word_vec_dim))
print("Building word vector matrix and mapping...")
self.word_vec_mat = np.zeros((self.word_vec_tot, self.word_vec_dim), dtype=np.float32)
for cur_id, word in enumerate(self.ori_word_vec):
w = word['word']
if not case_sensitive:
w = w.lower()
self.word2id[w] = cur_id
self.word_vec_mat[cur_id, :] = word['vec']
self.word2id['UNK'] = UNK
self.word2id['BLANK'] = BLANK
print("Finish building")
# Pre-process data
print("Pre-processing data...")
self.instance_tot = len(self.ori_data)
self.entpair2scope = {} # (head, tail) -> scope
self.relfact2scope = {} # (head, tail, relation) -> scope
self.data_word = np.zeros((self.instance_tot, self.max_length), dtype=np.int32)
self.data_pos1 = np.zeros((self.instance_tot, self.max_length), dtype=np.int32)
self.data_pos2 = np.zeros((self.instance_tot, self.max_length), dtype=np.int32)
self.data_rel = np.zeros((self.instance_tot), dtype=np.int32)
self.data_mask = np.zeros((self.instance_tot, self.max_length), dtype=np.int32)
self.data_length = np.zeros((self.instance_tot), dtype=np.int32)
last_entpair = ''
last_entpair_pos = -1
last_relfact = ''
last_relfact_pos = -1
from tqdm import tqdm
for i in tqdm(range(self.instance_tot), ncols=70):
ins = self.ori_data[i]
if ins['relation'] in self.rel2id:
self.data_rel[i] = self.rel2id[ins['relation']]
else:
self.data_rel[i] = self.rel2id['NA']
sentence = ' '.join(ins['sentence'].split()) # delete extra spaces
head = ins['head']['word']
tail = ins['tail']['word']
cur_entpair = ins['head']['id'] + '#' + ins['tail']['id']
cur_relfact = ins['head']['id'] + '#' + ins['tail']['id'] + '#' + ins['relation']
if cur_entpair != last_entpair:
if last_entpair != '':
self.entpair2scope[last_entpair] = [last_entpair_pos, i] # left closed right open
last_entpair = cur_entpair
last_entpair_pos = i
if cur_relfact != last_relfact:
if last_relfact != '':
self.relfact2scope[last_relfact] = [last_relfact_pos, i]
last_relfact = cur_relfact
last_relfact_pos = i
p1 = sentence.find(' ' + head + ' ')
p2 = sentence.find(' ' + tail + ' ')
if p1 == -1:
if sentence[:len(head) + 1] == head + " ":
p1 = 0
elif sentence[-len(head) - 1:] == " " + head:
p1 = len(sentence) - len(head)
else:
p1 = 0 # shouldn't happen
else:
p1 += 1
if p2 == -1:
if sentence[:len(tail) + 1] == tail + " ":
p2 = 0
elif sentence[-len(tail) - 1:] == " " + tail:
p2 = len(sentence) - len(tail)
else:
p2 = 0 # shouldn't happen
else:
p2 += 1
# if p1 == -1 or p2 == -1:
# raise Exception("[ERROR] Sentence doesn't contain the entity, index = {}, sentence = {}, head = {}, tail = {}".format(i, sentence, head, tail))
words = sentence.split()
cur_ref_data_word = self.data_word[i]
cur_pos = 0
pos1 = -1
pos2 = -1
for j, word in enumerate(words):
if j < max_length:
if word in self.word2id:
cur_ref_data_word[j] = self.word2id[word]
else:
cur_ref_data_word[j] = UNK
if cur_pos == p1:
pos1 = j
p1 = -1
if cur_pos == p2:
pos2 = j
p2 = -1
cur_pos += len(word) + 1
for j in range(j + 1, max_length):
cur_ref_data_word[j] = BLANK
self.data_length[i] = len(words)
if len(words) > max_length:
self.data_length[i] = max_length
if pos1 == -1 or pos2 == -1:
raise Exception("[ERROR] Position error, index = {}, sentence = {}, head = {}, tail = {}".format(i, sentence, head, tail))
if pos1 >= max_length:
pos1 = max_length - 1
if pos2 >= max_length:
pos2 = max_length - 1
pos_min = min(pos1, pos2)
pos_max = max(pos1, pos2)
for j in range(max_length):
self.data_pos1[i][j] = j - pos1 + max_length
self.data_pos2[i][j] = j - pos2 + max_length
if j >= self.data_length[i]:
self.data_mask[i][j] = 0
elif j <= pos_min:
self.data_mask[i][j] = 1
elif j <= pos_max:
self.data_mask[i][j] = 2
else:
self.data_mask[i][j] = 3
if last_entpair != '':
self.entpair2scope[last_entpair] = [last_entpair_pos, self.instance_tot] # left closed right open
if last_relfact != '':
self.relfact2scope[last_relfact] = [last_relfact_pos, self.instance_tot]
print("Finish pre-processing")
print("Storing processed files...")
name_prefix = '.'.join(file_name.split('/')[-1].split('.')[:-1])
word_vec_name_prefix = '.'.join(word_vec_file_name.split('/')[-1].split('.')[:-1])
processed_data_dir = '_processed_data'
if not os.path.isdir(processed_data_dir):
os.mkdir(processed_data_dir)
np.save(os.path.join(processed_data_dir, name_prefix + '_word.npy'), self.data_word)
np.save(os.path.join(processed_data_dir, name_prefix + '_pos1.npy'), self.data_pos1)
np.save(os.path.join(processed_data_dir, name_prefix + '_pos2.npy'), self.data_pos2)
np.save(os.path.join(processed_data_dir, name_prefix + '_rel.npy'), self.data_rel)
np.save(os.path.join(processed_data_dir, name_prefix + '_mask.npy'), self.data_mask)
np.save(os.path.join(processed_data_dir, name_prefix + '_length.npy'), self.data_length)
json.dump(self.entpair2scope, open(os.path.join(processed_data_dir, name_prefix + '_entpair2scope.json'), 'w'))
json.dump(self.relfact2scope, open(os.path.join(processed_data_dir, name_prefix + '_relfact2scope.json'), 'w'))
np.save(os.path.join(processed_data_dir, word_vec_name_prefix + '_mat.npy'), self.word_vec_mat)
json.dump(self.word2id, open(os.path.join(processed_data_dir, word_vec_name_prefix + '_word2id.json'), 'w'))
print("Finish storing")
# Prepare for idx
self.instance_tot = self.data_word.shape[0]
self.entpair_tot = len(self.entpair2scope)
self.relfact_tot = 0 # The number of relation facts, without NA.
for key in self.relfact2scope:
if key[-2:] != 'NA':
self.relfact_tot += 1
self.rel_tot = len(self.rel2id)
if self.mode == self.MODE_INSTANCE:
self.order = list(range(self.instance_tot))
elif self.mode == self.MODE_ENTPAIR_BAG:
self.order = list(range(len(self.entpair2scope)))
self.scope_name = []
self.scope = []
for key, value in iteritems(self.entpair2scope):
self.scope_name.append(key)
self.scope.append(value)
elif self.mode == self.MODE_RELFACT_BAG:
self.order = list(range(len(self.relfact2scope)))
self.scope_name = []
self.scope = []
for key, value in iteritems(self.relfact2scope):
self.scope_name.append(key)
self.scope.append(value)
else:
raise Exception("[ERROR] Invalid mode")
self.idx = 0
if self.shuffle:
random.shuffle(self.order)
print("Total relation fact: %d" % (self.relfact_tot))
print('self.scope: %d; self.order: %d' % (len(self.scope), len(self.order)))
# assert 1 == 2
def __iter__(self):
return self
def __next__(self):
return self.next_batch(self.batch_size)
def next_batch(self, batch_size):
if self.idx >= len(self.order):
self.idx = 0
if self.shuffle:
random.shuffle(self.order)
raise StopIteration
batch_data = {}
if self.mode == self.MODE_INSTANCE:
idx0 = self.idx
idx1 = self.idx + batch_size
if idx1 > len(self.order):
idx1 = len(self.order)
self.idx = idx1
batch_data['word'] = self.data_word[idx0:idx1]
batch_data['pos1'] = self.data_pos1[idx0:idx1]
batch_data['pos2'] = self.data_pos2[idx0:idx1]
batch_data['rel'] = self.data_rel[idx0:idx1]
batch_data['mask'] = self.data_mask[idx0:idx1]
batch_data['length'] = self.data_length[idx0:idx1]
batch_data['scope'] = np.stack([list(range(batch_size)), list(range(1, batch_size + 1))], axis=1)
if idx1 - idx0 < batch_size:
padding = batch_size - (idx1 - idx0)
batch_data['word'] = np.concatenate([batch_data['word'], np.zeros((padding, self.data_word.shape[-1]), dtype=np.int32)])
batch_data['pos1'] = np.concatenate([batch_data['pos1'], np.zeros((padding, self.data_pos1.shape[-1]), dtype=np.int32)])
batch_data['pos2'] = np.concatenate([batch_data['pos2'], np.zeros((padding, self.data_pos2.shape[-1]), dtype=np.int32)])
batch_data['mask'] = np.concatenate([batch_data['mask'], np.zeros((padding, self.data_mask.shape[-1]), dtype=np.int32)])
batch_data['rel'] = np.concatenate([batch_data['rel'], np.zeros((padding), dtype=np.int32)])
batch_data['length'] = np.concatenate([batch_data['length'], np.zeros((padding), dtype=np.int32)])
elif self.mode == self.MODE_ENTPAIR_BAG or self.mode == self.MODE_RELFACT_BAG:
idx0 = self.idx
idx1 = self.idx + batch_size
if idx1 > len(self.order):
idx1 = len(self.order)
self.idx = idx1
_word = []
_pos1 = []
_pos2 = []
_mask = []
_rel = []
_ins_rel = []
_multi_rel = []
_entpair = []
_length = []
_scope = []
cur_pos = 0
for i in range(idx0, idx1):
_word.append(self.data_word[self.scope[self.order[i]][0]:self.scope[self.order[i]][1]])
_pos1.append(self.data_pos1[self.scope[self.order[i]][0]:self.scope[self.order[i]][1]])
_pos2.append(self.data_pos2[self.scope[self.order[i]][0]:self.scope[self.order[i]][1]])
_mask.append(self.data_mask[self.scope[self.order[i]][0]:self.scope[self.order[i]][1]])
_rel.append(self.data_rel[self.scope[self.order[i]][0]])
_ins_rel.append(self.data_rel[self.scope[self.order[i]][0]:self.scope[self.order[i]][1]])
_length.append(self.data_length[self.scope[self.order[i]][0]:self.scope[self.order[i]][1]])
bag_size = self.scope[self.order[i]][1] - self.scope[self.order[i]][0]
_scope.append([cur_pos, cur_pos + bag_size])
cur_pos = cur_pos + bag_size
if self.mode == self.MODE_ENTPAIR_BAG:
_one_multi_rel = np.zeros((self.rel_tot), dtype=np.int32)
for j in range(self.scope[self.order[i]][0], self.scope[self.order[i]][1]):
_one_multi_rel[self.data_rel[j]] = 1
_multi_rel.append(_one_multi_rel)
_entpair.append(self.scope_name[self.order[i]])
for i in range(batch_size - (idx1 - idx0)):
_word.append(np.zeros((1, self.data_word.shape[-1]), dtype=np.int32))
_pos1.append(np.zeros((1, self.data_pos1.shape[-1]), dtype=np.int32))
_pos2.append(np.zeros((1, self.data_pos2.shape[-1]), dtype=np.int32))
_mask.append(np.zeros((1, self.data_mask.shape[-1]), dtype=np.int32))
_rel.append(0)
_ins_rel.append(np.zeros((1), dtype=np.int32))
_length.append(np.zeros((1), dtype=np.int32))
_scope.append([cur_pos, cur_pos + 1])
cur_pos += 1
if self.mode == self.MODE_ENTPAIR_BAG:
_multi_rel.append(np.zeros((self.rel_tot), dtype=np.int32))
_entpair.append('None#None')
batch_data['word'] = np.concatenate(_word)
batch_data['pos1'] = np.concatenate(_pos1)
batch_data['pos2'] = np.concatenate(_pos2)
batch_data['mask'] = np.concatenate(_mask)
batch_data['rel'] = np.stack(_rel)
batch_data['ins_rel'] = np.concatenate(_ins_rel)
if self.mode == self.MODE_ENTPAIR_BAG:
batch_data['multi_rel'] = np.stack(_multi_rel)
batch_data['entpair'] = _entpair
batch_data['length'] = np.concatenate(_length)
batch_data['scope'] = np.stack(_scope)
return batch_data
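# --- Hedged usage sketch: the file paths below are assumptions; the JSON files must
# follow the formats documented in json_file_data_loader.__init__.
# train_loader = json_file_data_loader('./data/train.json', './data/word_vec.json',
#                                      './data/rel2id.json',
#                                      mode=json_file_data_loader.MODE_RELFACT_BAG,
#                                      shuffle=True, batch_size=160)
# for batch in train_loader:  # one pass over the data; StopIteration ends the epoch
#     words, pos1, scope = batch['word'], batch['pos1'], batch['scope']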
|
'''
Copyright 2020 Flexera Software LLC
See LICENSE.TXT for full license text
SPDX-License-Identifier: MIT
Author : sgeary
Created On : Fri Aug 07 2020
File : create_report.py
'''
import sys
import logging
import argparse
import zipfile
import os
import json
from datetime import datetime
import re
import _version
import report_data
import report_artifacts
import report_errors
import CodeInsight_RESTAPIs.project.upload_reports
###################################################################################
# Test the version of python to make sure it's at least the version the script
# was tested on, otherwise there could be unexpected results
if sys.version_info <= (3, 5):
raise Exception("The current version of Python is less than 3.5 which is unsupported.\n Script created/tested against python version 3.8.1. ")
else:
pass
propertiesFile = "../server_properties.json" # Created by installer or manually
propertiesFile = logfileName = os.path.dirname(os.path.realpath(__file__)) + "/" + propertiesFile
baseURL = "http://localhost:8888" # Required if the core.server.properties files is not used
logfileName = os.path.dirname(os.path.realpath(__file__)) + "/_project_inventory_report.log"
###################################################################################
# Set up logging handler to allow for different levels of logging to be capture
logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s', datefmt='%Y-%m-%d:%H:%M:%S', filename=logfileName, filemode='w',level=logging.DEBUG)
logger = logging.getLogger(__name__)
####################################################################################
# Create command line argument options
parser = argparse.ArgumentParser()
parser.add_argument('-pid', "--projectID", help="Project ID")
parser.add_argument("-rid", "--reportID", help="Report ID")
parser.add_argument("-authToken", "--authToken", help="Code Insight Authorization Token")
parser.add_argument("-reportOpts", "--reportOptions", help="Options for report content")
#----------------------------------------------------------------------#
def main():
reportName = "Project Inventory Report"
logger.info("Creating %s - %s" %(reportName, _version.__version__))
print("Creating %s - %s" %(reportName, _version.__version__))
# See what if any arguments were provided
args = parser.parse_args()
projectID = args.projectID
reportID = args.reportID
authToken = args.authToken
reportOptions = args.reportOptions
fileNameTimeStamp = datetime.now().strftime("%Y%m%d-%H%M%S")
    # Based on how the shell passes the arguments, clean up the options if on a Linux system
if sys.platform.startswith('linux'):
reportOptions = reportOptions.replace('""', '"')[1:-1]
#####################################################################################################
# Code Insight System Information
# Pull the base URL from the same file that the installer is creating
try:
file_ptr = open(propertiesFile, "r")
configData = json.load(file_ptr)
baseURL = configData["core.server.url"]
file_ptr.close()
logger.info("Using baseURL from properties file: %s" %propertiesFile)
except:
logger.info("Using baseURL, %s, from create_report.py" %baseURL)
reportOptions = json.loads(reportOptions)
reportOptions = verifyOptions(reportOptions)
logger.debug("Custom Report Provided Arguments:")
logger.debug(" projectID: %s" %projectID)
logger.debug(" reportID: %s" %reportID)
logger.debug(" baseURL: %s" %baseURL)
logger.debug(" reportOptions: %s" %reportOptions)
# Did we fail the options validation?
if "errorMsg" in reportOptions.keys():
reportOptions["reportName"] = reportName
reportOptions["fileNameTimeStamp"] = fileNameTimeStamp
reportOptions["projectID"] = projectID
projectName = "Report_Creation_Error"
numProjects = 0 # No project information gathered
reports = report_errors.create_error_report(reportOptions)
print(" *** ERROR *** Error found validating report options")
else:
reportData = report_data.gather_data_for_report(baseURL, projectID, authToken, reportName, reportOptions)
print(" Report data has been collected")
reportData["fileNameTimeStamp"] = fileNameTimeStamp
projectName = reportData["projectName"]
numProjects = len(reportData["projectList"])
if "errorMsg" in reportData.keys():
reports = report_errors.create_error_report(reportData)
print(" Error report artifacts have been created")
else:
reports = report_artifacts.create_report_artifacts(reportData)
print(" Report artifacts have been created")
print(" Create report archive for upload")
uploadZipfile = create_report_zipfile(reports, reportName, projectName, projectID, numProjects, fileNameTimeStamp)
print(" Upload zip file creation completed")
CodeInsight_RESTAPIs.project.upload_reports.upload_project_report_data(baseURL, projectID, reportID, authToken, uploadZipfile)
print(" Report uploaded to Code Insight")
#########################################################
# Remove the file since it has been uploaded to Code Insight
try:
os.remove(uploadZipfile)
except OSError:
logger.error("Error removing %s" %uploadZipfile)
print("Error removing %s" %uploadZipfile)
logger.info("Completed creating %s" %reportName)
print("Completed creating %s" %reportName)
#----------------------------------------------------------------------#
def verifyOptions(reportOptions):
'''
Expected Options for report:
        includeChildProjects - True/False
        includeComplianceInformation - True/False
        maxVersionsBack - positive integer
        cvssVersion - 2.0/3.x
'''
reportOptions["errorMsg"] = []
trueOptions = ["true", "t", "yes", "y"]
falseOptions = ["false", "f", "no", "n"]
includeChildProjects = reportOptions["includeChildProjects"]
includeComplianceInformation = reportOptions["includeComplianceInformation"]
maxVersionsBack = reportOptions["maxVersionsBack"]
cvssVersion = reportOptions["cvssVersion"]
if includeChildProjects.lower() in trueOptions:
reportOptions["includeChildProjects"] = "true"
elif includeChildProjects.lower() in falseOptions:
reportOptions["includeChildProjects"] = "false"
else:
reportOptions["errorMsg"].append("Invalid option for including child projects: <b>%s</b>. Valid options are <b>True/False</b>" %includeChildProjects)
if includeComplianceInformation.lower() in trueOptions:
reportOptions["includeComplianceInformation"] = True
elif includeComplianceInformation.lower() in falseOptions:
reportOptions["includeComplianceInformation"] = False
else:
reportOptions["errorMsg"].append("Invalid option for including compliance information: <b>%s</b>. Valid options are <b>True/False</b>" %includeComplianceInformation)
if maxVersionsBack.isdigit ():
reportOptions["maxVersionsBack"] = maxVersionsBack
else:
reportOptions["errorMsg"].append("Invalid value for the maximun number of versions from the most recent: <b>%s</b>. An postive interger number is required" %maxVersionsBack)
if cvssVersion.startswith("2"):
reportOptions["cvssVersion"] = "2.0"
elif cvssVersion.startswith("3"):
reportOptions["cvssVersion"] = "3.x"
else:
reportOptions["errorMsg"].append("Invalid option for CVSS Version: <b>%s</b>. Valid options are <b>2.0/3.x</b>" %cvssVersion)
if not reportOptions["errorMsg"]:
reportOptions.pop('errorMsg', None)
return reportOptions
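# Illustrative normalization performed by verifyOptions (hypothetical input values):
#   {"includeChildProjects": "Yes", "includeComplianceInformation": "No", "maxVersionsBack": "3", "cvssVersion": "3.1"}
#   -> {"includeChildProjects": "true", "includeComplianceInformation": False, "maxVersionsBack": "3", "cvssVersion": "3.x"}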
#---------------------------------------------------------------------#
def create_report_zipfile(reportOutputs, reportName, projectName, projectID, numProjects, fileNameTimeStamp):
logger.info("Entering create_report_zipfile")
projectNameForFile = re.sub(r"[^a-zA-Z0-9]+", '-', projectName )
# create a ZipFile object
if numProjects <= 1 :
allFormatZipFile = projectNameForFile + "-" + projectID + "-" + reportName.replace(" ", "_") + "-" + fileNameTimeStamp + ".zip"
else:
allFormatZipFile = projectNameForFile + "-with-children-" + projectID + "-" + reportName.replace(" ", "_") + "-" + fileNameTimeStamp + ".zip"
# create a ZipFile object
allFormatsZip = zipfile.ZipFile(allFormatZipFile, 'w', zipfile.ZIP_DEFLATED)
logger.debug(" Create downloadable archive: %s" %allFormatZipFile)
print(" Create downloadable archive: %s" %allFormatZipFile)
for format in reportOutputs["allFormats"]:
print(" Adding %s to zip" %format)
logger.debug(" Adding %s to zip" %format)
allFormatsZip.write(format)
allFormatsZip.close()
logger.debug( "Downloadable archive created")
print(" Downloadable archive created")
# Now create a temp zipfile of the zipfile along with the viewable file itself
uploadZipflle = allFormatZipFile.replace(".zip", "_upload.zip")
print(" Create zip archive containing viewable and downloadable archive for upload: %s" %uploadZipflle)
logger.debug(" Create zip archive containing viewable and downloadable archive for upload: %s" %uploadZipflle)
zipToUpload = zipfile.ZipFile(uploadZipflle, 'w', zipfile.ZIP_DEFLATED)
zipToUpload.write(reportOutputs["viewable"])
zipToUpload.write(allFormatZipFile)
zipToUpload.close()
logger.debug(" Archive zip file for upload has been created")
print(" Archive zip file for upload has been created")
# Clean up the items that were added to the zipfile
try:
os.remove(allFormatZipFile)
except OSError:
logger.error("Error removing %s" %allFormatZipFile)
print("Error removing %s" %allFormatZipFile)
return -1
for fileName in reportOutputs["allFormats"]:
try:
os.remove(fileName)
except OSError:
logger.error("Error removing %s" %fileName)
print("Error removing %s" %fileName)
return -1
logger.info("Exiting create_report_zipfile")
return uploadZipflle
#----------------------------------------------------------------------#
if __name__ == "__main__":
main()
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Home of the `Sequential` model."""
import copy
import warnings
from tensorflow.python import tf2
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import layers as layer_module
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import functional
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.saving.saved_model import model_serialization
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_inspect
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.module import module
from tensorflow.python.ops.numpy_ops import np_arrays
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
SINGLE_LAYER_OUTPUT_ERROR_MSG = ('All layers in a Sequential model should have '
'a single output tensor. For multi-output '
'layers, use the functional API.')
@keras_export('keras.Sequential', 'keras.models.Sequential')
class Sequential(functional.Functional):
"""`Sequential` groups a linear stack of layers into a `tf.keras.Model`.
`Sequential` provides training and inference features on this model.
Examples:
>>> # Optionally, the first layer can receive an `input_shape` argument:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8, input_shape=(16,)))
>>> # Afterwards, we do automatic shape inference:
>>> model.add(tf.keras.layers.Dense(4))
>>> # This is identical to the following:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.Input(shape=(16,)))
>>> model.add(tf.keras.layers.Dense(8))
>>> # Note that you can also omit the `input_shape` argument.
>>> # In that case the model doesn't have any weights until the first call
>>> # to a training/evaluation method (since it isn't yet built):
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8))
>>> model.add(tf.keras.layers.Dense(4))
>>> # model.weights not created yet
>>> # Whereas if you specify the input shape, the model gets built
>>> # continuously as you are adding layers:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8, input_shape=(16,)))
>>> model.add(tf.keras.layers.Dense(4))
>>> len(model.weights)
4
>>> # When using the delayed-build pattern (no input shape specified), you can
>>> # choose to manually build your model by calling
>>> # `build(batch_input_shape)`:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8))
>>> model.add(tf.keras.layers.Dense(4))
>>> model.build((None, 16))
>>> len(model.weights)
4
```python
# Note that when using the delayed-build pattern (no input shape specified),
# the model gets built the first time you call `fit`, `eval`, or `predict`,
# or the first time you call the model on some input data.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(8))
model.add(tf.keras.layers.Dense(1))
model.compile(optimizer='sgd', loss='mse')
# This builds the model for the first time:
model.fit(x, y, batch_size=32, epochs=10)
```
"""
@trackable.no_automatic_dependency_tracking
def __init__(self, layers=None, name=None):
"""Creates a `Sequential` model instance.
Args:
layers: Optional list of layers to add to the model.
name: Optional name for the model.
"""
# Skip the init in FunctionalModel since model doesn't have input/output yet
super(functional.Functional, self).__init__( # pylint: disable=bad-super-call
name=name, autocast=False)
base_layer.keras_api_gauge.get_cell('Sequential').set(True)
self.supports_masking = True
self._compute_output_and_mask_jointly = True
self._auto_track_sub_layers = False
self._inferred_input_shape = None
self._has_explicit_input_shape = False
self._input_dtype = None
self._layer_call_argspecs = {}
self._created_nodes = set()
    # Flag that indicates whether the sequential network topology has been
    # created. It is False when there are no layers, or the layers don't
    # have a known input shape.
self._graph_initialized = False
# Unfortunately some Sequential models using custom layers or FeatureColumn
# layers have multiple inputs. This is fundamentally incompatible with
# most of the Sequential API, and we have to disable a number of features
# for such models.
self._use_legacy_deferred_behavior = False
# Add to the model any layers passed to the constructor.
if layers:
if not isinstance(layers, (list, tuple)):
layers = [layers]
for layer in layers:
self.add(layer)
@property
def layers(self):
# Historically, `sequential.layers` only returns layers that were added
# via `add`, and omits the auto-generated `InputLayer` that comes at the
# bottom of the stack.
# `Trackable` manages the `_layers` attributes and does filtering
# over it.
layers = super(Sequential, self).layers
if layers and isinstance(layers[0], input_layer.InputLayer):
return layers[1:]
return layers[:]
@trackable.no_automatic_dependency_tracking
def add(self, layer):
"""Adds a layer instance on top of the layer stack.
Args:
layer: layer instance.
Raises:
TypeError: If `layer` is not a layer instance.
ValueError: In case the `layer` argument does not
know its input shape.
ValueError: In case the `layer` argument has
multiple output tensors, or is already connected
somewhere else (forbidden in `Sequential` models).
"""
# If we are passed a Keras tensor created by keras.Input(), we can extract
# the input layer from its keras history and use that without any loss of
# generality.
if hasattr(layer, '_keras_history'):
origin_layer = layer._keras_history[0]
if isinstance(origin_layer, input_layer.InputLayer):
layer = origin_layer
logging.warning(
'Please add `keras.layers.InputLayer` instead of `keras.Input` to '
'Sequential model. `keras.Input` is intended to be used by '
'Functional model.')
if isinstance(layer, module.Module):
if not isinstance(layer, base_layer.Layer):
layer = functional.ModuleWrapper(layer)
else:
raise TypeError('The added layer must be '
'an instance of class Layer. '
'Found: ' + str(layer))
tf_utils.assert_no_legacy_layers([layer])
if not self._is_layer_name_unique(layer):
raise ValueError('All layers added to a Sequential model '
'should have unique names. Name "%s" is already the name'
' of a layer in this model. Update the `name` argument '
'to pass a unique name.' % (layer.name,))
self.built = False
set_inputs = False
self._maybe_create_attribute('_self_tracked_trackables', [])
if not self._self_tracked_trackables:
if isinstance(layer, input_layer.InputLayer):
# Case where the user passes an Input or InputLayer layer via `add`.
set_inputs = True
else:
batch_shape, dtype = training_utils.get_input_shape_and_dtype(layer)
if batch_shape:
# Instantiate an input layer.
x = input_layer.Input(
batch_shape=batch_shape, dtype=dtype, name=layer.name + '_input')
# This will build the current layer
# and create the node connecting the current layer
# to the input layer we just created.
layer(x)
set_inputs = True
if set_inputs:
outputs = nest.flatten(layer._inbound_nodes[-1].outputs)
if len(outputs) != 1:
raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
self.outputs = outputs
self.inputs = layer_utils.get_source_inputs(self.outputs[0])
self.built = True
self._has_explicit_input_shape = True
elif self.outputs:
# If the model is being built continuously on top of an input layer:
# refresh its output.
output_tensor = layer(self.outputs[0])
if len(nest.flatten(output_tensor)) != 1:
raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
self.outputs = [output_tensor]
self.built = True
if set_inputs or self._graph_initialized:
self._init_graph_network(self.inputs, self.outputs)
self._graph_initialized = True
else:
self._self_tracked_trackables.append(layer)
self._handle_deferred_layer_dependencies([layer])
self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
@trackable.no_automatic_dependency_tracking
def pop(self):
"""Removes the last layer in the model.
Raises:
TypeError: if there are no layers in the model.
"""
if not self.layers:
raise TypeError('There are no layers in the model.')
layer = self._self_tracked_trackables.pop()
self._layer_call_argspecs.pop(layer)
if not self.layers:
self.outputs = None
self.inputs = None
self.built = False
self._inferred_input_shape = None
self._has_explicit_input_shape = False
self._graph_initialized = False
elif self._graph_initialized:
self.layers[-1]._outbound_nodes = []
self.outputs = [self.layers[-1].output]
self._init_graph_network(self.inputs, self.outputs)
self.built = True
@trackable.no_automatic_dependency_tracking
def _build_graph_network_for_inferred_shape(self,
input_shape,
input_dtype=None):
if input_shape is None or not self.layers:
return
if not tf2.enabled() or not ops.executing_eagerly_outside_functions():
# This behavior is disabled in V1 or when eager execution is disabled.
return
if (not self._has_explicit_input_shape and
not self._use_legacy_deferred_behavior):
# Determine whether the input shape is novel, i.e. whether the model
# should be rebuilt.
input_shape = tuple(input_shape)
if self._inferred_input_shape is None:
new_shape = input_shape
else:
new_shape = relax_input_shape(self._inferred_input_shape, input_shape)
if (new_shape is not None and new_shape != self._inferred_input_shape):
# A novel shape has been received: we need to rebuild the model.
# In case we are inside a graph function, we step out of it.
with ops.init_scope():
inputs = input_layer.Input(
batch_shape=new_shape,
dtype=input_dtype,
name=self.layers[0].name + '_input')
layer_input = inputs
created_nodes = set()
for layer in self.layers:
# Clear nodes previously created via this method. This prevents
# node accumulation and ensures that e.g. `layer.output` is
# always connected to `model.inputs`
# (this is important e.g. for the feature extraction use case).
# We don't just do `layer._inbound_nodes = []` in order
# not to break shared layers added to Sequential models (which is
# technically illegal as per the `add()` docstring,
# but wasn't previously disabled).
clear_previously_created_nodes(layer, self._created_nodes)
try:
# Create Functional API connection by calling the current layer
layer_output = layer(layer_input)
except: # pylint:disable=bare-except
# Functional API calls may fail for a number of reasons:
# 1) The layer may be buggy. In this case it will be easier for
# the user to debug if we fail on the first call on concrete data,
# instead of our own call on a symbolic input.
# 2) The layer is dynamic (graph-incompatible) and hasn't
# overridden `compute_output_shape`. In this case, it is
# impossible to build a graph network.
# 3) The layer is otherwise incompatible with the Functional API
# (e.g. this is the case for some probabilistic layers that rely
# on hacks and that do not return tensors).
# In all these cases, we should avoid creating a graph network
# (or we simply can't).
self._use_legacy_deferred_behavior = True
return
if len(nest.flatten(layer_output)) != 1:
raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
# Keep track of nodes just created above
track_nodes_created_by_last_call(layer, created_nodes)
layer_input = layer_output
outputs = layer_output
self._created_nodes = created_nodes
try:
# Initialize a graph Network. This call will never fail for
# a stack of valid Keras layers.
# However some users have layers that are fundamentally incompatible
# with the Functional API, which do not return tensors. In this
# case, we fall back to the legacy deferred behavior.
# TODO(fchollet): consider raising here, as we should not be
# supporting such layers.
self._init_graph_network(inputs, outputs)
self._graph_initialized = True
except: # pylint:disable=bare-except
self._use_legacy_deferred_behavior = True
self._inferred_input_shape = new_shape
@generic_utils.default
def build(self, input_shape=None):
if self._graph_initialized:
self._init_graph_network(self.inputs, self.outputs)
else:
if input_shape is None:
raise ValueError('You must provide an `input_shape` argument.')
self._build_graph_network_for_inferred_shape(input_shape)
if not self.built:
input_shape = tuple(input_shape)
self._build_input_shape = input_shape
super(Sequential, self).build(input_shape)
self.built = True
def call(self, inputs, training=None, mask=None): # pylint: disable=redefined-outer-name
# If applicable, update the static input shape of the model.
if not self._has_explicit_input_shape:
if not tensor_util.is_tf_type(inputs) and not isinstance(
inputs, np_arrays.ndarray):
        # This is a Sequential model with multiple inputs. This is technically an
        # invalid use case of Sequential, but we tolerate it for backwards
        # compatibility.
self._use_legacy_deferred_behavior = True
self._build_input_shape = nest.map_structure(_get_shape_tuple, inputs)
if tf2.enabled():
logging.warning('Layers in a Sequential model should only have a '
'single input tensor, but we receive a %s input: %s'
'\nConsider rewriting this model with the Functional '
'API.' % (type(inputs), inputs))
else:
self._build_graph_network_for_inferred_shape(inputs.shape, inputs.dtype)
if self._graph_initialized:
if not self.built:
self._init_graph_network(self.inputs, self.outputs)
return super(Sequential, self).call(inputs, training=training, mask=mask)
outputs = inputs # handle the corner case where self.layers is empty
for layer in self.layers:
# During each iteration, `inputs` are the inputs to `layer`, and `outputs`
# are the outputs of `layer` applied to `inputs`. At the end of each
# iteration `inputs` is set to `outputs` to prepare for the next layer.
kwargs = {}
argspec = self._layer_call_argspecs[layer].args
if 'mask' in argspec:
kwargs['mask'] = mask
if 'training' in argspec:
kwargs['training'] = training
outputs = layer(inputs, **kwargs)
if len(nest.flatten(outputs)) != 1:
raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
# `outputs` will be the inputs to the next layer.
inputs = outputs
mask = getattr(outputs, '_keras_mask', None)
return outputs
def compute_output_shape(self, input_shape):
shape = input_shape
for layer in self.layers:
shape = layer.compute_output_shape(shape)
return shape
def compute_mask(self, inputs, mask):
# TODO(omalleyt): b/123540974 This function is not really safe to call
# by itself because it will duplicate any updates and losses in graph
# mode by `call`ing the Layers again.
outputs = self.call(inputs, mask=mask)
return getattr(outputs, '_keras_mask', None)
def predict_proba(self, x, batch_size=32, verbose=0):
"""Generates class probability predictions for the input samples.
The input samples are processed batch by batch.
Args:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
Returns:
A Numpy array of probability predictions.
"""
warnings.warn('`model.predict_proba()` is deprecated and '
'will be removed after 2021-01-01. '
'Please use `model.predict()` instead.')
preds = self.predict(x, batch_size, verbose)
if preds.min() < 0. or preds.max() > 1.:
logging.warning('Network returning invalid probability values. '
'The last layer might not normalize predictions '
'into probabilities '
'(like softmax or sigmoid would).')
return preds
def predict_classes(self, x, batch_size=32, verbose=0):
"""Generate class predictions for the input samples.
The input samples are processed batch by batch.
Args:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
Returns:
A numpy array of class predictions.
"""
warnings.warn('`model.predict_classes()` is deprecated and '
'will be removed after 2021-01-01. '
'Please use instead:'
'* `np.argmax(model.predict(x), axis=-1)`, '
' if your model does multi-class classification '
' (e.g. if it uses a `softmax` last-layer activation).'
'* `(model.predict(x) > 0.5).astype("int32")`, '
' if your model does binary classification '
' (e.g. if it uses a `sigmoid` last-layer activation).')
proba = self.predict(x, batch_size=batch_size, verbose=verbose)
if proba.shape[-1] > 1:
return proba.argmax(axis=-1)
else:
return (proba > 0.5).astype('int32')
def get_config(self):
layer_configs = []
for layer in super(Sequential, self).layers:
# `super().layers` include the InputLayer if available (it is filtered out
# of `self.layers`). Note that `self._self_tracked_trackables` is managed
# by the tracking infrastructure and should not be used.
layer_configs.append(generic_utils.serialize_keras_object(layer))
config = {
'name': self.name,
'layers': copy.deepcopy(layer_configs)
}
if not self._is_graph_network and self._build_input_shape is not None:
config['build_input_shape'] = self._build_input_shape
return config
@classmethod
def from_config(cls, config, custom_objects=None):
if 'name' in config:
name = config['name']
build_input_shape = config.get('build_input_shape')
layer_configs = config['layers']
else:
name = None
build_input_shape = None
layer_configs = config
model = cls(name=name)
for layer_config in layer_configs:
layer = layer_module.deserialize(layer_config,
custom_objects=custom_objects)
model.add(layer)
if (not model.inputs and build_input_shape and
isinstance(build_input_shape, (tuple, list))):
model.build(build_input_shape)
return model
@property
def input_spec(self):
if hasattr(self, '_manual_input_spec'):
return self._manual_input_spec
if self.layers and hasattr(self.layers[0], 'input_spec'):
return self.layers[0].input_spec
return None
@input_spec.setter
def input_spec(self, value):
self._manual_input_spec = value
@property
def _trackable_saved_model_saver(self):
return model_serialization.SequentialSavedModelSaver(self)
def _is_layer_name_unique(self, layer):
for ref_layer in self.layers:
if layer.name == ref_layer.name and ref_layer is not layer:
return False
return True
def _assert_weights_created(self):
if self._graph_initialized:
return
    # When the graph has not been initialized, use the Model's implementation
    # to check if the weights have been created.
super(functional.Functional, self)._assert_weights_created() # pylint: disable=bad-super-call
def _get_shape_tuple(t):
if hasattr(t, 'shape'):
shape = t.shape
if isinstance(shape, tuple):
return shape
if shape.rank is not None:
return tuple(shape.as_list())
return None
return None
def relax_input_shape(shape_1, shape_2):
if shape_1 is None or shape_2 is None:
return None
if len(shape_1) != len(shape_2):
return None
return tuple(None if d1 != d2 else d1 for d1, d2 in zip(shape_1, shape_2))
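# For example, relax_input_shape((32, 16), (64, 16)) -> (None, 16), while shapes of different
# rank such as (32, 16) and (32, 16, 3) relax to None (no common static shape).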
def clear_previously_created_nodes(layer, created_nodes):
"""Remove nodes from `created_nodes` from the layer's inbound_nodes."""
for node in layer._inbound_nodes:
prev_layers = node.inbound_layers
for prev_layer in nest.flatten(prev_layers):
prev_layer._outbound_nodes = [
n for n in prev_layer._outbound_nodes
if n not in created_nodes]
layer._inbound_nodes = [
n for n in layer._inbound_nodes if n not in created_nodes]
def track_nodes_created_by_last_call(layer, created_nodes):
"""Adds to `created_nodes` the nodes created by the last call to `layer`."""
if not layer._inbound_nodes:
return
created_nodes.add(layer._inbound_nodes[-1])
prev_layers = layer._inbound_nodes[-1].inbound_layers
for prev_layer in nest.flatten(prev_layers):
if prev_layer._outbound_nodes:
created_nodes.add(prev_layer._outbound_nodes[-1])
|
from page_objects.exercises.po_exercise_1 import *
def test_positive(driver):
set_driver(driver)
open_page()
click_button1()
click_button2()
click_button1()
click_check_soluition()
assert get_trail_text() == Config.TEST_PASS_TEXT
def test_negative(driver):
set_driver(driver)
open_page()
click_button1()
click_button1()
click_button1()
click_check_soluition()
assert get_trail_text() == Config.TEST_FAIL_TEXT
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
import json
import os
import sys
import csv
from pySmartDL import SmartDL
from shapely.geometry import shape
from planet.api.auth import find_api_key
os.chdir(os.path.dirname(os.path.realpath(__file__)))
planethome = os.path.dirname(os.path.realpath(__file__))
idmatch=[]
# Create an empty geojson template
temp = {"coordinates":[], "type":"Polygon"}
try:
PL_API_KEY = find_api_key()
os.environ['PLANET_API_KEY'] = find_api_key()
except:
print('Failed to get Planet Key: Initialize First')
sys.exit()
SESSION = requests.Session()
SESSION.auth = (PL_API_KEY, '')
CAS_URL = 'https://api.planet.com/mosaic/experimental/mosaics/'
# Function to download the geotiffs
def multipart(ids,names, idlist, infile, coverage, local):
if idlist is None and names is not None:
downloader(ids,names, infile, coverage, local)
elif idlist is not None:
with open(idlist) as csvfile:
reader=csv.DictReader(csvfile)
for row in reader:
print('')
print('Processing: '+str(row['name']))
downloader(str(row['id']),str(row['name']),infile, coverage, local)
# Handle a page of mosaic quads and download each quad geotiff
def hpage(page,names,coverage, local):
try:
for things in page['items']:
downlink=(things['_links']['download'])
if coverage is not None and int(things['percent_covered']) >= int(coverage):
r = requests.get(downlink,allow_redirects=False)
filelink = r.headers['Location']
filename = str(r.headers['Location']).split('%22')[-2]
fpath=os.path.join(local,names)
if not os.path.exists(fpath):
os.makedirs(fpath)
localpath = os.path.join(fpath,filename)
if not os.path.exists(localpath):
print("Downloading: " + str(localpath))
obj = SmartDL(filelink, localpath)
obj.start()
path = obj.get_dest()
else:
print("File already exists SKIPPING: " + str(localpath))
elif coverage is None:
downlink = things['_links']['download']
r = requests.get(downlink,allow_redirects=False)
filelink=r.headers['Location']
filename=str(r.headers['Location']).split('%22')[-2]
fpath=os.path.join(local,names)
if not os.path.exists(fpath):
os.makedirs(fpath)
localpath = os.path.join(fpath,filename)
if not os.path.exists(localpath):
print("Downloading: " + str(localpath))
obj = SmartDL(filelink, localpath)
obj.start()
path = obj.get_dest()
else:
print("File already exists SKIPPING: " + str(localpath))
except Exception as e:
print(e)
except (KeyboardInterrupt, SystemExit) as e:
print('Program escaped by User')
sys.exit()
# Get item id from item name
def handle_page(names,response):
for items in response['mosaics']:
if items['name']==names:
return items['id']
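# Illustrative example (hypothetical API response):
#   handle_page('my_mosaic', {'mosaics': [{'name': 'my_mosaic', 'id': 'abc-123'}]}) -> 'abc-123'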
# Downloader
def downloader(ids,names, infile, coverage, local):
if names is None and ids is not None:
ids=ids
elif names is not None and ids is None:
resp=SESSION.get('https://api.planet.com/basemaps/v1/mosaics')
response=resp.json()
ids=handle_page(names,response)
idmatch.append(ids)
try:
while response['_links'].get('_next') is not None:
page_url = response['_links'].get('_next')
r = requests.get(page_url)
response = r.json()
ids = handle_page(names,response)
idmatch.append(ids)
except Exception as e:
print(e)
for ival in idmatch:
if ival is not None:
ids=ival
elif names is not None and ids is not None:
ids = ids
headers = {'Content-Type': 'application/json'}
try:
if infile.endswith('.geojson'):
with open(infile) as aoi:
aoi_resp = json.load(aoi)
aoi_geom = aoi_resp['features'][0]['geometry']['coordinates']
elif infile.endswith('.json'):
with open (infile) as aoi:
aoi_resp=json.load(aoi)
aoi_geom=aoi_resp['config'][0]['config']['coordinates']
elif infile.endswith('.kml'):
getcoord=kml2coord(infile)
aoi_geom=getcoord
except Exception as e:
print('Could not parse geometry')
print(e)
temp['coordinates'] = aoi_geom
gmain = shape(temp)
gmainbound = (','.join(str(v) for v in list(gmain.bounds)))
gboundlist = gmainbound.split(',')
url = CAS_URL \
+ str(ids) + '/quads?bbox=' + str(gboundlist[0]) \
+ '%2C' + str(gboundlist[1]) + '%2C' + str(gboundlist[2]) \
+ '%2C' + str(gboundlist[3])
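    # The resulting request URL has the form (illustrative mosaic id and bounds):
    #   https://api.planet.com/mosaic/experimental/mosaics/<mosaic_id>/quads?bbox=<xmin>%2C<ymin>%2C<xmax>%2C<ymax>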
#print(url)
main = SESSION.get(url)
if main.status_code == 200:
page=main.json()
hpage(page,names,coverage, local)
while page['_links'].get('_next') is not None:
try:
page_url = page['_links'].get('_next')
result = SESSION.get(page_url)
if result.status_code == 200:
page=result.json()
hpage(page,names,coverage, local)
else:
print(result.status_code)
except Exception as e:
pass
except (KeyboardInterrupt, SystemExit) as e:
print('Program escaped by User')
sys.exit()
# Example call (commented out; illustrative mosaic id and local paths):
# downloader(ids='af953970-7189-473a-8e26-24397577eaa2', names=None,
#            infile=r'C:\Users\samapriya\Downloads\belem.geojson', coverage=None,
#            local=r'C:\planet_demo')
|
from __future__ import print_function
__all__ = ["BrowseTag", "OPCBrowseTag"]
from java.lang import Object
class BrowseTag(Object):
def __init__(
self,
name=None,
path=None,
fullPath=None,
type=None,
valueSource=None,
dataType=None,
):
self.name = name
self.path = path
self.fullPath = fullPath
self.type = type
self.valueSource = valueSource
self.dataType = dataType
def getDataType(self):
return self.dataType
def getFullPath(self):
return self.fullPath
def getPath(self):
return self.path
def getTagType(self):
return self.type
def getValueSource(self):
return self.valueSource
def isDB(self):
print(self)
return True
def isExpression(self):
print(self)
return True
def isFolder(self):
print(self)
return True
def isMemory(self):
print(self)
return True
def isOPC(self):
print(self)
return True
def isQuery(self):
print(self)
return True
def isUDT(self):
print(self)
return True
class OPCBrowseTag(Object):
def __init__(
self,
opcServer=None,
type=None,
displayName=None,
displayPath=None,
dataType=None,
opcItemPath=None,
):
self.opcServer = opcServer
self.type = type
self.displayName = displayName
self.displayPath = displayPath
self.dataType = dataType
self.opcItemPath = opcItemPath
def getDataType(self):
return self.dataType
def getDisplayName(self):
return self.displayName
def getDisplayPath(self):
return self.displayPath
def getOpcItemPath(self):
return self.opcItemPath
def getOpcServer(self):
return self.opcServer
def getType(self):
return self.type
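# Minimal usage sketch under Ignition/Jython (illustrative values only):
#   tag = BrowseTag(name="Pressure", path="[default]Line1/Pressure", dataType="Float8")
#   tag.getPath()   # -> "[default]Line1/Pressure"
# Note that the is*() helpers above are stubs that print the instance and always return True.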
|
# Main program - Version 1
# This is an example of how to use the library turboGen.py
# and cmpspec.py
# GENERATING 1D-2D-3D GAUSSIAN STOCHASTIC FIELD WITH A GIVEN POWER SPECTRUM AS INPUT
"""
Author: Stefano Merlini
Created: 14/05/2020
"""
# ____ _ _ __ _ _ ____ __ ____
# ( __)( \/ ) / _\ ( \/ )( _ \( ) ( __)
# ) _) ) ( / \/ \/ \ ) __// (_/\ ) _)
# (____)(_/\_)\_/\_/\_)(_/(__) \____/(____)
# import library
import numpy as np
import turboGen as tg
import time
import matplotlib.pyplot as plt
import cmpspec
import matplotlib.cm
from mpl_toolkits.mplot3d import Axes3D
# ____ ____ ____ ___ ____ ____ _ _ _ _
# / ___)( _ \( __)/ __)(_ _)( _ \/ )( \( \/ )
# \___ \ ) __/ ) _)( (__ )( ) /) \/ (/ \/ \
# (____/(__) (____)\___) (__) (__\_)\____/\_)(_/
# this is the standard kolmogorov spectrum -5/3
#
class k41:
def evaluate(self, k):
espec = pow(k,-5.0/3.0)
return espec
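# For example, k41().evaluate(10.0) returns 10.0**(-5.0/3.0) ~ 0.0215 (a pure power law, no prefactor).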
# __ ____ ____ __ ____ __ ____
# / \ ___( \ ( __)( )( __)( ) ( \
# (_/ /(___)) D ( ) _) )( ) _) / (_/\ ) D (
# (__) (____/ (__) (__)(____)\____/(____/
# First case. let's assume 1-D
# GRID RESOLUTION nx
nx = 64
# DOMAIN DEFINITION
lx = 1
# NUMBER OF MODES
nmodes = 100
# SPECIFY THE SPECTRUM THAT WE WANT
# right now only kolmogorov -5/3
inputspec = 'k41'
# PATH folder
pathfolder = './Output'
filename1 = inputspec + '_' + str(nx) + '_' + str(nmodes) + '_modes'
# CALL CLASS SPECTRUM
whichspect = k41().evaluate
# Defining the smallest wavenumber represented by this spectrum
wn1 = 2.0*np.pi/lx
# Summary of the user input
print("SUMMARY OF THE USER INPUTs:")
print("---------------------------")
print("Type of generator: 1D")
print("Spectrum: ", inputspec)
print("Domain size: ", lx)
print("Grid Resolution", nx)
print("Fourier accuracy (modes): ", nmodes)
#
# STARTING...
# Smallest step size
dx = lx/nx
t0 = time.time() # initial time
# --------------------------------------------------
# Run the function TurboGenerator
# --------------------------------------------------
r_x = tg.gaussian1Dcos(lx, nx, nmodes, wn1, whichspect)
#
t1 = time.time() # final time
computing_time = t1 - t0
#
print("It took me ", computing_time, "to generate the 1D turbulence.")
# COMPUTE THE POWER SPECTRUM OF THE 1-D FIELD
# verify that the generated velocities fit the spectrum
knyquist1D, wavenumbers1D, tkespec1D = cmpspec.compute1Dspectrum(r_x, lx, False)
# save the generated spectrum to a text file for later post processing
np.savetxt(pathfolder + '/1D_tkespec_' + filename1 + '.txt', np.transpose([wavenumbers1D, tkespec1D]))
# ____ ____ ____ __ ____ __ ____
# (___ \ ___( \ ( __)( )( __)( ) ( \
# / __/(___)) D ( ) _) )( ) _) / (_/\ ) D (
# (____) (____/ (__) (__)(____)\____/(____/
# Second case. let's assume 2-D
# GRID RESOLUTION nx, ny
nx = 64
ny = 64
# DOMAIN DEFINITION
lx = 1
ly = 1
# NUMBER OF MODES
nmodes = 100
# SPECIFY THE SPECTRUM THAT WE WANT
# right now only kolmogorov -5/3
inputspec = 'k41'
# PATH folder
pathfolder = './Output'
filename2 = inputspec + '_' + str(nx) + '_' + str(ny) + '_' + str(nmodes) + '_modes'
# CALL CLASS SPECTRUM
whichspect = k41().evaluate
# Defining the smallest wavenumber represented by this spectrum
wn1 = min(2.0*np.pi/lx, 2.0*np.pi/ly)
# Summary of the user input
print("SUMMARY OF THE USER INPUTs:")
print("---------------------------")
print("Type of generator: 2D")
print("Spectrum: ", inputspec)
print("Domain size: ", lx, ly)
print("Grid Resolution", nx, ny)
print("Fourier accuracy (modes): ", nmodes)
#
# STARTING...
# Smallest step size
dx = lx/nx
dy = ly/ny
t0 = time.time() # initial time
# --------------------------------------------------
# Run the function TurboGenerator
# --------------------------------------------------
r_xy = tg.gaussian2Dcos(lx, ly, nx, ny, nmodes, wn1, whichspect)
t1 = time.time() # final time
computing_time = t1 - t0
print("It took me ", computing_time, "to generate the 2D turbulence.")
# COMPUTE THE POWER SPECTRUM OF THE 2-D FIELD
# verify that the generated velocities fit the spectrum
knyquist2D, wavenumbers2D, tkespec2D = cmpspec.compute2Dspectrum(r_xy, lx, ly, False)
# save the generated spectrum to a text file for later post processing
np.savetxt(pathfolder + '/2D_tkespec_' + filename2 + '.txt', np.transpose([wavenumbers2D, tkespec2D]))
# ____ ____ ____ __ ____ __ ____
# ( __ \ ___( \ ( __)( )( __)( ) ( \
# (__ ((___)) D ( ) _) )( ) _) / (_/\ ) D (
# (____/ (____/ (__) (__)(____)\____/(____/
# Third case. let's assume 3-D
# GRID RESOLUTION nx, ny, nz
nx = 64
ny = 64
nz = 64
# DOMAIN DEFINITION
lx = 1
ly = 1
lz = 1
# NUMBER OF MODES
nmodes = 100
# SPECIFY THE SPECTRUM THAT WE WANT
# right now only kolmogorov -5/3
inputspec = 'k41'
# PATH folder
pathfolder = './Output'
filename3 = inputspec + '_' + str(nx) + '_' + str(ny) + '_' + str(nz) + '_' + str(nmodes) + '_modes'
# CALL CLASS SPECTRUM
whichspect = k41().evaluate
# Defining the smallest wavenumber represented by this spectrum
wn1 = min(2.0*np.pi/lx, 2.0*np.pi/ly, 2.0*np.pi/lz)
# Summary of the user input
print("SUMMARY OF THE USER INPUTs:")
print("---------------------------")
print("Type of generator: 3D")
print("Spectrum: ", inputspec)
print("Domain size: ", lx, ly, lz)
print("Grid Resolution", nx, ny, nz)
print("Fourier accuracy (modes): ", nmodes)
#
# STARTING...
# Smallest step size
dx = lx/nx
dy = ly/ny
dz = lz/nz
t0 = time.time() # initial time
# --------------------------------------------------
# Run the function TurboGenerator
# --------------------------------------------------
r_xyz = tg.gaussian3Dcos(lx, ly, lz, nx, ny, nz, nmodes, wn1, whichspect)
t1 = time.time() # final time
computing_time = t1 - t0
print("It took me ", computing_time, "to generate the 3D turbulence.")
# COMPUTE THE POWER SPECTRUM OF THE 3-D FIELD
# verify that the generated velocities fit the spectrum
knyquist3D, wavenumbers3D, tkespec3D = cmpspec.compute3Dspectrum(r_xyz, lx, ly, lz, False)
# save the generated spectrum to a text file for later post processing
np.savetxt(pathfolder + '/3D_tkespec_' + filename3 + '.txt', np.transpose([wavenumbers3D, tkespec3D]))
# ____ __ __ ____ ____ ____ ____ _ _ __ ____ ____
# ( _ \( ) / \(_ _) ( _ \( __)/ ___)/ )( \( ) (_ _)/ ___)
# ) __// (_/\( O ) )( ) / ) _) \___ \) \/ (/ (_/\ )( \___ \
# (__) \____/ \__/ (__) (__\_)(____)(____/\____/\____/(__) (____/
# PLOT THE 1D, 2D, 3D FIELD IN REAL DOMAIN AND RELATIVE POWER SPECTRUM
# ---------------------------------------------------------------------
# Plot 1D-FIELD
plt.rc("font", size=10, family='serif')
fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
X = np.arange(0,lx,dx)
plt.plot(X,r_x, 'k-', label='computed')
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xlabel('Meter [m]')
plt.ylabel(r'$ \rho(x) $')
plt.legend()
plt.grid()
fig.savefig(pathfolder + '/1D_field_' + filename1 + '.pdf')
# Plot 2D-FIELD
plt.rc("font", size=10, family='serif')
fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
X, Y = np.meshgrid(np.arange(0,lx,dx),np.arange(0,ly,dy))
cp = plt.contourf(X, Y, r_xy, cmap = matplotlib.cm.get_cmap('plasma'))
cb = plt.colorbar(cp)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xlabel('Meter [m]')
plt.ylabel('Meter [m]')
cb.set_label(r'$ \rho(x,y) $', rotation=270)
plt.grid()
fig.savefig(pathfolder + '/2D_field_' + filename2 + '.pdf')
plt.show()
# Plot 3D-FIELD
plt.rc("font", size=10, family='serif')
fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
# X, Y, Z = np.meshgrid(np.arange(0,lx,dx),np.arange(0,ly,dy),np.arange(0,lz,dz))
X, Y = np.meshgrid(np.arange(0,lx,dx),np.arange(0,ly,dy))
cp = plt.contourf(X, Y, r_xyz[:,:,1], cmap = matplotlib.cm.get_cmap('plasma'))
cb = plt.colorbar(cp)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xlabel('Meter [m]')
plt.ylabel('Meter [m]')
cb.set_label(r'$ \rho(x,y) $', rotation=270)
plt.grid()
fig.savefig(pathfolder + '/3D_field_slice_' + filename3 + '.pdf')
plt.show()
# --------------------------------------------------------------
# PLOT NUMERICAL AND THEORETICAL POWER SPECTRUM
# Plot in log-log
# --------------------------------------------------------------
# PLOT 1-D FIELD SPECTRUM
# Range of wavenumbers from minimum wavenumber wn1 up to 2000
plt.rc("font", size=10, family='serif')
fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
wnn = np.arange(wn1, 2000)
l1, = plt.loglog(wnn, whichspect(wnn), 'k-', label='input')
l2, = plt.loglog(wavenumbers1D[1:6], tkespec1D[1:6], 'bo--', markersize=3, markerfacecolor='w', markevery=1, label='computed')
plt.loglog(wavenumbers1D[5:], tkespec1D[5:], 'bo--', markersize=3, markerfacecolor='w', markevery=4)
plt.axis([3, 10000, 1e-7, 1e-1])
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.axvline(x=knyquist1D, linestyle='--', color='black')
plt.xlabel(r'$\kappa$ [1/m]')
plt.ylabel(r'$E(\kappa)$ [m$^3$/s$^2$]')
plt.grid()
plt.legend()
fig.savefig(pathfolder + '/1D_tkespec_' + filename1 + '.pdf')
plt.show()
# PLOT 2-D FIELD SPECTRUM
# Range of wavenumbers from minimum wavenumber wn1 up to 2000
plt.rc("font", size=10, family='serif')
fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
wnn = np.arange(wn1, 2000)
l1, = plt.loglog(wnn, whichspect(wnn), 'k-', label='input')
l2, = plt.loglog(wavenumbers2D[1:6], tkespec2D[1:6], 'bo--', markersize=3, markerfacecolor='w', markevery=1, label='computed')
plt.loglog(wavenumbers2D[5:], tkespec2D[5:], 'bo--', markersize=3, markerfacecolor='w', markevery=4)
plt.axis([3, 10000, 1e-7, 1e-1])
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.axvline(x=knyquist2D, linestyle='--', color='black')
plt.xlabel(r'$\kappa$ [1/m]')
plt.ylabel(r'$E(\kappa)$ [m$^3$/s$^2$]')
plt.grid()
plt.legend()
fig.savefig(pathfolder + '/2D_tkespec_' + filename2 + '.pdf')
plt.show()
# PLOT 3-D FIELD SPECTRUM
# Range of wavenumbers from minimum wavenumber wn1 up to 2000
plt.rc("font", size=10, family='serif')
fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
wnn = np.arange(wn1, 2000)
l1, = plt.loglog(wnn, whichspect(wnn), 'k-', label='input')
l2, = plt.loglog(wavenumbers3D[1:6], tkespec3D[1:6], 'bo--', markersize=3, markerfacecolor='w', markevery=1, label='computed')
plt.loglog(wavenumbers3D[5:], tkespec3D[5:], 'bo--', markersize=3, markerfacecolor='w', markevery=4)
plt.axis([3, 10000, 1e-7, 1e-1])
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.axvline(x=knyquist3D, linestyle='--', color='black')
plt.xlabel(r'$\kappa$ [1/m]')
plt.ylabel(r'$E(\kappa)$ [m$^3$/s$^2$]')
plt.grid()
plt.legend()
fig.savefig(pathfolder + '/3D_tkespec_' + filename3 + '.pdf')
plt.show()
# ____ ____ ____ __ __ ____ ____ __ ____ __ ____
# ( __ \ ___( \ ( _ \( ) / \(_ _) ( __)( )( __)( ) ( \
# (__ ((___)) D ( ) __// (_/\( O ) )( ) _) )( ) _) / (_/\ ) D (
# (____/ (____/ (__) \____/ \__/ (__) (__) (__)(____)\____/(____/
# plt.rc("font", size=10, family='serif')
# fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
# ax = fig.gca(projection='3d')
# X, Y = np.meshgrid(np.arange(0,lx,dx),np.arange(0,ly,dy))
# cset = [[],[],[]]
# # this is the example that worked for you:
# Z = r_xyz[0,:,:]
# cset[0] = ax.contourf(Z, X, Y, zdir = 'x', offset = , cmap = matplotlib.cm.get_cmap('plasma'))
# # cset[0] = ax.contourf(X, Y, Z, zdir = 'y', offset = , levels=np.linspace(np.min(Z),np.max(Z),30), cmap = matplotlib.cm.get_cmap('plasma'))
# # now, for the x-constant face, assign the contour to the x-plot-variable:
# # cset[1] = ax.contourf(X, Y, r_xyz[:,:,31], levels=np.linspace(np.min(r_xyz[:,:,31]),np.max(r_xyz[:,:,31]),30), cmap = matplotlib.cm.get_cmap('plasma'))
# # likewise, for the y-constant face, assign the contour to the y-plot-variable:
# # cset[2] = ax.contourf(X, Y, r_xyz[:,:,63] , levels=np.linspace(np.min(r_xyz[:,:,63]),np.max(r_xyz[:,:,63]),30), cmap = matplotlib.cm.get_cmap('plasma'))
# # # setting 3D-axis-limits:
# # ax.set_xlim3d(0,nx)
# # ax.set_ylim3d(0,ny)
# # ax.set_zlim3d(0,nz)
# ax.set_xlabel('X')
# ax.set_ylabel('Y')
# ax.set_zlabel('Z')
# plt.grid()
# fig.savefig(pathfolder + '/3D_field_' + filename3 + '.pdf')
# plt.show()
|
import os
import copy
import collections
import warnings
import logging
import inspect
from collections import OrderedDict
from six.moves import configparser
import numpy as np
import tensorflow as tf
class _SettingsContextManager(object):
def __init__(self, manager, tmp_settings):
self._manager = manager
self._tmp_settings = tmp_settings
def __enter__(self):
self._manager.push(self._tmp_settings)
def __exit__(self, exc_type, exc_val, exc_tb):
self._manager.pop()
class _SettingsManager(object):
def __init__(self, cur_settings):
self._cur_settings = cur_settings
self._settings_stack = []
def __getattr__(self, name):
try:
return self._cur_settings[name]
except KeyError:
raise AttributeError("Unknown setting.")
def push(self, extra_settings):
self._settings_stack.append(self._cur_settings)
self._cur_settings = extra_settings
def pop(self):
rem = self._cur_settings
self._cur_settings = self._settings_stack.pop()
return rem
def temp_settings(self, tmp_settings):
return _SettingsContextManager(self, tmp_settings)
def get_settings(self):
return copy.deepcopy(self._cur_settings)
@property
def jitter(self):
return self.numerics.jitter_level
@property
def tf_float(self):
warnings.warn('tf_float is deprecated and will be removed at GPflow '
'version 1.2.0. Use float_type.', DeprecationWarning)
return self.float_type
@property
def tf_int(self):
warnings.warn('tf_int is deprecated and will be removed at GPflow '
'version 1.2.0. Use int_type.', DeprecationWarning)
return self.int_type
@property
def np_float(self):
warnings.warn('np_float is deprecated and will be removed at GPflow '
'version 1.2.0. Use float_type.', DeprecationWarning)
return self.float_type
@property
def np_int(self):
warnings.warn('np_int is deprecated and will be removed at GPflow '
'version 1.2.0. Use int_type.', DeprecationWarning)
return self.int_type
@property
def float_type(self):
return self.dtypes.float_type
@property
def int_type(self):
return self.dtypes.int_type
@property
def logging_level(self):
return self.logging.level
def logger(self):
frame = inspect.currentframe().f_back
module = inspect.getmodule(frame)
name = 'gpflow' if module is None else module.__name__
level = logging.getLevelName(self.logging.level)
logging.basicConfig()
log = logging.getLogger(name)
log.setLevel(level)
return log
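# Typical usage sketch for temporary settings (attribute names follow the gpflowrc sections):
#   custom = SETTINGS.get_settings()
#   custom.numerics.jitter_level = 1e-4
#   with SETTINGS.temp_settings(custom):
#       ...  # code in this block sees jitter_level == 1e-4; the previous settings are restored on exit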
class _MutableNamedTuple(OrderedDict):
"""
A class that doubles as a mutable named tuple, to allow settings
    to be re-set during code execution.
"""
def __init__(self, *args, **kwargs):
super(_MutableNamedTuple, self).__init__(*args, **kwargs)
self._settings_stack = []
self._initialised = True
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
if not hasattr(self, "_initialised"):
super(_MutableNamedTuple, self).__setattr__(name, value)
else:
super(_MutableNamedTuple, self).__setitem__(name, value)
# a very simple parser
def _parse(string):
"""
Very simple config values parser.
"""
if not isinstance(string, str):
raise ValueError('Config value "{0}" expected to be string.'
.format(string))
if string in ['true', 'True']:
return True
elif string in ['false', 'False']:
return False
elif string in ['float64', 'float32', 'float16',
'int64', 'int32', 'int16']:
return getattr(np, string)
else:
try:
return int(string)
except ValueError:
pass
try:
return float(string)
except ValueError:
return string
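# A few illustrative conversions performed by _parse:
#   _parse('true') -> True, _parse('float32') -> np.float32,
#   _parse('42') -> 42, _parse('1e-6') -> 1e-06, anything else stays a string.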
def _namedtuplify(mapping):
"""
Make the dictionary into a nested series of named tuples.
This is what allows accessing by attribute: settings.numerics.jitter
Thank you https://gist.github.com/hangtwenty/5960435
"""
if isinstance(mapping, collections.Mapping):
for key, value in list(mapping.items()):
mapping[key] = _namedtuplify(value)
try:
mapping.pop('__name__')
except KeyError:
pass
# return collections.namedtuple('settingsa', dict(**mapping))(**mapping)
return _MutableNamedTuple(mapping)
return _parse(mapping)
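# Example (hypothetical keys): s = _namedtuplify({'numerics': {'jitter_level': '1e-6'}}) yields an
# object that supports attribute access, e.g. s.numerics.jitter_level == 1e-06.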
def _read_config_file(path=None):
"""
Reads config file.
First look for config file in the current directory, then in the
user's home directory, then in the same directory as this file.
    Tries to find config file both with and without preceding 'dot'
for hidden files (prefer non-hidden).
"""
cfg = configparser.ConfigParser()
if path is None: # pragma: no cover
dirs = [os.curdir, os.path.expanduser('~'),
os.path.dirname(os.path.realpath(__file__))]
locations = map(os.path.abspath, dirs)
for loc in locations:
if cfg.read(os.path.join(loc, 'gpflowrc')):
break
if cfg.read(os.path.join(loc, '.gpflowrc')):
break
else:
if not cfg.read(path):
raise RuntimeError("Config at '{0}' cannot be read".format(path))
return cfg
__CONFIG = _read_config_file()
__LOADED_SETTINGS = _namedtuplify(__CONFIG._sections)
SETTINGS = _SettingsManager(__LOADED_SETTINGS) # pylint: disable=C0103
|
# coding: utf-8
import pprint
import re
import six
class UpdateRuleActionRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'action_id': 'str',
'body': 'UpdateActionReq'
}
attribute_map = {
'instance_id': 'Instance-Id',
'action_id': 'action_id',
'body': 'body'
}
def __init__(self, instance_id=None, action_id=None, body=None):
"""UpdateRuleActionRequest - a model defined in huaweicloud sdk"""
self._instance_id = None
self._action_id = None
self._body = None
self.discriminator = None
if instance_id is not None:
self.instance_id = instance_id
self.action_id = action_id
if body is not None:
self.body = body
@property
def instance_id(self):
"""Gets the instance_id of this UpdateRuleActionRequest.
:return: The instance_id of this UpdateRuleActionRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this UpdateRuleActionRequest.
:param instance_id: The instance_id of this UpdateRuleActionRequest.
:type: str
"""
self._instance_id = instance_id
@property
def action_id(self):
"""Gets the action_id of this UpdateRuleActionRequest.
:return: The action_id of this UpdateRuleActionRequest.
:rtype: str
"""
return self._action_id
@action_id.setter
def action_id(self, action_id):
"""Sets the action_id of this UpdateRuleActionRequest.
:param action_id: The action_id of this UpdateRuleActionRequest.
:type: str
"""
self._action_id = action_id
@property
def body(self):
"""Gets the body of this UpdateRuleActionRequest.
:return: The body of this UpdateRuleActionRequest.
:rtype: UpdateActionReq
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this UpdateRuleActionRequest.
:param body: The body of this UpdateRuleActionRequest.
:type: UpdateActionReq
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateRuleActionRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
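# Illustrative construction (hypothetical identifiers):
#   request = UpdateRuleActionRequest(instance_id="iot-instance-1", action_id="action-42")
#   request.to_dict()  # -> {'instance_id': 'iot-instance-1', 'action_id': 'action-42', 'body': None}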
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qsl
try:
import simplejson as json
except ImportError:
import json
from libcloud.compute.base import NodeLocation, NodeSize, NodeImage
from libcloud.common.types import ProviderError
from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver, \
CloudStackAffinityGroupType
from libcloud.compute.types import LibcloudError, Provider, InvalidCredsError
from libcloud.compute.types import KeyPairDoesNotExistError
from libcloud.compute.types import NodeState
from libcloud.compute.providers import get_driver
from libcloud.utils.py3 import assertRaisesRegex
from libcloud.test import unittest
from libcloud.test import MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
class CloudStackCommonTestCase(TestCaseMixin):
driver_klass = CloudStackNodeDriver
def setUp(self):
self.driver_klass.connectionCls.conn_class = CloudStackMockHttp
self.driver = self.driver_klass('apikey', 'secret',
path='/test/path',
host='api.dummy.com')
self.driver.path = '/test/path'
self.driver.type = -1
CloudStackMockHttp.type = None
CloudStackMockHttp.fixture_tag = 'default'
self.driver.connection.poll_interval = 0.0
def test_invalid_credentials(self):
CloudStackMockHttp.type = 'invalid_credentials'
driver = self.driver_klass('invalid', 'invalid', path='/test/path',
host='api.dummy.com')
self.assertRaises(InvalidCredsError, driver.list_nodes)
def test_import_keypair_from_string_api_error(self):
CloudStackMockHttp.type = 'api_error'
name = 'test-pair'
key_material = ''
expected_msg = 'Public key is invalid'
assertRaisesRegex(self, ProviderError, expected_msg,
self.driver.import_key_pair_from_string,
name=name, key_material=key_material)
def test_create_node_immediate_failure(self):
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
CloudStackMockHttp.fixture_tag = 'deployfail'
self.assertRaises(
Exception,
self.driver.create_node,
name='node-name', image=image, size=size)
def test_create_node_delayed_failure(self):
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
CloudStackMockHttp.fixture_tag = 'deployfail2'
self.assertRaises(
Exception,
self.driver.create_node,
name='node-name', image=image, size=size)
def test_create_node_default_location_success(self):
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
default_location = self.driver.list_locations()[0]
node = self.driver.create_node(name='fred',
image=image,
size=size)
self.assertEqual(node.name, 'fred')
self.assertEqual(node.public_ips, [])
self.assertEqual(node.private_ips, ['192.168.1.2'])
self.assertEqual(node.extra['zone_id'], default_location.id)
def test_create_node_ex_networks(self):
CloudStackMockHttp.fixture_tag = 'deploynetworks'
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
location = self.driver.list_locations()[0]
networks = [nw for nw in self.driver.ex_list_networks()
if str(nw.zoneid) == str(location.id)]
node = self.driver.create_node(name='deploynetworks',
location=location,
image=image,
size=size,
networks=networks)
self.assertEqual(node.name, 'deploynetworks')
self.assertEqual(node.extra['size_id'], size.id)
self.assertEqual(node.extra['zone_id'], location.id)
self.assertEqual(node.extra['image_id'], image.id)
self.assertEqual(len(node.private_ips), 2)
def test_create_node_ex_ipaddress(self):
CloudStackMockHttp.fixture_tag = 'deployip'
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
location = self.driver.list_locations()[0]
ipaddress = '10.1.0.128'
networks = [nw for nw in self.driver.ex_list_networks()
if str(nw.zoneid) == str(location.id)]
node = self.driver.create_node(name='deployip',
location=location,
image=image,
size=size,
networks=networks,
ex_ip_address=ipaddress)
self.assertEqual(node.name, 'deployip')
self.assertEqual(node.extra['size_id'], size.id)
self.assertEqual(node.extra['zone_id'], location.id)
self.assertEqual(node.extra['image_id'], image.id)
self.assertEqual(node.private_ips[0], ipaddress)
def test_create_node_ex_rootdisksize(self):
CloudStackMockHttp.fixture_tag = 'rootdisksize'
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
location = self.driver.list_locations()[0]
volumes = self.driver.list_volumes()
rootdisksize = '50'
networks = [nw for nw in self.driver.ex_list_networks()
if str(nw.zoneid) == str(location.id)]
node = self.driver.create_node(name='rootdisksize',
location=location,
image=image,
size=size,
networks=networks,
ex_rootdisksize=rootdisksize)
self.assertEqual(node.name, 'rootdisksize')
self.assertEqual(node.extra['size_id'], size.id)
self.assertEqual(node.extra['zone_id'], location.id)
self.assertEqual(node.extra['image_id'], image.id)
self.assertEqual(1, len(volumes))
self.assertEqual('ROOT-69941', volumes[0].name)
self.assertEqual(53687091200, volumes[0].size)
def test_create_node_ex_start_vm_false(self):
CloudStackMockHttp.fixture_tag = 'stoppedvm'
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
location = self.driver.list_locations()[0]
networks = [nw for nw in self.driver.ex_list_networks()
if str(nw.zoneid) == str(location.id)]
node = self.driver.create_node(name='stopped_vm',
location=location,
image=image,
size=size,
networks=networks,
ex_start_vm=False)
self.assertEqual(node.name, 'stopped_vm')
self.assertEqual(node.extra['size_id'], size.id)
self.assertEqual(node.extra['zone_id'], location.id)
self.assertEqual(node.extra['image_id'], image.id)
self.assertEqual(node.state, NodeState.STOPPED)
def test_create_node_ex_security_groups(self):
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
location = self.driver.list_locations()[0]
sg = [sg['name'] for sg in self.driver.ex_list_security_groups()]
CloudStackMockHttp.fixture_tag = 'deploysecuritygroup'
node = self.driver.create_node(name='test',
location=location,
image=image,
size=size,
ex_security_groups=sg)
self.assertEqual(node.name, 'test')
self.assertEqual(node.extra['security_group'], sg)
self.assertEqual(node.id, 'fc4fd31a-16d3-49db-814a-56b39b9ef986')
def test_create_node_ex_keyname(self):
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
location = self.driver.list_locations()[0]
CloudStackMockHttp.fixture_tag = 'deploykeyname'
node = self.driver.create_node(name='test',
location=location,
image=image,
size=size,
ex_keyname='foobar')
self.assertEqual(node.name, 'test')
self.assertEqual(node.extra['key_name'], 'foobar')
def test_create_node_ex_userdata(self):
self.driver.path = '/test/path/userdata'
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
location = self.driver.list_locations()[0]
CloudStackMockHttp.fixture_tag = 'deploykeyname'
node = self.driver.create_node(name='test',
location=location,
image=image,
size=size,
ex_userdata='foobar')
self.assertEqual(node.name, 'test')
def test_create_node_project(self):
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
location = self.driver.list_locations()[0]
project = self.driver.ex_list_projects()[0]
CloudStackMockHttp.fixture_tag = 'deployproject'
node = self.driver.create_node(name='test',
location=location,
image=image,
size=size,
project=project)
self.assertEqual(node.name, 'TestNode')
self.assertEqual(node.extra['project'], 'Test Project')
def test_list_images_no_images_available(self):
CloudStackMockHttp.fixture_tag = 'notemplates'
images = self.driver.list_images()
self.assertEqual(0, len(images))
def test_list_images(self):
_, fixture = self.driver.connection.connection._load_fixture(
'listTemplates_default.json')
templates = fixture['listtemplatesresponse']['template']
images = self.driver.list_images()
for i, image in enumerate(images):
# NodeImage expects id to be a string,
# the CloudStack fixture has an int
tid = str(templates[i]['id'])
tname = templates[i]['name']
self.assertIsInstance(image.driver, CloudStackNodeDriver)
self.assertEqual(image.id, tid)
self.assertEqual(image.name, tname)
def test_ex_list_disk_offerings(self):
diskOfferings = self.driver.ex_list_disk_offerings()
self.assertEqual(1, len(diskOfferings))
diskOffering, = diskOfferings
self.assertEqual('Disk offer 1', diskOffering.name)
self.assertEqual(10, diskOffering.size)
def test_ex_list_networks(self):
_, fixture = self.driver.connection.connection._load_fixture(
'listNetworks_default.json')
fixture_networks = fixture['listnetworksresponse']['network']
networks = self.driver.ex_list_networks()
for i, network in enumerate(networks):
self.assertEqual(network.id, fixture_networks[i]['id'])
self.assertEqual(
network.displaytext, fixture_networks[i]['displaytext'])
self.assertEqual(network.name, fixture_networks[i]['name'])
self.assertEqual(
network.networkofferingid,
fixture_networks[i]['networkofferingid'])
self.assertEqual(network.zoneid, fixture_networks[i]['zoneid'])
def test_ex_list_network_offerings(self):
_, fixture = self.driver.connection.connection._load_fixture(
'listNetworkOfferings_default.json')
fixture_networkoffers = \
fixture['listnetworkofferingsresponse']['networkoffering']
networkoffers = self.driver.ex_list_network_offerings()
for i, networkoffer in enumerate(networkoffers):
self.assertEqual(networkoffer.id, fixture_networkoffers[i]['id'])
self.assertEqual(networkoffer.name,
fixture_networkoffers[i]['name'])
self.assertEqual(networkoffer.display_text,
fixture_networkoffers[i]['displaytext'])
self.assertEqual(networkoffer.for_vpc,
fixture_networkoffers[i]['forvpc'])
self.assertEqual(networkoffer.guest_ip_type,
fixture_networkoffers[i]['guestiptype'])
self.assertEqual(networkoffer.service_offering_id,
fixture_networkoffers[i]['serviceofferingid'])
def test_ex_create_network(self):
_, fixture = self.driver.connection.connection._load_fixture(
'createNetwork_default.json')
fixture_network = fixture['createnetworkresponse']['network']
netoffer = self.driver.ex_list_network_offerings()[0]
location = self.driver.list_locations()[0]
network = self.driver.ex_create_network(display_text='test',
name='test',
network_offering=netoffer,
location=location,
gateway='10.1.1.1',
netmask='255.255.255.0',
network_domain='cloud.local',
vpc_id="2",
project_id="2")
self.assertEqual(network.name, fixture_network['name'])
self.assertEqual(network.displaytext, fixture_network['displaytext'])
self.assertEqual(network.id, fixture_network['id'])
self.assertEqual(network.extra['gateway'], fixture_network['gateway'])
self.assertEqual(network.extra['netmask'], fixture_network['netmask'])
self.assertEqual(network.networkofferingid,
fixture_network['networkofferingid'])
self.assertEqual(network.extra['vpc_id'], fixture_network['vpcid'])
self.assertEqual(network.extra['project_id'],
fixture_network['projectid'])
def test_ex_delete_network(self):
network = self.driver.ex_list_networks()[0]
result = self.driver.ex_delete_network(network=network)
self.assertTrue(result)
def test_ex_list_nics(self):
_, fixture = self.driver.connection.connection._load_fixture(
'listNics_default.json')
fixture_nic = fixture['listnicsresponse']['nic']
vm = self.driver.list_nodes()[0]
nics = self.driver.ex_list_nics(vm)
for i, nic in enumerate(nics):
self.assertEqual(nic.id, fixture_nic[i]['id'])
self.assertEqual(nic.network_id,
fixture_nic[i]['networkid'])
self.assertEqual(nic.net_mask,
fixture_nic[i]['netmask'])
self.assertEqual(nic.gateway,
fixture_nic[i]['gateway'])
self.assertEqual(nic.ip_address,
fixture_nic[i]['ipaddress'])
self.assertEqual(nic.is_default,
fixture_nic[i]['isdefault'])
self.assertEqual(nic.mac_address,
fixture_nic[i]['macaddress'])
def test_ex_add_nic_to_node(self):
vm = self.driver.list_nodes()[0]
network = self.driver.ex_list_networks()[0]
ip = "10.1.4.123"
result = self.driver.ex_attach_nic_to_node(node=vm, network=network, ip_address=ip)
self.assertTrue(result)
def test_ex_remove_nic_from_node(self):
vm = self.driver.list_nodes()[0]
nic = self.driver.ex_list_nics(node=vm)[0]
result = self.driver.ex_detach_nic_from_node(node=vm, nic=nic)
self.assertTrue(result)
def test_ex_list_vpc_offerings(self):
_, fixture = self.driver.connection.connection._load_fixture(
'listVPCOfferings_default.json')
fixture_vpcoffers = \
fixture['listvpcofferingsresponse']['vpcoffering']
vpcoffers = self.driver.ex_list_vpc_offerings()
for i, vpcoffer in enumerate(vpcoffers):
self.assertEqual(vpcoffer.id, fixture_vpcoffers[i]['id'])
self.assertEqual(vpcoffer.name,
fixture_vpcoffers[i]['name'])
self.assertEqual(vpcoffer.display_text,
fixture_vpcoffers[i]['displaytext'])
def test_ex_list_vpcs(self):
_, fixture = self.driver.connection.connection._load_fixture(
'listVPCs_default.json')
fixture_vpcs = fixture['listvpcsresponse']['vpc']
vpcs = self.driver.ex_list_vpcs()
for i, vpc in enumerate(vpcs):
self.assertEqual(vpc.id, fixture_vpcs[i]['id'])
self.assertEqual(vpc.display_text, fixture_vpcs[i]['displaytext'])
self.assertEqual(vpc.name, fixture_vpcs[i]['name'])
self.assertEqual(vpc.vpc_offering_id,
fixture_vpcs[i]['vpcofferingid'])
self.assertEqual(vpc.zone_id, fixture_vpcs[i]['zoneid'])
def test_ex_list_routers(self):
_, fixture = self.driver.connection.connection._load_fixture(
'listRouters_default.json')
fixture_routers = fixture['listroutersresponse']['router']
routers = self.driver.ex_list_routers()
for i, router in enumerate(routers):
self.assertEqual(router.id, fixture_routers[i]['id'])
self.assertEqual(router.name, fixture_routers[i]['name'])
self.assertEqual(router.state, fixture_routers[i]['state'])
self.assertEqual(router.public_ip, fixture_routers[i]['publicip'])
self.assertEqual(router.vpc_id, fixture_routers[i]['vpcid'])
def test_ex_create_vpc(self):
_, fixture = self.driver.connection.connection._load_fixture(
'createVPC_default.json')
fixture_vpc = fixture['createvpcresponse']
vpcoffer = self.driver.ex_list_vpc_offerings()[0]
vpc = self.driver.ex_create_vpc(cidr='10.1.1.0/16',
display_text='cloud.local',
name='cloud.local',
vpc_offering=vpcoffer,
zone_id="2")
self.assertEqual(vpc.id, fixture_vpc['id'])
def test_ex_delete_vpc(self):
vpc = self.driver.ex_list_vpcs()[0]
result = self.driver.ex_delete_vpc(vpc=vpc)
self.assertTrue(result)
def test_ex_create_network_acllist(self):
_, fixture = self.driver.connection.connection._load_fixture(
'createNetworkACLList_default.json')
fixture_network_acllist = fixture['createnetworkacllistresponse']
vpc = self.driver.ex_list_vpcs()[0]
network_acllist = self.driver.ex_create_network_acllist(
name='test_acllist',
vpc_id=vpc.id,
description='test description')
self.assertEqual(network_acllist.id, fixture_network_acllist['id'])
def test_ex_list_network_acllist(self):
_, fixture = self.driver.connection.connection._load_fixture(
'listNetworkACLLists_default.json')
fixture_acllist = \
fixture['listnetworkacllistsresponse']['networkacllist']
acllist = self.driver.ex_list_network_acllists()
for i, acllist in enumerate(acllist):
self.assertEqual(acllist.id,
fixture_acllist[i]['id'])
self.assertEqual(acllist.name,
fixture_acllist[i]['name'])
self.assertEqual(acllist.description,
fixture_acllist[i]['description'])
def test_ex_create_network_acl(self):
_, fixture = self.driver.connection.connection._load_fixture(
'createNetworkACL_default.json')
fixture_network_acllist = fixture['createnetworkaclresponse']
acllist = self.driver.ex_list_network_acllists()[0]
network_acl = self.driver.ex_create_network_acl(
protocol='test_acllist',
acl_id=acllist.id,
cidr_list='',
start_port='80',
end_port='80')
self.assertEqual(network_acl.id, fixture_network_acllist['id'])
def test_ex_list_projects(self):
_, fixture = self.driver.connection.connection._load_fixture(
'listProjects_default.json')
fixture_projects = fixture['listprojectsresponse']['project']
projects = self.driver.ex_list_projects()
for i, project in enumerate(projects):
self.assertEqual(project.id, fixture_projects[i]['id'])
self.assertEqual(
project.display_text, fixture_projects[i]['displaytext'])
self.assertEqual(project.name, fixture_projects[i]['name'])
self.assertEqual(
project.extra['domainid'],
fixture_projects[i]['domainid'])
self.assertEqual(
project.extra['cpulimit'],
fixture_projects[i]['cpulimit'])
# Note -1 represents unlimited
self.assertEqual(project.extra['networklimit'], -1)
def test_create_volume(self):
volumeName = 'vol-0'
location = self.driver.list_locations()[0]
volume = self.driver.create_volume(10, volumeName, location)
self.assertEqual(volumeName, volume.name)
self.assertEqual(10, volume.size)
def test_create_volume_no_noncustomized_offering_with_size(self):
"""If the sizes of disk offerings are not configurable and there
are no disk offerings with the requested size, an exception should
be thrown."""
location = self.driver.list_locations()[0]
self.assertRaises(
LibcloudError,
self.driver.create_volume,
'vol-0', location, 11)
def test_create_volume_with_custom_disk_size_offering(self):
CloudStackMockHttp.fixture_tag = 'withcustomdisksize'
volumeName = 'vol-0'
location = self.driver.list_locations()[0]
volume = self.driver.create_volume(10, volumeName, location)
self.assertEqual(volumeName, volume.name)
def test_create_volume_no_matching_volume_type(self):
"""If the ex_disk_type does not exit, then an exception should be
thrown."""
location = self.driver.list_locations()[0]
self.assertRaises(
LibcloudError,
self.driver.create_volume,
'vol-0', location, 11, ex_volume_type='FooVolumeType')
def test_create_volume_with_defined_volume_type(self):
CloudStackMockHttp.fixture_tag = 'withvolumetype'
volumeName = 'vol-0'
volLocation = self.driver.list_locations()[0]
diskOffering = self.driver.ex_list_disk_offerings()[0]
volumeType = diskOffering.name
volume = self.driver.create_volume(10, volumeName, location=volLocation,
ex_volume_type=volumeType)
self.assertEqual(volumeName, volume.name)
def test_attach_volume(self):
node = self.driver.list_nodes()[0]
volumeName = 'vol-0'
location = self.driver.list_locations()[0]
volume = self.driver.create_volume(10, volumeName, location)
attachReturnVal = self.driver.attach_volume(volume, node)
self.assertTrue(attachReturnVal)
def test_detach_volume(self):
volumeName = 'gre-test-volume'
location = self.driver.list_locations()[0]
volume = self.driver.create_volume(10, volumeName, location)
res = self.driver.detach_volume(volume)
self.assertTrue(res)
def test_destroy_volume(self):
volumeName = 'gre-test-volume'
location = self.driver.list_locations()[0]
volume = self.driver.create_volume(10, volumeName, location)
res = self.driver.destroy_volume(volume)
self.assertTrue(res)
def test_list_volumes(self):
volumes = self.driver.list_volumes()
self.assertEqual(1, len(volumes))
self.assertEqual('ROOT-69942', volumes[0].name)
def test_ex_get_volume(self):
volume = self.driver.ex_get_volume(2600)
self.assertEqual('ROOT-69942', volume.name)
def test_list_nodes(self):
nodes = self.driver.list_nodes()
self.assertEqual(2, len(nodes))
self.assertEqual('test', nodes[0].name)
self.assertEqual('2600', nodes[0].id)
self.assertEqual(0, len(nodes[0].private_ips))
self.assertEqual([], nodes[0].extra['security_group'])
self.assertEqual(None, nodes[0].extra['key_name'])
self.assertEqual(1, len(nodes[0].public_ips))
self.assertEqual('1.1.1.116', nodes[0].public_ips[0])
self.assertEqual(1, len(nodes[0].extra['ip_addresses']))
self.assertEqual(34000, nodes[0].extra['ip_addresses'][0].id)
self.assertEqual(1, len(nodes[0].extra['ip_forwarding_rules']))
self.assertEqual('772fd410-6649-43ed-befa-77be986b8906',
nodes[0].extra['ip_forwarding_rules'][0].id)
self.assertEqual(1, len(nodes[0].extra['port_forwarding_rules']))
self.assertEqual('bc7ea3ee-a2c3-4b86-a53f-01bdaa1b2e32',
nodes[0].extra['port_forwarding_rules'][0].id)
self.assertEqual({"testkey": "testvalue", "foo": "bar"},
nodes[0].extra['tags'])
def test_list_nodes_location_filter(self):
def list_nodes_mock(self, **kwargs):
self.assertTrue('zoneid' in kwargs)
self.assertEqual('1', kwargs.get('zoneid'))
body, obj = self._load_fixture('listVirtualMachines_default.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
CloudStackMockHttp._cmd_listVirtualMachines = list_nodes_mock
try:
location = NodeLocation(1, 'Sydney', 'Unknown', self.driver)
self.driver.list_nodes(location=location)
finally:
del CloudStackMockHttp._cmd_listVirtualMachines
def test_list_nodes_noipaddress_filter(self):
def list_nodes_mock(self, **kwargs):
body, obj = self._load_fixture('listVirtualMachines_noipaddress.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
CloudStackMockHttp._cmd_listVirtualMachines = list_nodes_mock
try:
self.driver.list_nodes()
finally:
del CloudStackMockHttp._cmd_listVirtualMachines
def test_ex_get_node(self):
node = self.driver.ex_get_node(2600)
self.assertEqual('test', node.name)
self.assertEqual('2600', node.id)
self.assertEqual([], node.extra['security_group'])
self.assertEqual(None, node.extra['key_name'])
self.assertEqual(1, len(node.public_ips))
self.assertEqual('1.1.1.116', node.public_ips[0])
self.assertEqual(1, len(node.extra['ip_addresses']))
self.assertEqual(34000, node.extra['ip_addresses'][0].id)
def test_ex_get_node_doesnt_exist(self):
self.assertRaises(Exception, self.driver.ex_get_node(26), node_id=26)
def test_list_locations(self):
location = self.driver.list_locations()[0]
self.assertEqual('1', location.id)
self.assertEqual('Sydney', location.name)
def test_list_sizes(self):
sizes = self.driver.list_sizes()
self.assertEqual('Compute Micro PRD', sizes[0].name)
self.assertEqual('105', sizes[0].id)
self.assertEqual(384, sizes[0].ram)
self.assertEqual('Compute Large PRD', sizes[2].name)
self.assertEqual('69', sizes[2].id)
self.assertEqual(6964, sizes[2].ram)
def test_ex_start_node(self):
node = self.driver.list_nodes()[0]
res = node.ex_start()
self.assertEqual('Starting', res)
def test_ex_stop_node(self):
node = self.driver.list_nodes()[0]
res = node.ex_stop()
self.assertEqual('Stopped', res)
def test_destroy_node(self):
node = self.driver.list_nodes()[0]
res = node.destroy()
self.assertTrue(res)
def test_expunge_node(self):
node = self.driver.list_nodes()[0]
res = self.driver.destroy_node(node, ex_expunge=True)
self.assertTrue(res)
def test_reboot_node(self):
node = self.driver.list_nodes()[0]
res = node.reboot()
self.assertTrue(res)
def test_list_key_pairs(self):
keypairs = self.driver.list_key_pairs()
fingerprint = '00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:' + \
'00:00:00:00:00'
self.assertEqual(keypairs[0].name, 'cs-keypair')
self.assertEqual(keypairs[0].fingerprint, fingerprint)
# Test old and deprecated way
keypairs = self.driver.ex_list_keypairs()
self.assertEqual(keypairs[0]['name'], 'cs-keypair')
self.assertEqual(keypairs[0]['fingerprint'], fingerprint)
def test_list_key_pairs_no_keypair_key(self):
CloudStackMockHttp.fixture_tag = 'no_keys'
keypairs = self.driver.list_key_pairs()
self.assertEqual(keypairs, [])
def test_get_key_pair(self):
CloudStackMockHttp.fixture_tag = 'get_one'
key_pair = self.driver.get_key_pair(name='cs-keypair')
self.assertEqual(key_pair.name, 'cs-keypair')
def test_get_key_pair_doesnt_exist(self):
CloudStackMockHttp.fixture_tag = 'get_one_doesnt_exist'
self.assertRaises(KeyPairDoesNotExistError, self.driver.get_key_pair,
name='does-not-exist')
def test_create_keypair(self):
key_pair = self.driver.create_key_pair(name='test-keypair')
self.assertEqual(key_pair.name, 'test-keypair')
self.assertTrue(key_pair.fingerprint is not None)
self.assertTrue(key_pair.private_key is not None)
# Test old and deprecated way
res = self.driver.ex_create_keypair(name='test-keypair')
self.assertEqual(res['name'], 'test-keypair')
self.assertTrue(res['fingerprint'] is not None)
self.assertTrue(res['privateKey'] is not None)
def test_import_keypair_from_file(self):
fingerprint = 'c4:a1:e5:d4:50:84:a9:4c:6b:22:ee:d6:57:02:b8:15'
path = os.path.join(os.path.dirname(__file__), 'fixtures',
'cloudstack',
'dummy_rsa.pub')
key_pair = self.driver.import_key_pair_from_file('foobar', path)
self.assertEqual(key_pair.name, 'foobar')
self.assertEqual(key_pair.fingerprint, fingerprint)
# Test old and deprecated way
res = self.driver.ex_import_keypair('foobar', path)
self.assertEqual(res['keyName'], 'foobar')
self.assertEqual(res['keyFingerprint'], fingerprint)
def test_ex_import_keypair_from_string(self):
fingerprint = 'c4:a1:e5:d4:50:84:a9:4c:6b:22:ee:d6:57:02:b8:15'
path = os.path.join(os.path.dirname(__file__), 'fixtures',
'cloudstack',
'dummy_rsa.pub')
fh = open(path)
key_material = fh.read()
fh.close()
key_pair = self.driver.import_key_pair_from_string('foobar', key_material=key_material)
self.assertEqual(key_pair.name, 'foobar')
self.assertEqual(key_pair.fingerprint, fingerprint)
# Test old and deprecated way
res = self.driver.ex_import_keypair_from_string('foobar', key_material=key_material)
self.assertEqual(res['keyName'], 'foobar')
self.assertEqual(res['keyFingerprint'], fingerprint)
def test_delete_key_pair(self):
key_pair = self.driver.list_key_pairs()[0]
res = self.driver.delete_key_pair(key_pair=key_pair)
self.assertTrue(res)
# Test old and deprecated way
res = self.driver.ex_delete_keypair(keypair='cs-keypair')
self.assertTrue(res)
def test_ex_list_security_groups(self):
groups = self.driver.ex_list_security_groups()
self.assertEqual(2, len(groups))
self.assertEqual(groups[0]['name'], 'default')
self.assertEqual(groups[1]['name'], 'mongodb')
def test_ex_list_security_groups_no_securitygroup_key(self):
CloudStackMockHttp.fixture_tag = 'no_groups'
groups = self.driver.ex_list_security_groups()
self.assertEqual(groups, [])
def test_ex_create_security_group(self):
group = self.driver.ex_create_security_group(name='MySG')
self.assertEqual(group['name'], 'MySG')
def test_ex_delete_security_group(self):
res = self.driver.ex_delete_security_group(name='MySG')
self.assertTrue(res)
def test_ex_authorize_security_group_ingress(self):
res = self.driver.ex_authorize_security_group_ingress('test_sg',
'udp',
'0.0.0.0/0',
'0',
'65535')
self.assertEqual(res.get('name'), 'test_sg')
self.assertTrue('ingressrule' in res)
rules = res['ingressrule']
self.assertEqual(len(rules), 1)
rule = rules[0]
self.assertEqual(rule['cidr'], '0.0.0.0/0')
self.assertEqual(rule['endport'], 65535)
self.assertEqual(rule['protocol'], 'udp')
self.assertEqual(rule['startport'], 0)
def test_ex_create_affinity_group(self):
res = self.driver.ex_create_affinity_group('MyAG2',
CloudStackAffinityGroupType('MyAGType'))
self.assertEqual(res.name, 'MyAG2')
self.assertIsInstance(res.type, CloudStackAffinityGroupType)
self.assertEqual(res.type.type, 'MyAGType')
def test_ex_create_affinity_group_already_exists(self):
self.assertRaises(LibcloudError,
self.driver.ex_create_affinity_group,
'MyAG', CloudStackAffinityGroupType('MyAGType'))
def test_delete_ex_affinity_group(self):
afg = self.driver.ex_create_affinity_group('MyAG3',
CloudStackAffinityGroupType('MyAGType'))
res = self.driver.ex_delete_affinity_group(afg)
self.assertTrue(res)
def test_ex_update_node_affinity_group(self):
affinity_group_list = self.driver.ex_list_affinity_groups()
nodes = self.driver.list_nodes()
node = self.driver.ex_update_node_affinity_group(nodes[0],
affinity_group_list)
self.assertEqual(node.extra['affinity_group'][0],
affinity_group_list[0].id)
def test_ex_list_affinity_groups(self):
res = self.driver.ex_list_affinity_groups()
self.assertEqual(len(res), 1)
self.assertEqual(res[0].id, '11112')
self.assertEqual(res[0].name, 'MyAG')
self.assertIsInstance(res[0].type, CloudStackAffinityGroupType)
self.assertEqual(res[0].type.type, 'MyAGType')
def test_ex_list_affinity_group_types(self):
res = self.driver.ex_list_affinity_group_types()
self.assertEqual(len(res), 1)
self.assertIsInstance(res[0], CloudStackAffinityGroupType)
self.assertEqual(res[0].type, 'MyAGType')
def test_ex_list_public_ips(self):
ips = self.driver.ex_list_public_ips()
self.assertEqual(ips[0].address, '1.1.1.116')
self.assertEqual(ips[0].virtualmachine_id, '2600')
def test_ex_allocate_public_ip(self):
addr = self.driver.ex_allocate_public_ip()
self.assertEqual(addr.address, '7.5.6.1')
self.assertEqual(addr.id, '10987171-8cc9-4d0a-b98f-1698c09ddd2d')
def test_ex_release_public_ip(self):
addresses = self.driver.ex_list_public_ips()
res = self.driver.ex_release_public_ip(addresses[0])
self.assertTrue(res)
def test_ex_create_port_forwarding_rule(self):
node = self.driver.list_nodes()[0]
address = self.driver.ex_list_public_ips()[0]
private_port = 33
private_end_port = 34
public_port = 33
public_end_port = 34
openfirewall = True
protocol = 'TCP'
rule = self.driver.ex_create_port_forwarding_rule(node,
address,
private_port,
public_port,
protocol,
public_end_port,
private_end_port,
openfirewall)
self.assertEqual(rule.address, address)
self.assertEqual(rule.protocol, protocol)
self.assertEqual(rule.public_port, public_port)
self.assertEqual(rule.public_end_port, public_end_port)
self.assertEqual(rule.private_port, private_port)
self.assertEqual(rule.private_end_port, private_end_port)
def test_ex_list_firewall_rules(self):
rules = self.driver.ex_list_firewall_rules()
self.assertEqual(len(rules), 1)
rule = rules[0]
self.assertEqual(rule.address.address, '1.1.1.116')
self.assertEqual(rule.protocol, 'tcp')
self.assertEqual(rule.cidr_list, '192.168.0.0/16')
self.assertIsNone(rule.icmp_code)
self.assertIsNone(rule.icmp_type)
self.assertEqual(rule.start_port, '33')
self.assertEqual(rule.end_port, '34')
def test_ex_list_firewall_rules_icmp(self):
CloudStackMockHttp.fixture_tag = 'firewallicmp'
rules = self.driver.ex_list_firewall_rules()
self.assertEqual(len(rules), 1)
rule = rules[0]
self.assertEqual(rule.address.address, '1.1.1.116')
self.assertEqual(rule.protocol, 'icmp')
self.assertEqual(rule.cidr_list, '192.168.0.0/16')
self.assertEqual(rule.icmp_code, 0)
self.assertEqual(rule.icmp_type, 8)
self.assertIsNone(rule.start_port)
self.assertIsNone(rule.end_port)
def test_ex_delete_firewall_rule(self):
rules = self.driver.ex_list_firewall_rules()
res = self.driver.ex_delete_firewall_rule(rules[0])
self.assertTrue(res)
def test_ex_create_firewall_rule(self):
address = self.driver.ex_list_public_ips()[0]
cidr_list = '192.168.0.0/16'
protocol = 'TCP'
start_port = 33
end_port = 34
rule = self.driver.ex_create_firewall_rule(address,
cidr_list,
protocol,
start_port=start_port,
end_port=end_port)
self.assertEqual(rule.address, address)
self.assertEqual(rule.protocol, protocol)
self.assertIsNone(rule.icmp_code)
self.assertIsNone(rule.icmp_type)
self.assertEqual(rule.start_port, start_port)
self.assertEqual(rule.end_port, end_port)
def test_ex_create_firewall_rule_icmp(self):
address = self.driver.ex_list_public_ips()[0]
cidr_list = '192.168.0.0/16'
protocol = 'icmp'
icmp_code = 0
icmp_type = 8
rule = self.driver.ex_create_firewall_rule(address,
cidr_list,
protocol,
icmp_code=icmp_code,
icmp_type=icmp_type)
self.assertEqual(rule.address, address)
self.assertEqual(rule.protocol, protocol)
self.assertEqual(rule.icmp_code, 0)
self.assertEqual(rule.icmp_type, 8)
self.assertIsNone(rule.start_port)
self.assertIsNone(rule.end_port)
def test_ex_list_egress_firewall_rules(self):
rules = self.driver.ex_list_egress_firewall_rules()
self.assertEqual(len(rules), 1)
rule = rules[0]
self.assertEqual(rule.network_id, '874be2ca-20a7-4360-80e9-7356c0018c0b')
self.assertEqual(rule.cidr_list, '192.168.0.0/16')
self.assertEqual(rule.protocol, 'tcp')
self.assertIsNone(rule.icmp_code)
self.assertIsNone(rule.icmp_type)
self.assertEqual(rule.start_port, '80')
self.assertEqual(rule.end_port, '80')
def test_ex_delete_egress_firewall_rule(self):
rules = self.driver.ex_list_egress_firewall_rules()
res = self.driver.ex_delete_egress_firewall_rule(rules[0])
self.assertTrue(res)
def test_ex_create_egress_firewall_rule(self):
network_id = '874be2ca-20a7-4360-80e9-7356c0018c0b'
cidr_list = '192.168.0.0/16'
protocol = 'TCP'
start_port = 33
end_port = 34
rule = self.driver.ex_create_egress_firewall_rule(
network_id,
cidr_list,
protocol,
start_port=start_port,
end_port=end_port)
self.assertEqual(rule.network_id, network_id)
self.assertEqual(rule.cidr_list, cidr_list)
self.assertEqual(rule.protocol, protocol)
self.assertIsNone(rule.icmp_code)
self.assertIsNone(rule.icmp_type)
self.assertEqual(rule.start_port, start_port)
self.assertEqual(rule.end_port, end_port)
def test_ex_list_port_forwarding_rules(self):
rules = self.driver.ex_list_port_forwarding_rules()
self.assertEqual(len(rules), 1)
rule = rules[0]
self.assertTrue(rule.node)
self.assertEqual(rule.protocol, 'tcp')
self.assertEqual(rule.public_port, '33')
self.assertEqual(rule.public_end_port, '34')
self.assertEqual(rule.private_port, '33')
self.assertEqual(rule.private_end_port, '34')
self.assertEqual(rule.address.address, '1.1.1.116')
def test_ex_delete_port_forwarding_rule(self):
node = self.driver.list_nodes()[0]
rule = self.driver.ex_list_port_forwarding_rules()[0]
res = self.driver.ex_delete_port_forwarding_rule(node, rule)
self.assertTrue(res)
def test_node_ex_delete_port_forwarding_rule(self):
node = self.driver.list_nodes()[0]
self.assertEqual(len(node.extra['port_forwarding_rules']), 1)
node.extra['port_forwarding_rules'][0].delete()
self.assertEqual(len(node.extra['port_forwarding_rules']), 0)
def test_node_ex_create_port_forwarding_rule(self):
node = self.driver.list_nodes()[0]
self.assertEqual(len(node.extra['port_forwarding_rules']), 1)
address = self.driver.ex_list_public_ips()[0]
private_port = 33
private_end_port = 34
public_port = 33
public_end_port = 34
openfirewall = True
protocol = 'TCP'
rule = node.ex_create_port_forwarding_rule(address,
private_port,
public_port,
protocol,
public_end_port,
private_end_port,
openfirewall)
self.assertEqual(rule.address, address)
self.assertEqual(rule.protocol, protocol)
self.assertEqual(rule.public_port, public_port)
self.assertEqual(rule.public_end_port, public_end_port)
self.assertEqual(rule.private_port, private_port)
self.assertEqual(rule.private_end_port, private_end_port)
self.assertEqual(len(node.extra['port_forwarding_rules']), 2)
def test_ex_list_ip_forwarding_rules(self):
rules = self.driver.ex_list_ip_forwarding_rules()
self.assertEqual(len(rules), 1)
rule = rules[0]
self.assertTrue(rule.node)
self.assertEqual(rule.protocol, 'tcp')
self.assertEqual(rule.start_port, 33)
self.assertEqual(rule.end_port, 34)
self.assertEqual(rule.address.address, '1.1.1.116')
def test_ex_limits(self):
limits = self.driver.ex_limits()
self.assertEqual(limits['max_images'], 20)
self.assertEqual(limits['max_networks'], 20)
self.assertEqual(limits['max_public_ips'], -1)
self.assertEqual(limits['max_vpc'], 20)
self.assertEqual(limits['max_instances'], 20)
self.assertEqual(limits['max_projects'], -1)
self.assertEqual(limits['max_volumes'], 20)
self.assertEqual(limits['max_snapshots'], 20)
def test_ex_create_tags(self):
node = self.driver.list_nodes()[0]
tags = {'Region': 'Canada'}
resp = self.driver.ex_create_tags([node.id], 'UserVm', tags)
self.assertTrue(resp)
def test_ex_delete_tags(self):
node = self.driver.list_nodes()[0]
tag_keys = ['Region']
resp = self.driver.ex_delete_tags([node.id], 'UserVm', tag_keys)
self.assertTrue(resp)
def test_list_snapshots(self):
snapshots = self.driver.list_snapshots()
self.assertEqual(len(snapshots), 3)
snap = snapshots[0]
self.assertEqual(snap.id, 188402)
self.assertEqual(snap.extra['name'], "i-123-87654-VM_ROOT-12344_20140917105548")
self.assertEqual(snap.extra['volume_id'], 89341)
def test_create_volume_snapshot(self):
volume = self.driver.list_volumes()[0]
snapshot = self.driver.create_volume_snapshot(volume)
self.assertEqual(snapshot.id, 190547)
self.assertEqual(snapshot.extra['name'], "i-123-87654-VM_ROOT-23456_20140917105548")
self.assertEqual(snapshot.extra['volume_id'], "fe1ada16-57a0-40ae-b577-01a153690fb4")
def test_destroy_volume_snapshot(self):
snapshot = self.driver.list_snapshots()[0]
resp = self.driver.destroy_volume_snapshot(snapshot)
self.assertTrue(resp)
def test_ex_create_snapshot_template(self):
snapshot = self.driver.list_snapshots()[0]
template = self.driver.ex_create_snapshot_template(snapshot, "test-libcloud-template", 99)
self.assertEqual(template.id, '10260')
self.assertEqual(template.name, "test-libcloud-template")
self.assertEqual(template.extra['displaytext'], "test-libcloud-template")
self.assertEqual(template.extra['hypervisor'], "VMware")
self.assertEqual(template.extra['os'], "Other Linux (64-bit)")
def test_ex_list_os_types(self):
os_types = self.driver.ex_list_os_types()
self.assertEqual(len(os_types), 146)
self.assertEqual(os_types[0]['id'], 69)
self.assertEqual(os_types[0]['oscategoryid'], 7)
self.assertEqual(os_types[0]['description'], "Asianux 3(32-bit)")
def test_ex_list_vpn_gateways(self):
vpn_gateways = self.driver.ex_list_vpn_gateways()
self.assertEqual(len(vpn_gateways), 1)
self.assertEqual(vpn_gateways[0].id, 'cffa0cab-d1da-42a7-92f6-41379267a29f')
self.assertEqual(vpn_gateways[0].account, 'some_account')
self.assertEqual(vpn_gateways[0].domain, 'some_domain')
self.assertEqual(vpn_gateways[0].domain_id, '9b397dea-25ef-4c5d-b47d-627eaebe8ed8')
self.assertEqual(vpn_gateways[0].public_ip, '1.2.3.4')
self.assertEqual(vpn_gateways[0].vpc_id, '4d25e181-8850-4d52-8ecb-a6f35bbbabde')
def test_ex_create_vpn_gateway(self):
vpc = self.driver.ex_list_vpcs()[0]
vpn_gateway = self.driver.ex_create_vpn_gateway(vpc)
self.assertEqual(vpn_gateway.id, '5ef6794e-cec8-4018-9fef-c4dacbadee14')
self.assertEqual(vpn_gateway.account, 'some_account')
self.assertEqual(vpn_gateway.domain, 'some_domain')
self.assertEqual(vpn_gateway.domain_id, '9b397dea-25ef-4c5d-b47d-627eaebe8ed8')
self.assertEqual(vpn_gateway.public_ip, '2.3.4.5')
self.assertEqual(vpn_gateway.vpc_id, vpc.id)
def test_ex_delete_vpn_gateway(self):
vpn_gateway = self.driver.ex_list_vpn_gateways()[0]
self.assertTrue(vpn_gateway.delete())
def test_ex_list_vpn_customer_gateways(self):
vpn_customer_gateways = self.driver.ex_list_vpn_customer_gateways()
self.assertEqual(len(vpn_customer_gateways), 1)
self.assertEqual(vpn_customer_gateways[0].id, 'ea67eaae-1c2a-4e65-b910-441e77f69bea')
self.assertEqual(vpn_customer_gateways[0].cidr_list, '10.2.2.0/24')
self.assertEqual(vpn_customer_gateways[0].esp_policy, '3des-md5')
self.assertEqual(vpn_customer_gateways[0].gateway, '10.2.2.1')
self.assertEqual(vpn_customer_gateways[0].ike_policy, '3des-md5')
self.assertEqual(vpn_customer_gateways[0].ipsec_psk, 'some_psk')
def test_ex_create_vpn_customer_gateway(self):
vpn_customer_gateway = self.driver.ex_create_vpn_customer_gateway(
cidr_list='10.0.0.0/24',
esp_policy='3des-md5',
gateway='10.0.0.1',
ike_policy='3des-md5',
ipsec_psk='ipsecpsk')
self.assertEqual(vpn_customer_gateway.id, 'cef3c766-116a-4e83-9844-7d08ab7d3fd4')
self.assertEqual(vpn_customer_gateway.esp_policy, '3des-md5')
self.assertEqual(vpn_customer_gateway.gateway, '10.0.0.1')
self.assertEqual(vpn_customer_gateway.ike_policy, '3des-md5')
self.assertEqual(vpn_customer_gateway.ipsec_psk, 'ipsecpsk')
def test_ex_ex_delete_vpn_customer_gateway(self):
vpn_customer_gateway = self.driver.ex_list_vpn_customer_gateways()[0]
self.assertTrue(vpn_customer_gateway.delete())
def test_ex_list_vpn_connections(self):
vpn_connections = self.driver.ex_list_vpn_connections()
self.assertEqual(len(vpn_connections), 1)
self.assertEqual(vpn_connections[0].id, '8f482d9a-6cee-453b-9e78-b0e1338ffce9')
self.assertEqual(vpn_connections[0].passive, False)
self.assertEqual(vpn_connections[0].vpn_customer_gateway_id, 'ea67eaae-1c2a-4e65-b910-441e77f69bea')
self.assertEqual(vpn_connections[0].vpn_gateway_id, 'cffa0cab-d1da-42a7-92f6-41379267a29f')
self.assertEqual(vpn_connections[0].state, 'Connected')
def test_ex_create_vpn_connection(self):
vpn_customer_gateway = self.driver.ex_list_vpn_customer_gateways()[0]
vpn_gateway = self.driver.ex_list_vpn_gateways()[0]
vpn_connection = self.driver.ex_create_vpn_connection(
vpn_customer_gateway,
vpn_gateway)
self.assertEqual(vpn_connection.id, 'f45c3af8-f909-4f16-9d40-ed4409c575f8')
self.assertEqual(vpn_connection.passive, False)
self.assertEqual(vpn_connection.vpn_customer_gateway_id, 'ea67eaae-1c2a-4e65-b910-441e77f69bea')
self.assertEqual(vpn_connection.vpn_gateway_id, 'cffa0cab-d1da-42a7-92f6-41379267a29f')
self.assertEqual(vpn_connection.state, 'Connected')
def test_ex_delete_vpn_connection(self):
vpn_connection = self.driver.ex_list_vpn_connections()[0]
self.assertTrue(vpn_connection.delete())
class CloudStackTestCase(CloudStackCommonTestCase, unittest.TestCase):
def test_driver_instantiation(self):
urls = [
'http://api.exoscale.ch/compute1', # http, default port
'https://api.exoscale.ch/compute2', # https, default port
            'http://api.exoscale.ch:8888/compute3', # http, custom port
'https://api.exoscale.ch:8787/compute4', # https, custom port
'https://api.test.com/compute/endpoint' # https, default port
]
expected_values = [
{'host': 'api.exoscale.ch', 'port': 80, 'path': '/compute1'},
{'host': 'api.exoscale.ch', 'port': 443, 'path': '/compute2'},
{'host': 'api.exoscale.ch', 'port': 8888, 'path': '/compute3'},
{'host': 'api.exoscale.ch', 'port': 8787, 'path': '/compute4'},
{'host': 'api.test.com', 'port': 443, 'path': '/compute/endpoint'}
]
cls = get_driver(Provider.CLOUDSTACK)
for url, expected in zip(urls, expected_values):
driver = cls('key', 'secret', url=url)
self.assertEqual(driver.host, expected['host'])
self.assertEqual(driver.path, expected['path'])
self.assertEqual(driver.connection.port, expected['port'])
def test_user_must_provide_host_and_path_or_url(self):
expected_msg = ('When instantiating CloudStack driver directly '
'you also need to provide url or host and path '
'argument')
cls = get_driver(Provider.CLOUDSTACK)
assertRaisesRegex(self, Exception, expected_msg, cls,
'key', 'secret')
try:
cls('key', 'secret', True, 'localhost', '/path')
except Exception:
self.fail('host and path provided but driver raised an exception')
try:
cls('key', 'secret', url='https://api.exoscale.ch/compute')
except Exception:
self.fail('url provided but driver raised an exception')
def test_restore(self):
template = NodeImage("aaa-bbb-ccc-ddd", "fake-img", None)
node = self.driver.list_nodes()[0]
res = node.ex_restore(template=template)
self.assertEqual(res, template.id)
def test_change_offerings(self):
offering = NodeSize("eee-fff-ggg-hhh", "fake-size", 1, 4, 5, 0.1, None)
node = self.driver.list_nodes()[0]
res = node.ex_change_node_size(offering=offering)
self.assertEqual(res, offering.id)
class CloudStackMockHttp(MockHttp, unittest.TestCase):
fixtures = ComputeFileFixtures('cloudstack')
fixture_tag = 'default'
def _load_fixture(self, fixture):
body = self.fixtures.load(fixture)
return body, json.loads(body)
def _test_path_invalid_credentials(self, method, url, body, headers):
body = ''
return (httplib.UNAUTHORIZED, body, {},
httplib.responses[httplib.UNAUTHORIZED])
def _test_path_api_error(self, method, url, body, headers):
body = self.fixtures.load('registerSSHKeyPair_error.json')
return (431, body, {},
httplib.responses[httplib.OK])
def _test_path(self, method, url, body, headers):
url = urlparse.urlparse(url)
query = dict(parse_qsl(url.query))
self.assertTrue('apiKey' in query)
self.assertTrue('command' in query)
self.assertTrue('response' in query)
self.assertTrue('signature' in query)
self.assertTrue(query['response'] == 'json')
del query['apiKey']
del query['response']
del query['signature']
command = query.pop('command')
if hasattr(self, '_cmd_' + command):
return getattr(self, '_cmd_' + command)(**query)
else:
fixture = command + '_' + self.fixture_tag + '.json'
body, obj = self._load_fixture(fixture)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _test_path_userdata(self, method, url, body, headers):
if 'deployVirtualMachine' in url:
self.assertUrlContainsQueryParams(url, {'userdata': 'Zm9vYmFy'})
return self._test_path(method, url, body, headers)
def _cmd_queryAsyncJobResult(self, jobid):
fixture = 'queryAsyncJobResult' + '_' + str(jobid) + '.json'
body, obj = self._load_fixture(fixture)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
|
import numpy as np
# Prints each integer 0..9 four times, since floor(i / 4) groups i into blocks of 4.
for i in range(40):
    print(np.floor(i / 4).astype(int))
|
import re
WORD = re.compile(r'\w+')
def tokenize(text):
"""
    Tokenize text quickly using a precompiled regular expression.
:param str text: text to be tokenized
:rtype: list[str]
"""
words = WORD.findall(text)
return words
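# Illustrative usage (not part of the original snippet). The \w+ pattern keeps
# runs of letters, digits and underscores and drops punctuation, so the
# apostrophe in "It's" splits that word:
if __name__ == "__main__":
    print(tokenize("Hello, world! It's 2024."))  # ['Hello', 'world', 'It', 's', '2024']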
|
value = 1
if value < 1:
print("Less")
else:
print("Greater or equal")
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# TODO(ptucker,ipolosukhin): Improve descriptions.
"""High level API for learning with TensorFlow.
## Estimators
Train and evaluate TensorFlow models.
@@BaseEstimator
@@Estimator
@@ModeKeys
@@DNNClassifier
@@DNNRegressor
@@LinearClassifier
@@LinearRegressor
## Graph actions
Perform various training, evaluation, and inference actions on a graph.
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
## Input processing
Queue and read batched input data.
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
from tensorflow.python.util.all_util import make_all
__all__ = make_all(__name__)
__all__.append('datasets')
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import gc
import itertools
import logging
import os
import socket
import time
from typing import Any, Dict, List, Tuple
import numpy as np
import torch
import torch.distributed as dist
from classy_vision.generic.distributed_util import (
barrier,
is_primary,
set_cpu_device,
set_cuda_device_index,
)
from classy_vision.generic.util import copy_model_to_gpu
from classy_vision.hooks.classy_hook import ClassyHook
from classy_vision.tasks import TASK_REGISTRY, ClassyTask
from vissl.config import AttrDict
from vissl.hooks import SSLClassyHookFunctions
from vissl.models.model_helpers import get_trunk_output_feature_names
from vissl.trainer.train_steps import get_train_step
from vissl.utils.distributed_utils import all_gather_heterogeneous, all_gather_sizes
from vissl.utils.env import get_machine_local_and_dist_rank
from vissl.utils.io import save_file
def build_task(config):
"""Builds a ClassyTask from a config.
    The config's TRAINER.TASK_NAME entry is used to look up the registered task
    class in TASK_REGISTRY (see :func:`register_task`), and that class's
    .from_config is then called with the full config to instantiate the task."""
task = TASK_REGISTRY[config.TRAINER.TASK_NAME].from_config(config)
return task
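# A minimal usage sketch (not from the original file; the registered task name and
# the remaining config keys are assumptions and must match an actual VISSL yaml config):
#
#   cfg = AttrDict({"TRAINER": {"TASK_NAME": "self_supervision_task"}, ...})
#   task = build_task(cfg)  # looks up the class registered under TRAINER.TASK_NAME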
class SelfSupervisionTrainer(object):
"""
The main entry point for any training or feature extraction workflows in VISSL.
The trainer constructs a train_task which prepares all the components of the
training (optimizer, loss, meters, model etc) using the settings specified by user
in the yaml config file. See the vissl/trainer/train_task.py for more details.
Args:
cfg (AttrDict): user specified input config that has optimizer, loss, meters etc
settings relevant to the training
dist_run_id (str): For multi-gpu training with PyTorch, we have to specify
how the gpus are going to rendezvous. This requires specifying
                           the communication method (file or tcp) and the unique rendezvous
                           run_id that is specific to one run.
We recommend:
                            1) for a single node: use init_method=tcp and run_id=auto
2) for multi-node, use init_method=tcp and specify
run_id={master_node}:{port}
checkpoint_path (str): if the training is being resumed from a checkpoint, path to
the checkpoint. The tools/run_distributed_engines.py automatically
looks for the checkpoint in the checkpoint directory.
checkpoint_folder (str): what directory to use for checkpointing. The
tools/run_distributed_engines.py creates the directory based on user
input in the yaml config file.
        hooks (List[ClassyHook]): the list of hooks to use during training. The
                                  engines in vissl/engines/{train, extract_features}.py
                                  determine which hooks to use.
"""
def __init__(
self,
cfg: AttrDict,
dist_run_id: str,
checkpoint_path: str = None,
checkpoint_folder: str = None,
hooks: List[ClassyHook] = None,
):
self.cfg = cfg
self.dist_run_id = dist_run_id
self.local_rank, self.distributed_rank = get_machine_local_and_dist_rank()
self.setup_distributed(self.cfg.MACHINE.DEVICE == "gpu")
        # Now build the task. The task has the training state attached to it,
        # including information about both the train and test phases, as well as
        # the other components such as the optimizer, loss and meters.
self.task = build_task(self.cfg)
self.task.set_checkpoint_path(checkpoint_path)
self.task.set_checkpoint_folder(checkpoint_folder)
if hooks is None:
hooks = []
self.task.set_hooks(hooks)
def setup_distributed(self, use_gpu: bool):
"""
        Set up the distributed training. VISSL supports both GPU and CPU-only training.
(1) Initialize the torch.distributed.init_process_group if the distributed is
not already initialized. The init_method, backend are specified by user in the
yaml config file. See vissl/defaults.yaml file for description on how to set
init_method, backend.
        (2) We also set the global cuda device index using torch.cuda.set_device,
        or fall back to the cpu device.
"""
# we overwrite the distributed trainer setup here with our config options
distributed_world_size = int(os.environ["WORLD_SIZE"])
assert distributed_world_size % self.cfg.DISTRIBUTED.NUM_NODES == 0
init_method = f"{self.cfg.DISTRIBUTED.INIT_METHOD}://{self.dist_run_id}"
logging.info(
f"Using Distributed init method: {init_method}, "
f"world_size: {distributed_world_size}, rank: {self.distributed_rank}"
)
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(
backend=self.cfg.DISTRIBUTED.BACKEND,
init_method=init_method,
world_size=distributed_world_size,
rank=self.distributed_rank,
)
else:
logging.warning(
"Torch distributed has already been initialized, \
reusing existing configuration"
)
logging.info(
"| initialized host {} as rank {} ({})".format(
socket.gethostname(),
self.distributed_rank,
torch.distributed.get_rank(),
)
)
if use_gpu:
set_cuda_device_index(self.local_rank)
# perform a dummy all-reduce to initialize the NCCL communicator
if torch.cuda.is_available() and (self.cfg.DISTRIBUTED.BACKEND == "nccl"):
dist.all_reduce(torch.zeros(1).cuda())
else:
set_cpu_device()
def train(self):
"""
        The train workflow. We get the training loop to use (the vissl default is
        standard_train_step), but the user can create their own training loop and
        specify its name via TRAINER.TRAIN_STEP_NAME.
        The training happens:
        1. Execute any hooks at the start of training (mostly resets variables like
           the iteration and phase numbers)
2. For each epoch (train or test), run the hooks at the start of an epoch. Mostly
involves setting things like timer, setting dataloader epoch etc
3. Execute the training loop (1 training iteration) involving forward, loss, backward,
optimizer update, metrics collection etc.
4. At the end of epoch, sync meters and execute hooks at the end of phase. Involves
things like checkpointing model, logging timers, logging to tensorboard etc
"""
train_step_fn = get_train_step(self.cfg["TRAINER"]["TRAIN_STEP_NAME"])
self.task.prepare(pin_memory=self.cfg.DATA.PIN_MEMORY)
self.task.init_distributed_data_parallel_model()
# Find what phase, train_phase_idx, local_iteration_num we are starting from.
# Recover it from the checkpoint (if available)
task, phase_idx, iteration_num = self._init_training_state(self.cfg, self.task)
# Good to go, (re) start training
task.run_hooks(SSLClassyHookFunctions.on_start.name)
if is_primary():
logging.info("Model is:\n {}".format(task.model))
logging.info("Loss is: {}".format(task.loss))
logging.info("Starting training....")
while phase_idx + 1 < len(task.phases):
self._advance_phase(task) # advances task.phase_idx
phase_idx += 1
iteration_num += 1
task.local_iteration_num = iteration_num # iteration_num=0 at this step
task.run_hooks(SSLClassyHookFunctions.on_phase_start.name)
while True:
try:
if self.cfg.MODEL.CUDA_CACHE.CLEAR_CUDA_CACHE and (
iteration_num % self.cfg.MODEL.CUDA_CACHE.CLEAR_FREQ == 0
):
logging.info(
f"Emptying CUDA cache at step count: {iteration_num}"
)
torch.cuda.empty_cache()
logging.info("CUDA cache cleared")
task = train_step_fn(task)
iteration_num += 1
task.local_iteration_num = iteration_num
# Book-keeping: update the training iteration number (only updated
# if it's a training phase).
task.iteration += 1 if task.train else 0
# Book-keeping. Track how many forward passes have been done.
# aka how many batches have been seen by the trainer irrespective of
# the train or test phase.
task.batches += 1
# update the batch time aka the training time for the current iteration.
task.batch_time.append(time.time() - task.start_time)
task.start_time = time.time()
task.run_hooks(SSLClassyHookFunctions.on_step.name)
except StopIteration:
break
except Exception as e:
task.run_hooks(SSLClassyHookFunctions.on_exception.name)
raise e
for meter in task.meters:
meter.sync_state()
logging.info("Meters synced")
barrier()
task.run_hooks(SSLClassyHookFunctions.on_phase_end.name)
task.run_hooks(SSLClassyHookFunctions.on_end.name)
if hasattr(task, "data_iterator"):
del task.data_iterator
gc.collect()
if hasattr(task, "dataloaders"):
del task.dataloaders
gc.collect()
@staticmethod
def _init_training_state(cfg, task: ClassyTask) -> Tuple[ClassyTask, int, int]:
"""
If a checkpoint is present, recover the current training status.
        If not, initialize everything from scratch.
Args:
task {ClassyTask}: object consisting of all components a training requires
(meters, optimizers, model, loss etc.)
Returns:
task {ClassyTask}: updated task
phase_idx {int}: phase index
iteration_num: iteration number
"""
phase_idx, iteration_num = -1, -1
# Ensure that train loader exists. Will NOT exist if config.TEST_ONLY is True
if "train" in task.dataloaders.keys():
loader_key = "train"
else:
loader_key = "test"
task.max_iteration = task.num_train_phases * len(task.dataloaders[loader_key])
if task.checkpoint is not None:
phase_idx = task.checkpoint["phase_idx"]
task.train_phase_idx = task.checkpoint["train_phase_idx"]
task.local_iteration_num = task.checkpoint["iteration_num"]
task.iteration = task.checkpoint["iteration"]
else:
task.iteration = 0
task.local_iteration_num = iteration_num
num_iter_in_phase = len(task.dataloaders[loader_key])
num_iter_in_epoch = num_iter_in_phase * task.num_train_phases_per_epoch
num_samples = task.num_phase_samples(loader_key)
task.start_time = time.time()
task.batch_time = []
task.metrics = {}
logging.info(f"Training {task.num_epochs} epochs")
logging.info(f"One epoch = {num_iter_in_epoch} iterations.")
logging.info(f"Total {num_samples} samples in one epoch")
if task.num_epochs != task.num_train_phases:
logging.info(f"Training a total of {task.num_train_phases} train phases.")
logging.info(f"One phase = {num_iter_in_phase} iterations.")
logging.info(f"Total {task.max_iteration} iterations for training")
return task, phase_idx, task.local_iteration_num
def _advance_phase(self, task: ClassyTask):
"""
Advance the training phase to the next phase.
- Updates the phase number,
- resets the meters,
- reset losses,
- recreates the data iterator and destroys previous iterator
- set the model to be in train or eval phase depending on what phase we are in
- execute any optimizer update (normally learning rate updates etc at the end of
an epoch)
"""
# reset the meters at the beginning of the epoch
for meter in task.meters:
meter.reset()
# reset the loss history for this epoch
task.losses = []
# advance the epoch num to be current
task.phase_idx += 1
phase = task.phases[task.phase_idx]
task.train = True if phase["train"] else False
if task.train:
task.train_phase_idx += 1
# get a new data iterator - delete the iterator at the beginning explicitly
# so that all dataloader processes are cleaned up
phase_type = "train" if phase["train"] else "test"
# we are advancing to next epoch, so no need to compute start_iter,
        # just let it be 0 inside of recreate_data_iterator. However, if we are just
# starting from the resumed training, we want to compute_start_iter
# again (if applicable) since we recreate the data iterator and delete
# the old ones.
compute_start_iter = False
if task.checkpoint is not None and task.checkpoint["train_phase_idx"] == (
task.train_phase_idx - 1
):
compute_start_iter = True
task.recreate_data_iterator(
phase_type,
epoch=task.phase_idx,
compute_start_iter=compute_start_iter,
train_phase_idx=task.train_phase_idx,
)
# set the model to train or eval depending on what phase we are in
task.model.train(phase["train"])
if task.train and task.train_phase_idx >= 0:
task.optimizer.on_epoch(task.where)
local_rank, _ = get_machine_local_and_dist_rank()
logging.info(f"Phase advanced. Rank: {local_rank}")
def extract(
self,
output_folder: str,
extract_features: bool = True,
extract_predictions: bool = False,
) -> None:
"""
Extract workflow supports multi-gpu feature extraction and also extracting
predicted labels. Since we are only extracting features or label predictions,
only the model is built (and initialized from some model weights file
if specified by user). Optionally the meters are built if the labels
        are being extracted. The model is fully set to eval mode.
The features / labels are extracted for whatever data splits (train, val, test)
the user wants.
"""
# support feature extraction on gpu only.
assert self.task.device.type == "cuda", "Set MACHINE.DEVICE = gpu"
self.task.prepare_extraction(pin_memory=self.cfg.DATA.PIN_MEMORY)
# Create distributed model
self._add_dummy_layer()
self.task.init_distributed_data_parallel_model()
if is_primary():
logging.info(f"Model is:\n {self.task.model}")
        # Get the names of the features that we are extracting. If the user doesn't
        # specify the features to evaluate, we get the full model output and freeze
        # both head and trunk as a precaution.
feat_names = get_trunk_output_feature_names(self.cfg.MODEL)
if len(feat_names) == 0:
feat_names = ["heads"]
for split in self.task.available_splits:
logging.info(f"============== Split: {split} =======================")
self.task.data_iterator = iter(self.task.dataloaders[split.lower()])
if extract_features:
logging.info(f"Extracting features for partition: {split.lower()}")
self._extract_split_features(
feat_names, self.task, split, output_folder
)
logging.info(f"Done getting features for partition: {split.lower()}")
if extract_predictions:
logging.info(f"Extracting predictions for partition: {split.lower()}")
self._extract_split_label_predictions(
feat_names, self.task, split, output_folder
)
logging.info(f"Done getting predictions for partition: {split.lower()}")
self._cleanup_task()
def _to_unique_feature_names(self, feat_names: List[str]) -> List[str]:
"""
        We may have multiple heads with different average pooling for
the same features. In case of export, we want to make sure to
export the outputs of these heads with different names.
This function will rename the features in the following way:
["res4", "res4", "res5"] -> ["res4", "res4_1", "res5"]
No effect if there are no duplicate feature names.
"""
counter = {}
new_feat_names = []
for feat_name in feat_names:
index = counter.get(feat_name, 0)
if index > 0:
new_feat_names.append(f"{feat_name}_{index}")
else:
new_feat_names.append(feat_name)
counter[feat_name] = index + 1
return new_feat_names
def _extract_split_label_predictions(
self,
feat_names: List[str],
task: ClassyTask,
split_name: str,
output_folder: str,
):
task.model.eval()
logging.info("Model set to eval mode during feature extraction...")
dist_rank = torch.distributed.get_rank()
feat_names = self._to_unique_feature_names(feat_names)
out_predictions, out_targets, out_scores = {}, {}, {}
for feat_name in feat_names:
out_predictions[feat_name] = {}
out_scores[feat_name] = {}
out_targets[feat_name] = {}
assert len(task.meters) > 0, "Please specify one meter to extract predictions"
assert len(task.meters) == 1, "Please use only one meter to extract predictions"
for meter in task.meters:
assert hasattr(
meter, "get_predictions"
), f"Meter {meter.name} doesn't implement get_predictions function"
for count in itertools.count(start=0, step=1):
try:
if count % 100 == 0:
logging.info(f"Label prediction extraction iteration: {count}")
sample = next(task.data_iterator)
assert isinstance(sample, dict)
assert "data_idx" in sample, "Indices not passed"
input_sample = {
"input": torch.cat(sample["data"]).cuda(non_blocking=True),
"target": torch.cat(sample["label"]).cpu().numpy(),
"inds": torch.cat(sample["data_idx"]).cpu().numpy(),
}
with torch.no_grad():
model_output = task.model(input_sample["input"])
# get the model predictions using the meter
if isinstance(model_output, list):
model_output_cpu = [x.cpu() for x in model_output]
else:
model_output_cpu = model_output.cpu()
for meter in task.meters:
meter.update(
model_output_cpu, sample["label"][0].detach().cpu()
)
predictions, pred_scores = task.meters[0].get_predictions(
model_output_cpu
)
num_images = input_sample["inds"].shape[0]
for num, layer_name in enumerate(feat_names):
pred = predictions[num]
score = pred_scores[num]
targets = input_sample["target"]
for idx in range(num_images):
index = input_sample["inds"][idx]
if not (index in out_predictions[layer_name]):
out_targets[layer_name][index] = targets[idx].reshape(
-1
)
out_predictions[layer_name][index] = pred[idx]
out_scores[layer_name][index] = score[idx]
except StopIteration:
break
# print the meters results. This can offer a validation
# of the extracted predictions.
self._sync_and_print_meters(task)
# save the predictions, targets and image indices now
self._save_extracted_label_predictions(
predictions=out_predictions,
confidence_scores=out_scores,
targets=out_targets,
dist_rank=dist_rank,
split=split_name,
output_folder=output_folder,
)
@staticmethod
def _save_extracted_label_predictions(
predictions,
confidence_scores,
targets,
dist_rank: int,
split: str,
output_folder: str,
):
output = {}
for layer_name in predictions.keys():
predictions[layer_name] = dict(sorted(predictions[layer_name].items()))
targets[layer_name] = dict(sorted(targets[layer_name].items()))
confidence_scores[layer_name] = dict(
sorted(confidence_scores[layer_name].items())
)
preds = np.array(torch.stack(list(predictions[layer_name].values())))
scores = np.array(torch.stack(list(confidence_scores[layer_name].values())))
N = preds.shape[0]
output[layer_name] = {
"predictions": preds.reshape(N, -1),
"confidence_scores": scores.reshape(N, -1),
"targets": np.array(list(targets[layer_name].values())),
"inds": np.array(list(predictions[layer_name].keys())),
}
split = split.lower()
for layer_name, layer_prediction in output.items():
out_pred_file = (
f"{output_folder}/rank{dist_rank}_{split}_{layer_name}_predictions.npy"
)
out_scores_file = (
f"{output_folder}/rank{dist_rank}_{split}_{layer_name}_conf_scores.npy"
)
out_target_file = (
f"{output_folder}/rank{dist_rank}_{split}_{layer_name}_targets.npy"
)
out_inds_file = (
f"{output_folder}/rank{dist_rank}_{split}_{layer_name}_inds.npy"
)
logging.info(
f"For {layer_name}, "
f"saving predictions: {layer_prediction['predictions'].shape}, "
f"saving scores: {layer_prediction['confidence_scores'].shape}, "
f"targets: {layer_prediction['targets'].shape}, "
f"inds: {layer_prediction['inds'].shape}"
)
save_file(layer_prediction["predictions"], out_pred_file)
save_file(layer_prediction["confidence_scores"], out_scores_file)
save_file(layer_prediction["targets"], out_target_file)
save_file(layer_prediction["inds"], out_inds_file)
def _sync_and_print_meters(self, task):
for meter in task.meters:
meter.sync_state()
logging.info("Meters synced")
if is_primary():
rank, _ = get_machine_local_and_dist_rank()
for meter in task.meters:
if len(task.meters) > 0 and (
(task.train and task.config["METERS"]["enable_training_meter"])
or (not task.train)
):
meter_value = meter.value
metric_key = f"{meter.name}"
if metric_key not in task.metrics:
task.metrics[metric_key] = []
task.metrics[metric_key].append(meter_value)
logging.info(
f"Rank: {rank}, name: {metric_key}, value: {meter_value}"
)
@staticmethod
def _flatten_features_list(features: Dict[str, Any]):
assert isinstance(features, list), "features must be of type list"
is_nested = isinstance(features[0], list)
if is_nested:
flat_features_list = [item for sublist in features for item in sublist]
return flat_features_list
return features
@staticmethod
def _save_extracted_features(
features,
targets,
dist_rank: int,
chunk_index: int,
split: str,
output_folder: str,
):
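        # Each rank dumps its accumulated features to disk per layer; files are named
        # rank{dist_rank}_chunk{chunk_index}_{split}_{layer}_{features|targets|inds}.npy
        # so per-worker, per-chunk shards stay distinguishable.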
output = {}
for layer_name in features.keys():
indices = sorted(features[layer_name].keys())
if len(indices) > 0:
output[layer_name] = {
"inds": np.array(indices),
"features": np.array([features[layer_name][i] for i in indices]),
"targets": np.array([targets[layer_name][i] for i in indices]),
}
for layer_name, layer_features in output.items():
out_feat_file = os.path.join(
output_folder,
f"rank{dist_rank}_chunk{chunk_index}_{split.lower()}_{layer_name}_features.npy",
)
out_target_file = os.path.join(
output_folder,
f"rank{dist_rank}_chunk{chunk_index}_{split.lower()}_{layer_name}_targets.npy",
)
out_inds_file = os.path.join(
output_folder,
f"rank{dist_rank}_chunk{chunk_index}_{split.lower()}_{layer_name}_inds.npy",
)
save_file(layer_features["features"], out_feat_file)
save_file(layer_features["targets"], out_target_file)
save_file(layer_features["inds"], out_inds_file)
def _extract_split_features(
self,
feat_names: List[str],
task: ClassyTask,
split_name: str,
output_folder: str,
):
task.model.eval()
logging.info("Model set to eval mode during feature extraction...")
dist_rank = torch.distributed.get_rank()
out_features, out_targets = {}, {}
for feat_name in feat_names:
out_features[feat_name], out_targets[feat_name] = {}, {}
chunk_index = 0
feature_buffer_size = 0
while True:
try:
sample = next(task.data_iterator)
assert isinstance(sample, dict)
assert "data_idx" in sample, "Indices not passed"
input_sample = {
"input": torch.cat(sample["data"]).cuda(non_blocking=True),
"target": torch.cat(sample["label"]).cpu().numpy(),
"inds": torch.cat(sample["data_idx"]).cpu().numpy(),
}
with torch.no_grad():
features = task.model(input_sample["input"])
flat_features_list = self._flatten_features_list(features)
num_images = input_sample["inds"].shape[0]
feature_buffer_size += num_images
for num, feat_name in enumerate(feat_names):
feature = flat_features_list[num].cpu().numpy()
targets = input_sample["target"]
for idx in range(num_images):
index = input_sample["inds"][idx]
out_features[feat_name][index] = feature[idx]
out_targets[feat_name][index] = targets[idx].reshape(-1)
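                # Flush the accumulated features to disk once the buffer reaches the
                # configured chunk size; a negative CHUNK_THRESHOLD disables these
                # intermediate flushes (everything is then saved only at StopIteration).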
if (
feature_buffer_size
>= self.cfg.EXTRACT_FEATURES.CHUNK_THRESHOLD
>= 0
):
self._save_extracted_features(
features=out_features,
targets=out_targets,
dist_rank=dist_rank,
chunk_index=chunk_index,
split=split_name,
output_folder=output_folder,
)
for layer_name in out_features.keys():
out_features[layer_name].clear()
chunk_index += 1
feature_buffer_size = 0
except StopIteration:
self._save_extracted_features(
features=out_features,
targets=out_targets,
dist_rank=dist_rank,
chunk_index=chunk_index,
split=split_name,
output_folder=output_folder,
)
break
def _add_dummy_layer(self):
"""
        In feature evaluation mode, if we are freezing both trunk and
        head, DDP won't work as there are no trainable parameters in the
        model. Adding a dummy head would make the extracted features
        incorrect, so we instead add a dummy layer to the model and use
        DDP. We copy the model to gpu (if using gpus) after adding the
        new dummy layer.
"""
fully_frozen_model = self.task.base_model.is_fully_frozen_model()
if fully_frozen_model:
self.task.base_model.dummy_layer = torch.nn.Linear(4, 4)
if self.task.device.type == "cuda":
self.task.base_model = copy_model_to_gpu(self.task.base_model)
def _cleanup_task(self):
if hasattr(self.task, "data_iterator"):
del self.task.data_iterator
gc.collect()
if hasattr(self.task, "dataloaders"):
del self.task.dataloaders
gc.collect()
def extract_clusters(self) -> Dict[str, Dict[int, int]]:
"""
        Workflow for multi-gpu cluster extraction from pre-trained models
        based on clustering methods (SwAV, DeepCluster, etc).
The function returns a map from image index to cluster index for the
whole dataset for each of the different splits.
"""
# Support feature extraction on gpu only.
assert self.task.device.type == "cuda", "Set MACHINE.DEVICE = gpu"
self.task.prepare_extraction(pin_memory=self.cfg.DATA.PIN_MEMORY)
# Assert that the model support extract of clusters
error_message = "Extracting clusters is only available for pre-training methods based on clusters" # NOQA
assert self.task.base_model.is_clustering_model(), error_message
# Create distributed model
self._add_dummy_layer()
self.task.init_distributed_data_parallel_model()
if is_primary():
logging.info("Model is:\n {}".format(self.task.model))
# Compute the cluster assignment on each worker in parallel
cluster_assignment = {}
for split in self.task.available_splits:
msg = f"Extracting cluster assignment for partition: {split}"
logging.info(msg)
cluster_assignment[split] = self._get_cluster_assignment_for_split(
self.task, split
)
logging.info("Done: " + msg)
self._cleanup_task()
# Merge the cluster assignments and group by cluster
return self._merge_cluster_assignments(cluster_assignment)
def _get_cluster_assignment_for_split(self, task: ClassyTask, split: str):
task.model.eval()
logging.info("Model set to eval mode during feature extraction...")
cluster_assignments = {}
task.data_iterator = iter(self.task.dataloaders[split.lower()])
while True:
try:
sample = next(task.data_iterator)
assert isinstance(sample, dict)
assert "data_idx" in sample, "Indices not passed"
input_sample = {
"images": torch.cat(sample["data"]).cuda(non_blocking=True),
"indices": torch.cat(sample["data_idx"]).cpu().numpy(),
}
with torch.no_grad():
features = task.model(input_sample["images"])
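                    # Clustering heads (e.g. SwAV) return [embedding, prototype_scores] as the
                    # first head output; the argmax over prototype scores below is the cluster
                    # assignment for each image.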
features = features[0]
prototype_score = features[1]
prototype_index = prototype_score.argmax(dim=-1)
num_images = input_sample["indices"].shape[0]
for idx in range(num_images):
image_index = input_sample["indices"][idx]
cluster_assignments[image_index] = prototype_index[idx].item()
except StopIteration:
break
return cluster_assignments
@staticmethod
def _merge_cluster_assignments(
rank_cluster_assignment: Dict[str, Dict[int, int]]
) -> Dict[str, Dict[int, int]]:
"""
All gather all the cluster assignments computed by the different workers on
separate parts of the dataset and merge them in a single map
"""
merged_cluster_assignments = {}
for split in rank_cluster_assignment.keys():
split_assignments = list(rank_cluster_assignment[split].items())
image_indices = [assignment[0] for assignment in split_assignments]
image_indices = torch.LongTensor(image_indices).cuda(
torch.cuda.current_device()
)
cluster_indices = [assignment[1] for assignment in split_assignments]
cluster_indices = torch.LongTensor(cluster_indices).cuda(
torch.cuda.current_device()
)
sizes = all_gather_sizes(image_indices)
all_image_indices = all_gather_heterogeneous(sizes, image_indices)
all_cluster_indices = all_gather_heterogeneous(sizes, cluster_indices)
merged_cluster_assignments[split] = {}
for image_indices, cluster_indices in zip(
all_image_indices, all_cluster_indices
):
for image_id, cluster_id in zip(image_indices, cluster_indices):
merged_cluster_assignments[split][
image_id.item()
] = cluster_id.item()
return merged_cluster_assignments
|
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-mgmt-containerregistry"
PACKAGE_PPRINT_NAME = "Container Registry"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + changelog,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
'tests',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'azure.mgmt',
]),
install_requires=[
'msrest>=0.5.0',
'msrestazure>=0.4.32,<2.0.0',
'azure-common~=1.1',
],
extras_require={
":python_version<'3.0'": ['azure-mgmt-nspkg'],
}
)
|
import numpy as np
from .utils.diversifier import kmeanspp
from .utils.ops import cosine_similarity, softmax, divsum
from .graphs.nearestneighbors import NearestNeighbors
class QuickRecommender:
"""
QuickRecommender
    Creates a content-based model using a nearest-neighbors graph, updates user preferences
    based on the graph, diversifies the results using K-Means++, and returns them.
    Each user has a `user_vector`, an array of length N, where N is the number of samples.
    The recommender takes two parameters, `n_neighbors` and `metric`.
    `n_neighbors` specifies the number of nearest neighbors to store.
    `metric` specifies the similarity metric: either a callable that takes a matrix and
    returns its NxN self-similarity matrix, or the string ``cosine`` to use cosine
    similarity.
"""
def __init__(self, n_neighbors=50, metric='cosine'):
self._input_data = None
self._n_samples = -1
self._nn_graph = None
self._n_neighbors = n_neighbors
        if callable(metric):
self.similarity_fn = metric
elif type(metric) is str:
if metric == 'cosine':
self.similarity_fn = cosine_similarity
else:
raise RuntimeError('Metric `{}` not supported.'.format(metric))
else:
raise RuntimeError('Metric of type {} not supported.'.format(type(metric)))
def get_nn_graph(self):
return self._nn_graph
def fit(self, input_data):
"""
Creates the nearest-neighbors graph and stores it
"""
self._input_data = input_data
self._n_samples = input_data.shape[0]
self._nn_graph = NearestNeighbors(n_samples=self._n_samples, n_neighbors=self._n_neighbors)
self._nn_graph.fit(input_data, similarity_fn=self.similarity_fn)
def recommend(self, user_vector=None, n_recommendations=10):
"""
        Recommends items to a user based on their preferences (`user_vector`)
        :param user_vector: ndarray or list of size n_samples, or None (initial state)
        :param n_recommendations: number of items to recommend
        :return: indices of the recommended items
"""
assert n_recommendations <= self._n_samples
assert self._nn_graph is not None
if user_vector is None:
user_vector = np.ones(self._n_samples, dtype=float)
assert len(user_vector) == self._n_samples
if type(user_vector) is list:
user_vector = np.array(user_vector)
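        # Oversample ~1.5x the requested count, weighted by the preferences (normalized
        # via divsum), then let K-Means++ seeding pick a diverse subset of the candidates.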
n_init = min(int(n_recommendations * 1.5), self._n_samples)
initial_recommendations_idx = np.random.choice(self._n_samples, n_init, replace=False, p=divsum(user_vector))
diversified_idx = kmeanspp(self._input_data[initial_recommendations_idx, :], n_recommendations)
return initial_recommendations_idx[diversified_idx]
def update(self, user_vector=None, selections=None):
"""
Updates the user vector based on the user's selections (indices of samples)
:param user_vector: ndarray, or list of size n_samples or None (initial state)
        :param selections: list of selected items' indices
:return: updated user vector
"""
assert self._nn_graph is not None
if selections is None:
return None
if user_vector is None:
user_vector = np.zeros(self._n_samples, dtype=float)
assert len(user_vector) == self._n_samples
if type(user_vector) is list:
user_vector = np.array(user_vector)
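        # For each selected item, raise the preference of its stored nearest neighbors to
        # at least that item's similarity to them (element-wise max with current values).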
for s in selections:
neighbors = list(self._nn_graph.neighbors[s, :])
user_vector[neighbors] = np.max([user_vector[neighbors], self._nn_graph.similarities[s, :]], axis=0)
return user_vector
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CERN.
#
# System Fields Test is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Record serializers."""
from __future__ import absolute_import, print_function
from invenio_records_rest.serializers.json import JSONSerializer
from invenio_records_rest.serializers.response import record_responsify, \
search_responsify
from ..marshmallow import RecordSchemaV1
# Serializers
# ===========
#: JSON serializer definition.
json_v1 = JSONSerializer(RecordSchemaV1, replace_refs=True)
# Records-REST serializers
# ========================
#: JSON record serializer for individual records.
json_v1_response = record_responsify(json_v1, 'application/json')
#: JSON record serializer for search results.
json_v1_search = search_responsify(json_v1, 'application/json')
__all__ = (
'json_v1',
'json_v1_response',
'json_v1_search',
)
|
"""
Automated semblance velocity picking with dynamic programming
@author Andrew Munoz, CSM
"""
from bputils import *
from imports import *
papers=False
slides=False
setvis=True
if papers:
pngDir = "/Users/amunoz/Home/pics/sem/p_"
elif slides:
pngDir = "/Users/amunoz/Home/pics/sem/s_"
# cmp number -803 (SP1 and x=0)
#gix2 = 0
gix2 = 251
fpeak = 30
#xmax = -7.0
def main(args):
#data = getData(gix2)#xmax=xmax)#,stack=1)
#hypTest(data)
#nonHypTest(data)
nonHypTestLoop()
## others
#goHypTest()
#goNonHypTest(data)
#goHyp(data)
#goNonHyp(data)
#plotDq(data)
#goNishantTest()
def nonHypTestLoop():
fcmp = 0#803
n = 803#4799-fcmp
eer = zerofloat(n)
ver = zerofloat(n)
eir = zerofloat(n)
vir = zerofloat(n)
print 'n=',n
sw = Stopwatch()
sw.start()
for i in range(n):
data = getData(i+fcmp)
pdata = nonHypLoop(data)
rv,re,ev,ee,rvi,rei,evi,eei = pdata
eer[i] = sum(abs(sub(re,ee)))
ver[i] = sum(abs(mul(div(sub(ev,rv),rv),100)))
eir[i] = sum(abs(sub(rei,eei)))
vir[i] = sum(abs(mul(div(sub(evi,rvi),rvi),100)))
print 'i=',i
sw.stop()
print 'took',sw.time()/60,'minutes to complete'
plot1special(eer,ver)
plot1special(eir,vir)
eei = zeroint(1)
vei = zeroint(1)
eii = zeroint(1)
vii = zeroint(1)
print 'min eta error',min(eer,eei),'is at cmp',eei[0]
print 'min velocity error',min(ver,vei),'is at cmp',vei[0]
print 'min eta interval error',min(eir,eii),'is at cmp',eii[0]
print 'min velocity interval error',min(vir,vii),'is at cmp',vii[0]
def nonHypLoop(data):
s1,so,g,vnmo,eta,wb,vpt,etat = data
n1 = s1.count
d1 = s1.delta
vpl = [1.492,3.00,0.004]
epl = [0.000,0.15,0.004]
vpli = [1.400,4.50]
epli = [-0.00,0.40]
wbi = int(wb/d1)
dr = 0.03
r1min,r1max = -0.05,0.55
r2min,r2max = -0.05,0.15
DvDt = d1/vpl[2]
DeDt = d1/epl[2]
pl1,pu1 = 0.5,2.0
pl2,pu2 = 1.0,1.0
nv = int((vpl[1]-vpl[0])/vpl[2])
ne = int((epl[1]-epl[0])/epl[2])
sv = Sampling(nv,vpl[2],vpl[0])
se = Sampling(ne,epl[2],epl[0])
vl1,vl2 = makeIntconsts(vpli[0],vpli[1],s1,wbi,pl1,pu1)
el1,el2 = fillfloat(epli[0],n1),fillfloat(epli[1],n1)
p1min,p1max,p2min,p2max,vel,veu,eel,eeu = getTimeVaryingConstraints(s1,wbi,vl1,vl2,el1,el2,dr)
p1max,p1min = mul(p1min,DvDt),mul(p1max,-DvDt)
p2max,p2min = mul(p2min,DeDt),mul(p2max,DeDt)
s = Semblance(s1,so,fpeak)
s.setStretchMute(0.75)
ps = s.applyNonHypV(g,sv,se,vel,veu,eel,eeu)
#ps = s.applyNonHypV(g,sv,se)
#stk = stackGats2(g,s1)
#ps = mul(ps,ps)
#ps = mul(ps,ps)
vln1,vln2 = makeIntconsts(vpl[0],vpl[1],s1,wbi,0.8,1.2)
stk = stackSem2(pow(ps,4),vln1,vln2,sv)
ds = DynamicSolver(sv.count,se.count,r1min,r1max,r2min,r2max,dr,wbi)
ds.setTimeVaryingStrainLimits(p1min,p1max,p2min,p2max)
#ds.setWbParam(sv.indexOfNearest(1.492),se.indexOfNearest(0.0))
#up1,up2 = ds.findSolution(stk,ps,ng)
up1,up2 = ds.findSolution(stk,ps)
#up1,up2 = ds.findSolution(ps)
#cgs = ds.getCgs()
#plot1(stk,s1,name="stk-pts",cgs=cgs)
up1 = add(sv.first,mul(up1,sv.delta))
up2 = add(se.first,mul(up2,se.delta))
uv = up1
ue = up2
#up1m,up2m = s.pickMaxSemblance(ps,se,sv)
#q = s.flattenGatherNonHypE(g,up1,up2)
#pduv = mul(div(sub(uv,vnmo),vnmo),100)
vnmoi = dixInversionVnmo(s1,uv)
etai = dixInversionEta(s1,ue,uv,vnmoi)
pdata = vnmo,eta,uv,ue,vnmoi,etai,vpt,etat
return pdata
def plotDq(data):
s1,so,g,vnmo,eta,wb,vp = data
n1 = s1.count
vrmax = mul(vnmo,1.2)
vrmin = mul(vnmo,0.8)
vpmax = mul(vp,1.2)
vpmin = mul(vp,0.8)
def plot(s1,so,g,vnmo,wb,vp,eta):
vrms2 = mul(vnmo,vnmo)
vp2 = mul(vp,vp)
dvdt = zerofloat(n1)
dt = s1.delta
dts = wb
wbi = int(wb/dt)
for i in range(wbi,n1):
dvdt[i] = (vnmo[i]-vnmo[i-1])/(dt)
#dvdt[i] = dt*(vp2[i]/dts-vrms2[i]/(2*sqrt(dts)))/(2*vnmo[i])/1000
#dts += dt
dq1dt = div(mul(-2,dvdt),mul(vrms2,vnmo))
plot1(dvdt,s1,name="dvdt",hlabel="dv/dt (km/s^2)")
plot1(dq1dt,s1,name="dq1dt",hlabel="1/km")
dedt = zerofloat(n1)
for i in range(wbi,n1):
dedt[i] = (eta[i]-eta[i-1])/(dt)
dq2dt = add(mul(2,mul(eta,dq1dt)),mul(div(2,vrms2),dedt))
to = floats(s1.getValues())
xmax = so.getFirst()
xmax2 = xmax*xmax
toxm = mul(2,sqrt(mul(to,to)+div(xmax2,vrms2)))
ddtvdt = sub(1,div(sub(mul(2,to),mul(dvdt,div(xmax2,mul(2,mul(vrms2,vnmo))))),toxm))
plot1(dedt,s1,name="dedt",hlabel="1/s")
plot1(dq2dt,s1,name="dq2dt",hlabel="1/km")
plot1(ddtvdt,s1,name="ddtvdt")
#plot(s1,so,g,vrmax,wb,vpmax,eta)
#plot(s1,so,g,vrmin,wb,vpmin,eta)
plot(s1,so,g,vnmo,wb,vp,eta)
vmax = max(vnmo); vmin = min(vnmo)
dtmin = -sqrt(100/(vmin*vmin))
dtmax = 1-sqrt(1+100/(vmax*vmax))
print vmin
print vmax
print dtmin
print dtmax
ndt = int((dtmax-dtmin)/s1.delta)
dta = rampfloat(dtmin,s1.delta,ndt)
plot1(dta,name="dta 1 sec")
def goHyp(data):
s1,so,g,vnmo,eta,wb,vpt = data
vpl = [1.40,3.00,0.005]
r1min,r1max = -0.50,0.10
dr = 0.02
nv = int((vpl[1]-vpl[0])/vpl[2])
sv = Sampling(nv,vpl[2],vpl[0])
# Test Data
#g = Gather(s1,so,sv)
#vnmo = g.makeLinearParameter(vpl[0],vpl[1])
#g = g.modelGatherHyp(30,1e6,vnmo)
s = Semblance(s1,so,20)
s.setStretchMute(0.50)
#ps = s.applyHypDt(g)
#ps = s.applyHypQ(g)
ps = s.applyHypV(g,vpl[0],vpl[1],vpl[2])
wbi = int(wb/s1.delta)
stk = stackSem(ps)
dw = DynamicSolver(sv.count,r1min,r1max,dr,wbi)
ac = dw.accumulate(transpose(ps))
uv = dw.findSolution(transpose(ps))
#uv = dw.findSolution(stk,transpose(ps))
uv = add(sv.first,mul(uv,sv.delta))
q = s.flattenGatherHyp(g,uv)
#cv = s.pickMaxSemblance(ps)
pduv = mul(div(sub(uv,vnmo),vnmo),100)
#pdcv = mul(div(sub(cv,vnmo),vnmo),100)
print 'plotting'
cmplims=[-6.5,s1.first,so.last,s1.last]
s1c = Sampling(len(ac),s1.delta,s1.first)
plotModelsCmp(g,s1,so,vnmo,eta,'modelscmp',clims=[cmplims[0],cmplims[2]])
plot2(transpose(ac),s1c,sv,cmap=jet,name='accumulated semblance',sem=True)
#plot1(vnmo,s1,uv,cv,name="vnmo")
#plot2(ps,s1,sv,u=cv,name='semblance max pick',sem=True)
plot1(pduv,s1,name='vnmo % difference dp')
#plot1(pdcv,s1,name='vnmo % difference max')
plot1(vnmo,s1,uv,name="vnmo and vnmo estimated")
plot2(ps,s1,sv,u=vnmo,u2=uv,name='semblance comp',sem=True,size=[728,753])
plot2(ps,s1,sv,u=uv,name='semblance est',sem=True,size=[728,753])
plot2(ps,s1,sv,name='semblance',sem=True,size=[728,753])
plot2(q,s1,so,name='flat cmp',cmp=True,perc=99)
plot2(g,s1,so,name='cmp',cmp=True,perc=99,lims=cmplims)
def hypTest(data):
s1,so,g,vnmo,eta,wb,vpt,etat = data
pdata = hypTestV(data)
hypPlots(pdata)
#pdata = hypTestDt(data)
#hypPlots(pdata)
#pdata = hypTestQ(data)
#hypPlots(pdata)
def hypPlots(pdata):
s1,so,g,vnmo,eta,wb,pduv,uv,up,vnmop,sv,q,ps,cv = pdata
print 'plotting'
cmplims=[so.first,s1.first,so.last,s1.last]
plotModelsCmp(g,s1,so,vnmo,eta,'modelscmp',clims=[cmplims[0],cmplims[2]])
plot1(pduv,s1,name='vnmo % difference dp')
plot1(vnmo,s1,uv,name="vnmo and vnmo estimated")
plot1comp(s1,vnmo,uv,pduv,name="vnmo comparison",vel=True)
plot2(ps,s1,sv,u=vnmop,u2=up,name='semblance comp',sem=True,size=[728,753])
plot2(ps,s1,sv,u2=up,name='semblance est',sem=True,size=[728,753])
plot2(ps,s1,sv,u=vnmop,u2=cv,name='semblance max comp',sem=True,size=[728,753])
plot2(ps,s1,sv,u2=cv,name='semblance max',sem=True,size=[728,753])
plot2(ps,s1,sv,name='semblance',sem=True,size=[728,753])
plot2(q,s1,so,name='flat cmp zoom',cmp=True,perc=99,lims=[so.first,5.5,so.last,7.0])
plot2(q,s1,so,name='flat cmp',cmp=True,perc=99)
plot2(g,s1,so,name='cmp',cmp=True,perc=99,lims=cmplims)
def hypTestV(data):
s1,so,g,vnmo,eta,wb,vpt,etat = data
r1min,r1max,dr1 = -0.10,0.60,0.02;
vpl = [1.402,3.00,0.005]
nv = int((vpl[1]-vpl[0])/vpl[2])
sv = Sampling(nv,vpl[2],vpl[0])
s = Semblance(s1,so,20)
s.setStretchMute(0.75)
ps = s.applyHypV(g,vpl[0],vpl[1],vpl[2])
wbi = int(wb/s1.delta)
stk = stackSem(ps)
dw = DynamicSolver(sv.count,r1min,r1max,dr1,wbi)
dw.setWbParam(sv.indexOfNearest(1.492))
#uv = dw.findSolution(transpose(ps))
uv = dw.findSolution(stk,transpose(ps))
uv = add(sv.first,mul(uv,sv.delta))
up = uv
vnmop = vnmo
q = s.flattenGatherHyp(g,uv)
#q = s.flattenGatherHyp(g,vnmo)
cv = s.pickMaxSemblance(ps,sv)
pduv = mul(div(sub(uv,vnmo),vnmo),100)
pdata = s1,so,g,vnmo,eta,wb,pduv,uv,up,vnmop,sv,q,ps,cv
return pdata
def hypTestDt(data):
s1,so,g,vnmo,eta,wb,vpt = data
r1min,r1max,dr1 = -0.50,0.50,0.02;
vpl = [1.40,2.80,0.005]
s = Semblance(s1,so,20)
s.setStretchMute(0.50)
vmin = vpl[0]
vmax = vpl[1]
xmax = so.first
tmax = s1.last
dtmax = -sqrt(xmax*xmax/(vmin*vmin));
#dtmin = -sqrt(xmax*xmax/(vmax*vmax));
dtmin = 0
dt = s1.delta;
nv = inro((dtmin-dtmax)/dt);
sv = Sampling(nv,dt,dtmax);
ps = s.applyHypDt(g,sv)
plot2(ps,s1,sv,name='semblance',sem=True,size=[728,753])
wbi = int(wb/s1.delta)
stk = stackSem(ps)
dw = DynamicSolver(sv.count,r1min,r1max,dr1,wbi)
ac = dw.accumulate(transpose(ps))
#up = dw.findSolution(transpose(ps))
up = dw.findSolution(stk,transpose(ps))
up = add(sv.first,mul(up,sv.delta))
to = floats(s1.getValues())
uv = sqrt(div(xmax*xmax,sub(pow(sub(to,up),2),to)))
vnmop = mul(-1,sqrt(div(xmax*xmax,mul(vnmo,vnmo))))
q = s.flattenGatherHyp(g,uv)
pduv = mul(div(sub(uv,vnmo),vnmo),100)
pdata = s1,so,g,vnmo,eta,wb,pduv,uv,up,vnmop,sv,q,ps
return pdata
def hypTestQ(data):
s1,so,g,vnmo,eta,wb,vpt = data
r1min,r1max,dr1 = -0.05,0.10,0.02;
vpl = [1.40,3.00,0.005]
s = Semblance(s1,so,20)
s.setStretchMute(0.50)
vmin = vpl[0]; vmax = vpl[1]; dq = vpl[2];
qmin = 1.0/(vmax*vmax);
qmax = 1.0/(vmin*vmin);
nq = inro((qmax-qmin)/dq)
sq = Sampling(nq,dq,qmin);
ps = s.applyHypQ(g,vmin,vmax,dq)
wbi = int(wb/s1.delta)
stk = stackSem(ps)
dw = DynamicSolver(sq.count,r1min,r1max,dr1,wbi)
ac = dw.accumulate(transpose(ps))
#uv = dw.findSolution(transpose(ps))
up = dw.findSolution(stk,transpose(ps))
up = add(sq.first,mul(up,sq.delta))
uv = div(1,sqrt(up))
vnmop = div(1,mul(vnmo,vnmo))
q = s.flattenGatherHyp(g,uv)
pduv = mul(div(sub(uv,vnmo),vnmo),100)
pdata = s1,so,g,vnmo,eta,wb,pduv,uv,up,vnmop,sq,q,ps
return pdata
def normalizeRMSlocal(x,sig):
return WTutils().localRMSnorm(x,sig)
################################################################################
# Nonhyperbolic methods
def nonHypTest(data):
s1,so,g,vnmo,eta,wb,vpt,etat = data
pdata = nonHypV(data)
nonHypPlots(pdata)
#pdata = nonHypDt(data)
#nonHypPlots(pdata)
#pdata = nonHypQ(data)
#nonHypPlots(pdata)
return pdata
def nonHypQ(data):
s1,so,g,vnmo,eta,wb,vpt = data
vpl = [1.492,3.00,0.005]
epl = [0.00,0.15,0.005]
r1min,r1max = -0.10,0.50
r2min,r2max = -0.10,0.50
dr = 1.0
dq1 = 0.002
dq2 = dq1
vmin = vpl[0]; vmax = vpl[1];
emin = epl[0]; emax = epl[1];
q1min = 1.0/(vmax*vmax)
q1max = 1.0/(vmin*vmin)
q2min = 2.0*q1min*emin
q2max = 2.0*q1max*emax
nq1 = inro((q1max-q1min)/dq1)
nq2 = inro((q2max-q2min)/dq2)
sq1 = Sampling(nq1,dq1,q1min)
sq2 = Sampling(nq2,dq2,q2min)
# Test Data
nv = int((vpl[1]-vpl[0])/vpl[2])
ne = int((epl[1]-epl[0])/epl[2])
sv = Sampling(nv,vpl[2],vpl[0])
se = Sampling(ne,epl[2],epl[0])
gt = Gather(s1,so,sv,se)
vnmo = gt.makeLinearParameter(vpl[0],vpl[1])
eta = gt.makeLinearParameter(epl[0],epl[1])
g = gt.modelGatherNonHyp(30,1e6,vnmo,eta)
s = Semblance(s1,so,30)
s.setStretchMute(0.50)
ps = s.applyNonHypQ(g,sq1,sq2)
ps = mul(ps,ps)
wbi = int(wb/s1.delta)
wbi=0;
#stk = stackSem2(ps,vl1,vl2,sv)
ds = DynamicSolver(sq1.count,sq2.count,r1min,r1max,r2min,r2max,dr,wbi)
#ds.setWbParam(sq1.indexOfNearest(1/(1.492*1.492)),sq2.indexOfNearest(0.0))
#up1,up2 = ds.findSolution(stk,ps)
up1,up2 = ds.findSolution(ps)
cgs = ds.getCgs()
up1 = add(sq1.first,mul(up1,sq1.delta))
up2 = add(sq2.first,mul(up2,sq2.delta))
uv = div(1,sqrt(up1))
ue = mul(0.5,div(up2,up1))
q = s.flattenGatherNonHypE(g,uv,ue)
qe = s.flattenGatherNonHypE(g,vnmo,eta)
pduv = mul(div(sub(uv,vnmo),vnmo),100)
vnmop = div(1,mul(vnmo,vnmo))
etap = mul(2,mul(eta,vnmop))
pdata = s1,so,g,vnmo,eta,wb,pduv,uv,ue,up1,up2,vnmop,etap,sq1,sq2,q,qe,ps,cgs
return pdata
#GOTO
def nonHypV(data):
s1,so,g,vnmo,eta,wb,vpt,etat = data
n1 = s1.count
d1 = s1.delta
vpl = [1.492,3.00,0.004]
epl = [0.000,0.15,0.004]
vpli = [1.400,4.50]
epli = [-0.00,0.40]
wbi = int(wb/d1)
dr = 0.03
#r1min,r1max = -0.50,0.50
#r2min,r2max = -0.50,0.50
r1min,r1max = -0.05,0.55
r2min,r2max = -0.05,0.15
DvDt = d1/vpl[2]
DeDt = d1/epl[2]
pl1,pu1 = 0.5,2.0
pl2,pu2 = 1.0,1.0
nv = int((vpl[1]-vpl[0])/vpl[2])
ne = int((epl[1]-epl[0])/epl[2])
sv = Sampling(nv,vpl[2],vpl[0])
se = Sampling(ne,epl[2],epl[0])
dedt = zerofloat(n1)
dvdt = zerofloat(n1)
for i in range(n1):
dedt[i] = ((eta[i]-eta[i-1])/d1)*DeDt
dvdt[i] = ((vnmo[i]-vnmo[i-1])/d1)*DvDt
dedt[0] = dedt[1]
dvdt[0] = dvdt[1]
plot1(dedt,s1,name="exact dedt slopes")
plot1(dvdt,s1,name="exact dvdt slopes")
vl1,vl2 = makeIntconsts(vpli[0],vpli[1],s1,wbi,pl1,pu1)
#el1,el2 = makeIntconsts(epli[0],epli[1],s1,wbi,pl2,pu2)
el1,el2 = fillfloat(epli[0],n1),fillfloat(epli[1],n1)
p1min,p1max,p2min,p2max,vel,veu,eel,eeu = getTimeVaryingConstraints(s1,wbi,vl1,vl2,el1,el2,dr)
p1max,p1min = mul(p1min,DvDt),mul(p1max,-DvDt)
p2max,p2min = mul(p2min,DeDt),mul(p2max,DeDt)
plot1(vl1,s1,vl2,name="vnmo interval bounds")
plot1(el1,s1,el2,name="eta interval bounds",hlim=[epli[0]-0.1,epli[1]+0.1])
plot1(floats(p1min),s1,floats(p1max),name="vnmo derivative bounds")
plot1(floats(p2min),s1,floats(p2max),name="eeta derivative bounds")
plot1(floats(p1min),s1,floats(p1max),dvdt,name="vnmo derivative bounds comp")
plot1(floats(p2min),s1,floats(p2max),dedt,name="eeta derivative bounds comp")
# Test Data
#gt = Gather(s1,so,sv,se)
#vnmo = gt.makeLinearParameter(vpl[0],vpl[1]-0.1)
#eta = gt.makeLinearParameter(epl[0],epl[1]-.03)
#g = gt.modelGatherNonHyp(30,1e6,vnmo,eta)
s = Semblance(s1,so,fpeak)
s.setStretchMute(0.75)
#ps = s.applyNonHypV(g,sv,se,vel,veu,eel,eeu)
ps = s.applyNonHypV(g,sv,se)
#stk = stackGats2(g,s1)
#ps = mul(ps,ps)
#ps = mul(ps,ps)
vln1,vln2 = makeIntconsts(vpl[0],vpl[1],s1,wbi,0.8,1.2)
stk = stackSem2(pow(ps,4),vln1,vln2,sv)
#ng = 40
#print 'ng=',ng
#wbi=0;
ds = DynamicSolver(sv.count,se.count,r1min,r1max,r2min,r2max,dr,wbi)
ds.setTimeVaryingStrainLimits(p1min,p1max,p2min,p2max)
#ds.setWbParam(sv.indexOfNearest(1.492),se.indexOfNearest(0.0))
#up1,up2 = ds.findSolution(stk,ps,ng)
up1,up2 = ds.findSolution(stk,ps)
#up1,up2 = ds.findSolution(ps)
cgs = ds.getCgs()
plot1(stk,s1,name="stk-pts",cgs=cgs)
up1 = add(sv.first,mul(up1,sv.delta))
up2 = add(se.first,mul(up2,se.delta))
uv = up1
ue = up2
up1m,up2m = s.pickMaxSemblance(ps,se,sv)
#q = s.flattenGatherNonHypE(g,up1m,up2m)
q = s.flattenGatherNonHypE(g,up1,up2)
#qe = s.flattenGatherNonHypE(g,vnmo,eta)
qe = s.flattenGatherNonHypE(g,fillfloat(1.492,len(vnmo)),fillfloat(0.0,len(eta)))
pduv = mul(div(sub(uv,vnmo),vnmo),100)
vnmop = vnmo
etap = eta
vnmoi = dixInversionVnmo(s1,uv)
etai = dixInversionEta(s1,ue,uv,vnmoi)
pdata = s1,so,g,vnmo,eta,wb,pduv,uv,ue,up1,up2,vnmop,etap,sv,se,q,qe,ps,cgs,vel,veu,eel,eeu,up1m,up2m,vnmoi,etai,vpt,etat,vl1,vl2,el1,el2
up2 = div(sub(up2,se.first),se.delta)
dedt = zerofloat(n1)
eeta = div(sub(eta,se.first),se.delta)
deedt = zerofloat(n1)
for i in range(n1):
dedt[i] = (up2[i]-up2[i-1])
deedt[i] =(eeta[i]-eeta[i-1])
dedt[0] = dedt[1]
deedt[0] = deedt[1]
plot1(dedt,s1,deedt,name="computed dedt slopes")
return pdata
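# Dix-type inversion from effective (rms) to interval parameters:
#   Vint^2(t_i) = (Vnmo^2(t_i)*t_i - Vnmo^2(t_{i-1})*t_{i-1}) / (t_i - t_{i-1})
# The eta version below applies the same layer-stripping idea to Vnmo^4*(1+8*eta).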
def dixInversionVnmo(s1,vnmo):
n = s1.count
vnmoi = zerofloat(n)
vnmo2 = mul(vnmo,vnmo)
for i in range(1,n):
t0 = s1.getValue(i-1)
t1 = s1.getValue(i)
vnmoi[i] = (vnmo2[i]*t1 - vnmo2[i-1]*t0)/(t1-t0)
vnmoi[0] = vnmoi[1]
return sqrt(vnmoi)
def dixInversionEta(s1,eta,vnmo,vnmoi):
n = s1.count
etai = zerofloat(n)
vnmoi4 = mul(mul(mul(vnmoi,vnmoi),vnmoi),vnmoi)
vnmo4 = mul(mul(mul(vnmo,vnmo),vnmo),vnmo)
for i in range(1,n):
t0 = s1.getValue(i-1)
t1 = s1.getValue(i)
etai[i] = ((vnmo4[i]*(1.0+8.0*eta[i])*t1-vnmo4[i-1]*(1.0+8.0*eta[i-1])*t0)/(t1-t0)-vnmoi4[i])/(8.0*vnmoi4[i])
etai[0] = etai[1]
return etai
def getTimeVaryingConstraints(s1,wbi,vl1,vl2,el1,el2,dr):
nt = s1.count
dt = s1.delta
ne = nt-wbi
p1min = zerodouble(nt)
p1max = zerodouble(nt)
p2min = zerodouble(nt)
p2max = zerodouble(nt)
leeta = zerofloat(nt)
ueeta = zerofloat(nt)
to = s1.getValues()
def vnmo2(vp,dt,n,to):
vnmo2 = zerodouble(n)
vps = mul(vp,vp)
vnmo2[0] = vps[0]*dt
for i in range(1,n):
vnmo2[i] = vnmo2[i-1]+vps[i]*dt
dts = dt
for i in range(n):
vnmo2[i] /= dts
dts += dt
return vnmo2
# compute dvdt and dedt min and max
lvnmo2 = vnmo2(vl1,dt,nt,to)
uvnmo2 = vnmo2(vl2,dt,nt,to)
lvnmo1 = sqrt(lvnmo2)
uvnmo1 = sqrt(uvnmo2)
vl1s = mul(vl1,vl1)
vl2s = mul(vl2,vl2)
#plot1(vl1,s1,floats(lvnmo1),name="vnmo vs Vnmo")
for i in range(wbi,nt):
p1min[i] = (vl1s[i] - lvnmo2[i])/(2*to[i]*lvnmo1[i])
p1max[i] = (vl2s[i] - uvnmo2[i])/(2*to[i]*uvnmo1[i])
p2min[i] = (p1min[i]/lvnmo1[i] - 1/to[i] + vl1s[i]*vl1s[i]*(1+8*el1[i]))/(8*to[i]*vl1s[i]*vl1s[i])
p2max[i] = (p1max[i]/uvnmo1[i] - 1/to[i] + vl2s[i]*vl2s[i]*(1+8*el2[i]))/(8*to[i]*vl2s[i]*vl2s[i])
p1minm,p2minm = mul(p1max,1),mul(p2max,1)
p1maxm,p2maxm = mul(p1min,1),mul(p2min,-1)#filldouble(dr,nt),filldouble(dr,nt)
vpq1 = mul(vl1s,vl1s)
vpq2 = mul(vl2s,vl2s)
leeta[0] = vpq1[0]*(1+8*el1[0])*dt
ueeta[0] = vpq2[0]*(1+8*el2[0])*dt
for i in range(1,nt):
leeta[i] = leeta[i-1]+vpq1[i]*(1+8*el1[i])*dt
ueeta[i] = ueeta[i-1]+vpq2[i]*(1+8*el2[i])*dt
v1rms4 = zerofloat(nt);
v2rms4 = zerofloat(nt); dts = dt
for i in range(nt):
v1rms4[i] = lvnmo2[i]*lvnmo2[i]*dts
v2rms4[i] = uvnmo2[i]*uvnmo2[i]*dts
dts += dt
leeta = mul(sub(div(leeta,v1rms4),1),0.125)
ueeta = mul(sub(div(ueeta,v2rms4),1),0.125)
return p1minm,p1maxm,p2minm,p2maxm,floats(lvnmo1),floats(uvnmo1),leeta,ueeta
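# Builds linear lower/upper interval-velocity bounds: constant lp*vpl and up*vpl above the
# water bottom (index wbi), ramping linearly toward lp*vpu and up*vpu at the last sample.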
def makeIntconsts(vpl,vpu,s1,wbi,lp,up):
nt = s1.count
ne = nt-wbi
vpl1 = lp*vpl
vpl2 = up*vpl
vpu1 = lp*vpu
vpu2 = up*vpu
dv1 = (vpu1-vpl1)/ne
dv2 = (vpu2-vpl2)/ne
vp1 = fillfloat(vpl1,nt)
vp2 = fillfloat(vpl2,nt)
for i in range(ne):
vp1[i+wbi] = vpl1+i*dv1
vp2[i+wbi] = vpl2+i*dv2
return vp1,vp2
def nonHypPlots(pdata):
s1,so,g,vnmo,eta,wb,pduv,uv,ue,up1,up2,vnmop,etap,\
sv,se,q,qe,ps,cgs,vel,veu,eel,eeu,up1m,up2m,vnmoi,etai,vpt,etat,vl1,vl2,el1,el2 = pdata
vve=True
print 'plotting'
ut = floats(s1.getValues())
cmplims=[so.first,s1.first,so.last,s1.last]
pduvm = mul(div(sub(up1m,vnmo),vnmo),100)
#plotModelsCmp(g,s1,so,vnmo,eta,'modelscmp',clims=[cmplims[0],cmplims[2]])
plot1(sub(eta,up2),s1,name='eta dp difference')
#plot1(vnmo,s1,uv,cv,name="vnmo")
plot1(eel,s1,eeu,name="eta eff bounds")
plot1(vel,s1,veu,name="vnmo eff bounds")
plot1(pduv,s1,name='vnmo % difference dp')
plot1(eta,s1,ue,name="eta and eta estimated",colm=3)
plot1(vnmo,s1,uv,name="vnmo and vnmo estimated",colm=3)
plot1(vnmo,s1,uv,name="vnmo and vnmo estimated pts",cgs=cgs)
plot1(eta,s1,ue,name="eta and eta estimated pts",cgs=cgs)
plot1comp(s1,vnmo,uv,pduv,name="vnmo nh comp",vel=True)
plot1comp(s1,eta,ue,sub(ue,eta),name="eta nh comp",eta=True,clims2=[-0.025,0.025])
plot1comp(s1,vnmo,up1m,pduvm,name="vnmo and vnmo max comp",vel=True)
plot1comp(s1,eta,up2m,sub(up2m,eta),name="eta and eta max comp",eta=True)
plot1(mul(vpt,0.001),s1,vnmoi,name="vnmo interval vs dix",bounds=[vl1,vl2])
plot1(etat,s1,etai,name="eta interval vs dix",bounds=[el1,el2])
plot1(sub(mul(vpt,0.001),vnmoi),s1,name="vnmoi errors")
plot1(sub(etat,etai),s1,name="etai errors")
ll = [3.0,4.0,5.0,6.0,7.0,8.0,9.0] # 3-9 sec.
#ll = [2.5,3.5,4.5,5.5,6.5,7.5,8.5] # 3-9 sec.
#ll = cgs
llims = [0.0,0.60]
for l in ll:
ls = str(l)
l = round(l/s1.delta)
plot2(ps[l],s1=sv,s2=se,name='vnmo vs eta t='+ls,vve=vve,\
mx=[up1[l],up2[l],vnmop[l],etap[l]],\
sem=True,cmin=llims[0],cmax=llims[1])
plot2(q,s1,so,name='flat nh cmp',cmp=True,perc=99)
plot2(q,s1,so,name='flat nh cmp zoom',cmp=True,perc=99,lims=[so.first,5.5,so.last,7.0])
plot2(qe,s1,so,name='flat real nh cmp',cmp=True,perc=99)
plot2(g,s1,so,name='cmp',cmp=True,perc=99,lims=cmplims)
ps2 = ps
ps = zerofloat(len(ps2),len(ps2[0]),len(ps2[0][0]))
Transpose.transposeP(ps2,ps)
plot3(ps,cmap=jet,s1=s1,s2=se,s3=sv,u=[up1,up2,ut],name='eta semblance est')
plot3(ps,cmap=jet,s1=s1,s2=se,s3=sv,name='eta semblance clip',perc=99)
plot3(ps,cmap=jet,s1=s1,s2=se,s3=sv,name='eta semblance')
plot3(ps,cmap=jet,s1=s1,s2=se,s3=sv,u=[vnmop,etap,ut],name='eta semblance exact')
def goNonHyp(data):
s1,so,g,vnmo,eta,wb = data
vpl = [1.40,3.00,0.005]
epl = [0,0.15,0.005]
r1min,r1max = -0.50,0.10
r2min,r2max = -0.40,0.08
dr = 0.02
nv = int((vpl[1]-vpl[0])/vpl[2])
ne = int((epl[1]-epl[0])/epl[2])
sv = Sampling(nv,vpl[2],vpl[0])
se = Sampling(ne,epl[2],epl[0])
# Test Data
#g = Gather(s1,so,sv)
#vnmo = g.makeLinearParameter(vpl[0],vpl[1])
#g = g.modelGatherHyp(30,1e6,vnmo)
s = SemblanceOld(s1,so,sv,se,20)
s.setStretchMute(0.50)
ps = s.applyNonHyp(g)
wbi = int(wb/s1.delta)
stk = stackSem2(ps)
ds = DynamicSolver(sv.count,se.count,r1min,r1max,r2min,r2max,dr,wbi)
uv,ue = ds.findSolution(stk,ps)
ue = add(se.first,mul(ue,se.delta))
uv = add(sv.first,mul(uv,sv.delta))
ut = floats(s1.getValues())
q = s.flattenGatherNonHypE(g,uv,ue)
qe = s.flattenGatherNonHypE(g,vnmo,eta)
cv,ce = s.pickMaxSemblance(ps,se)
pduv = mul(div(sub(uv,vnmo),vnmo),100)
pdcv = mul(div(sub(cv,vnmo),vnmo),100)
vve=True
print 'plotting'
cmplims=[-6.5,s1.first,so.last,s1.last]
plotModelsCmp(g,s1,so,vnmo,eta,'modelscmp',clims=[cmplims[0],cmplims[2]])
plot1(stk,s1,name="sem stack")
plot1(sub(eta,ue),s1,name='eta dp difference')
plot1(vnmo,s1,uv,cv,name="vnmo")
plot1(pduv,s1,name='vnmo % difference dp')
plot1(pdcv,s1,name='vnmo % difference max')
plot1(eta,s1,ue,name="eta and eta estimated")
plot1(vnmo,s1,uv,name="vnmo and vnmo estimated")
plot2(ps[500],s1=sv,s2=se,name='vnmo vs eta t=500',vve=vve)
plot2(q,s1,so,name='flat cmp',cmp=True,perc=98)
plot2(qe,s1,so,name='flat real cmp',cmp=True,perc=98)
plot2(g,s1,so,name='cmp',cmp=True,perc=99,lims=cmplims)
plot3(ps,cmap=jet,s1=sv,s2=se,s3=s1,u=[uv,ue,ut],perc=99,\
name='eta semblance',paper='bpnhsu')
plot3(ps,cmap=jet,s1=sv,s2=se,s3=s1,name='eta semblance')
def stackSem(s):
n2 = len(s)
n1 = len(s[0])
stk = zerofloat(n1)
for i in range(n2):
stk = add(stk,s[i])
return stk
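# Stacks the 3D semblance over eta and over the velocities inside the corridor [vl1,vl2]
# at each time sample; the result is passed to DynamicSolver.findSolution alongside the
# full semblance volume.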
def stackSem2(s,vl1,vl2,sv):
n3 = len(s)
n2 = len(s[0])
n1 = len(s[0][0])
stk = zerofloat(n3)
for i3 in range(n3):
il = sv.indexOfNearest(vl1[i3])
iu = sv.indexOfNearest(vl2[i3])
for i2 in range(n2):
for i1 in range(il,iu):
stk[i3] += s[i3][i2][i1]
return stk
def stackGats2(g,s1):
f = g[-1]
es = ExponentialSmoother(1.0/(fpeak*s1.delta))
es.apply(f,f)
nt = len(f)
ht = HilbertTransformFilter()
ft = zerofloat(nt)
ht.apply(nt,f,ft)
h = sqrt(add(pow(f,2.0),pow(ft,2.0)));
return h
def getData(ix2,stack=None,xmax=None):
makeSubset(ix2)
s1,so,g,vp,de,ep,th,wb = readSubset(ix2,xmax)
if stack:
makeSubset(ix2-1)
makeSubset(ix2+1)
dat1 = readSubset(ix2-1)
dat2 = readSubset(ix2+1)
g1 = dat1[2]; g2 = dat2[2];
g = add(g1,add(g2,g))
eta = div(sub(ep,de),add(1,mul(2,de)))
#plot1(mul(vp,sqrt(add(1,mul(2,de)))),s1,name="vnmo exact interval")
#plot1(eta,s1,name="eta exact interval")
vrms,eeta = makeEffectiveParams(s1,vp,eta,de)
#plot1(vp,s1,hlabel="vp")
#plot1(ep,s1,hlabel="epsilon")
#plot1(de,s1,hlabel="delta")
#plot1(eta,s1,hlabel="eta")
#plot1(vrms,s1,hlabel="vrms")
#plot1(eeta,s1,hlabel="eeta")
#plot2(g,s1,so,cmap=gray,perc=99,cmp=True)
#vpz = getModel("vp")
#epz = getModel("epsilon")
#dez = getModel("delta")
#thz = getModel("theta")
#s1z,s2 = getSamplings()
#lims = [s2.first,s1z.first,20,s1z.last]
#vpz = mul(vpz,0.001)
#plot2(vpz,s1z,s2,name='vpz',vpz=True,paper="vpz",lims=lims,cbar="Velocity (km/s)",cmppts=ix2)
#plot2(epz,s1z,s2,name='epz',vpz=True,paper="epz",lims=lims,cbar="Epsilon",cmppts=ix2)
#plot2(dez,s1z,s2,name='dez',vpz=True,paper="dez",lims=lims,cbar="Delta",cmppts=ix2)
#plot2(thz,s1z,s2,name='thz',vpz=True,paper="thz",lims=lims,cbar="Theta",cmppts=ix2,cmap=rwb,perc=98)
return s1,so,g,vrms,eeta,wb,vp,eta
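# Effective (rms) parameters from the interval model (Vnmo_int = Vp*sqrt(1+2*delta)):
#   Vrms^2(t0)  = (1/t0) * sum_i Vnmo_int^2(t_i) * dt
#   eta_eff(t0) = ( sum_i Vnmo_int^4(t_i)*(1+8*eta_int(t_i))*dt / (t0*Vrms^4(t0)) - 1 ) / 8
# The rms velocity is scaled by 0.001 below to convert to km/s.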
def makeEffectiveParams(s1,vp,eta,de):
n = len(vp)
dt = s1.delta
vrms = zerofloat(n)
eeta = zerofloat(n)
vpa = mul(vp,sqrt(add(1,mul(2,de))))
vps = mul(vpa,vpa)
vpq = mul(vps,vps)
to = floats(s1.getValues())
vrms[0] = vps[0]*dt
for i in range(1,n):
vrms[i] = vrms[i-1]+vps[i]*dt
dts = dt
for i in range(n):
vrms[i] /= dts
dts += dt
eeta[0] = vpq[0]*(1+8*eta[0])*dt
for i in range(1,n):
eeta[i] = eeta[i-1]+vpq[i]*(1+8*eta[i])*dt
vrms4 = zerofloat(n); dts = dt
for i in range(n):
vrms4[i] = vrms[i]*vrms[i]*dts
dts += dt
eeta = mul(sub(div(eeta,vrms4),1),0.125)
return mul(sqrt(vrms),0.001),eeta
################################################################################
# Tests
def goHypTest():
goTestData()
fpeak = 30
snr = 1.0e6
#snr = 5.0
#snr = 0.1
rmin,rmax = -0.10,0.10
dr = 0.01
print 'vcmin=',rmin*sv.delta,',vcmax=',rmax*sv.delta
print 'sparse grid=',st.delta/dr,'seconds'
print 'sigma=',1.0/(fpeak*st.delta)
print 'model a gather'
g = Gather(st,sx,sv)
vnmo = g.makeLinearParameter(2.1,3.5)
p = g.modelGatherHyp(fpeak,snr,vnmo)
print 'compute semblance spectrum'
s = Semblance(st,sx,sv)
s.setStretchMute(1.0)
ps = s.applyHyp(p)
print 'pick velocities with dynamic programming'
ds = DynamicSolver(sv.count,rmin,rmax,dr)
ac = ds.accumulate(transpose(ps))
uv = ds.findSolution(transpose(ps))
uv = add(sv.first,mul(uv,sv.delta))
q = s.flattenGatherHyp(p,uv)
cv = s.pickMaxSemblance(ps)
pduv = mul(div(sub(uv,vnmo),vnmo),100)
pdcv = mul(div(sub(cv,vnmo),vnmo),100)
print 'plotting'
#plot2(transpose(ac),cmap=jet,u=vs,s1=st,s2=sv,name='accumulated semblance',sem=True)
plot1(vnmo,st,uv,cv,name="vnmo")
plot2(ps,u=cv,s1=st,s2=sv,name='semblance max pick',sem=True)
plot1(pduv,st,name='vnmo % difference dp')
plot1(pdcv,st,name='vnmo % difference max')
plot1(vnmo,st,uv,name="vnmo and vnmo estimated")
plot2(ps,u=uv,s1=st,s2=sv,name='semblance',sem=True)
plot2(ps,s1=st,s2=sv,name='semblance',sem=True)
plot2(q,s1=st,s2=sx,name='flat cmp',cmp=True)
plot2(p,s1=st,s2=sx,name='cmp',cmp=True)
def goNonHypTest(data):
s1,so,g,vnmo,eta,wb,vpt,etat = data
st = s1
sx = so
#goTestData()
fpeak = 30
#snr = 1.0e6
snr = 5.0
#snr = 0.1
rmin1,rmax1 = -0.05,0.55
rmin2,rmax2 = -0.05,0.15
dr = 0.05
vpl = [1.492,3.00,0.004]
epl = [0.000,0.15,0.004]
nv = int((vpl[1]-vpl[0])/vpl[2])
ne = int((epl[1]-epl[0])/epl[2])
sv = Sampling(nv,vpl[2],vpl[0])
se = Sampling(ne,epl[2],epl[0])
print 'vcmin=',rmin1*sv.delta,',vcmax=',rmax1*sv.delta
print 'ecmin=',rmin2*se.delta,',ecmax=',rmax2*se.delta
print 'sparse grid=',st.delta/dr,'seconds'
print 'sigma=',1.0/(fpeak*st.delta)
print 'model a gather'
g = Gather(st,sx,sv,se)
#vnmo = g.makeLinearParameter(2.1,3.5)
#eta = g.makeLinearParameter(0.05,0.4)
p = g.modelGatherNonHyp(fpeak,snr,vnmo,eta)
rs = [g,vnmo,eta,p,fpeak,snr,rmin1,rmax1,rmin2,rmax2,dr,sv,se,st,sx]
goNonHypTest1(rs)
#goNonHypTest2(rs)
def goNonHypTest1(rs):
g,vnmo,eta,p,fpeak,snr,rmin1,rmax1,rmin2,rmax2,dr,sv,se,st,sx = rs
print 'compute semblance spectrum'
sw = Stopwatch(); sw.restart();
s = SemblanceOld(st,sx,sv,se,fpeak)
s.setStretchMute(1.0)
ps = s.applyNonHyp(p)
sw.stop(); print 'total time =',sw.time(),'seconds'
print 'pick velocities with dynamic programming'
ds = DynamicSolver(sv.count,se.count,rmin1,rmax1,rmin2,rmax2,dr)
uv,ue = ds.findSolution(ps)
ue = add(se.first,mul(ue,se.delta))
uv = add(sv.first,mul(uv,sv.delta))
ut = floats(st.getValues())
q = s.flattenGatherNonHypE(p,uv,ue)
qe = s.flattenGatherNonHypE(p,vnmo,eta)
cv,ce = s.pickMaxSemblance(ps,se)
plots(vnmo,eta,uv,ue,ut,p,q,ps,cv,ce,se,qe,st,sx,sv,se,"eta")
def goNonHypTest2(rs):
  g,vnmo,eta,p,fpeak,snr,rmin1,rmax1,rmin2,rmax2,dr,sv,se,st,sx = rs
vhor = cvhor(vnmo,eta)
print 'compute semblance spectrum'
s = Semblance(st,sx,sv,se,fpeak)
s.setStretchMute(1.0)
ps = s.applyNonHyp(p,sh)
print 'pick velocities with dynamic programming'
ds = DynamicSolver(sv.count,sh.count,rmin1,rmax1,rmin2,rmax2,dr)
uv,uh = ds.findSolution(ps)
uh = add(sh.first,mul(uh,sh.delta))
uv = add(sv.first,mul(uv,sv.delta))
ut = floats(st.getValues())
q = s.flattenGatherNonHypH(p,uv,uh)
cv,ch = s.pickMaxSemblance(ps,sh)
ce = ceta(uv,uh)
plot1(eta,st,ce,name="eta from vhor")
plots(vnmo,vhor,uv,uh,ut,p,q,ps,cv,ch,sh,v2="vhor")
def plots(vnmo,eta,uv,ue,ut,p,q,ps,cv,ca,sa,qe,st,sx,sv,se,v2):
vve = None; vvh = None;
if v2=="eta": vve=True
if v2=="vhor": vvh=True
print 'plot '+v2
pduv = mul(div(sub(uv,vnmo),vnmo),100)
pdcv = mul(div(sub(cv,vnmo),vnmo),100)
plot1(eta,st,ue,ca,name=v2)
plot1(sub(eta,ue),st,name=v2+' dp difference')
plot1(sub(eta,ca),st,name=v2+' max difference')
plot1(eta,st,ue,name=v2,hlabel="eta")
plot1(vnmo,st,uv,cv,name='vnmo',hlabel="velocity (km/s)")
plot1(pduv,st,name='vnmo % difference dp')
plot1(pdcv,st,name='vnmo % difference max')
plot1(vnmo,st,uv,name='vnmo',hlabel="velocity (km/s)",paper='vnmoestm1')
plot1(eta,st,ue,name='eta',hlabel="eta",paper='estaestm1')
plot2(ps[900],s1=sv,s2=sa,name='vnmo vs '+v2+'t=900',vve=vve,vvh=vvh)
plot2(ps[700],s1=sv,s2=sa,name='vnmo vs '+v2+'t=700',vve=vve,vvh=vvh)
plot2(ps[500],s1=sv,s2=sa,name='vnmo vs '+v2+'t=500',vve=vve,vvh=vvh)
plot2(ps[300],s1=sv,s2=sa,name='vnmo vs '+v2+'t=300',vve=vve,vvh=vvh)
plot2(ps[100],s1=sv,s2=sa,name='vnmo vs '+v2+'t=100',vve=vve,vvh=vvh)
plot2(ps[10 ],s1=sv,s2=sa,name='vnmo vs '+v2+'t=10',vve=vve,vvh=vvh)
plot3(ps,cmap=jet,s1=sv,s2=sa,s3=st,u=[uv,ue,ut],perc=99,\
name=v2+' semblance',paper='nhsu')
plot3(ps,cmap=jet,s1=sv,s2=sa,s3=st,name=v2+' semblance')
plot2(q,s1=st,s2=sx,name=v2+' flat cmp',cmp=True)
plot2(qe,s1=st,s2=sx,name=v2+' flat exact cmp',cmp=True)
plot2(p,s1=st,s2=sx,name=v2+' cmp',cmp=True)
def cvhor(vnmo,eta):
return mul(vnmo,sqrt(add(mul(2,eta),1)))
def ceta(vnmo,vhor):
return mul(0.5,sub(pow(div(vhor,vnmo),2),1))
def goNishantTest():
omin = 0.025
omax = 2.5
do = 0.025
no = inro((omax-omin)/do)
so = Sampling(no,do,omin)
lt = 3
dt = 0.004
nt = inro(lt/dt)
st = Sampling(nt,dt,0.0)
fname = "/Users/amunoz/Home/data/bp/dat/test/pp_test.txt"
g = zerofloat(nt,no)
fil = File(fname)
br = BufferedReader(FileReader(fil))
for io in range(no):
for it in range(nt):
lns = br.readLine()
if lns:
lnf = Float.parseFloat(lns)
g[io][it] = lnf
br.close()
r1min,r1max = -0.50,0.50
r2min,r2max = -0.50,0.50
dr = 0.5
sv = Sampling(300,0.01,1.0)
se = Sampling(80,0.005,0)
#s = Semblance(st,so,15)
#ps = s.applyNonHypV(g,sv,se)
#ds = DynamicSolver(sv.count,se.count,r1min,r1max,r2min,r2max,dr)
#up1,up2 = ds.findSolution(ps)
#up1 = add(sv.first,mul(up1,sv.delta))
#up2 = add(se.first,mul(up2,se.delta))
#q = s.flattenGatherNonHypE(g,up1,up2)
#plot3(ps,cmap=jet,s1=sv,s2=se,s3=st,name='semblance')
#plot2(q,s1=st,s2=so,name='flat cmp',cmp=True)
#plot2(q,s1=st,s2=so,name='cmp',cmp=True)
s = SemblanceOld(st,so,sv,15)
ps = s.applyHyp(g)
plot2(ps,s1=st,s2=sv,name='semblance',sem=True)
plot2(g,s1=st,s2=so,name='cmp',cmp=True)
return
ds = DynamicSolver(sv.count,r1min,r1max,dr)
uv = ds.findSolution(transpose(ps))
uv = add(sv.first,mul(uv,sv.delta))
q = s.flattenGatherHyp(g,uv)
plot2(ps,u=uv,s1=st,s2=sv,name='semblance',sem=True)
plot2(ps,s1=st,s2=sv,name='semblance',sem=True)
plot2(q,s1=st,s2=so,name='flat cmp',cmp=True)
plot2(g,s1=st,s2=so,name='cmp',cmp=True)
###############################################################################
# Data Samplings
def goTestData():
global st,sx,sv,se,sh
global vp,ep,hp
vp = [1.8,4.0,0.02]
ep = [0.0,0.6,0.01]
hp = csvhor(vp,ep)
st = Sampling(1001,0.004,0.0)
sx = Sampling(251, 0.050,0.0)
nv = int((vp[1]-vp[0])/vp[2])
ne = int((ep[1]-ep[0])/ep[2])
nh = int((hp[1]-hp[0])/hp[2])
sv = Sampling(nv,vp[2],vp[0])
se = Sampling(ne,ep[2],ep[0])
sh = Sampling(nh,hp[2],hp[0])
print 'nv=',nv
print 'ne=',ne
print 'nh=',nh
print 'fv,lv=',vp[0],',',vp[1]
print 'fe,le=',ep[0],',',ep[1]
print 'fh,lh=',hp[0],',',hp[1]
def csvhor(vp,ep):
v1 = vp[0]*sqrt(2*ep[0]+1)
v2 = vp[1]*sqrt(2*ep[1]+1)
return [v1,v2,vp[2]]
###############################################################################
# Plots
gray = ColorMap.GRAY
jet = ColorMap.JET
rwb = ColorMap.RED_WHITE_BLUE
def plot1(x,s1=None,u=None,v=None,name=None,\
hlabel=None,cgs=None,paper=None,colm=1,hlim=None,bounds=None):
pp = PlotPanel(PlotPanel.Orientation.X1DOWN_X2RIGHT)
pv = pp.addPoints(x)
if s1: pv.set(s1,x)
if u: addLine(pp,u,s1,RED)
if v: addLine(pp,v,s1,BLUE)
if cgs and s1:
l = x
if u:
l = u
cgsv = add(s1.first,mul(floats(cgs),s1.delta))
cgsx = zerofloat(len(cgs))
for ic in range(len(cgs)):
cgsx[ic] = l[cgs[ic]]
pv = pp.addPoints(cgsv,cgsx)
pv.setStyle("rO")
if bounds:
ul,ll = bounds
addLine(pp,ul,s1,BLUE)
addLine(pp,ll,s1,BLUE)
pp.setVLabel("Time (s)")
if hlim: pp.setHLimits(hlim[0],hlim[1])
if hlabel: pp.setHLabel(hlabel)
frame(pp,name,[500,500],paper,colm)
def addLine(p,u,s=None,color=BLACK):
if s: pv = p.addPoints(s,u)
else: pv = p.addPoints(u)
pv.setLineColor(color)
def plot2(x,s1=None,s2=None,u=None,u2=None,cmap=None,\
cmin=0,cmax=0,perc=100,cbar=None,name=None,\
nrt=None,cmp=None,sem=None,vve=None,vvh=None,\
vpz=None,lims=None,cmppts=None,paper=None,\
size=[673,640],colm=1,mx=None):
if sem: cbar = "Semblance"
pan = panel(cbar)
pix = pan.addPixels(x)
if s1 and s2:
pix.set(s1,s2,x)
if cmp:
cmapo = gray
pan.setHLabel("Offset (km)")
pan.setVLabel("Time (s)")
size=[800,1000]
colm=2
if sem:
cmapo = jet
pan.setHLabel("Vnmo (km/s)")
pan.setVLabel("Time (s)")
colm=2
if vve:
cmapo = jet
pan.setHLabel("Eta")
pan.setVLabel("Vnmo (km/s)")
colm=2
if vvh:
cmapo = jet
pan.setHLabel("Vhor (km/s)")
pan.setVLabel("Vnmo (km/s)")
if vpz:
cmapo = jet
pan.setVLabel("Depth (km)")
pan.setHLabel("Distance (km)")
size = [569,537]
colm=3
if cmap: cmapo = cmap
pix.setColorModel(cmapo)
if nrt:
pix.setInterpolation(PixelsView.Interpolation.NEAREST)
if u:
if s1:
pt = pan.addPoints(s1,u)
else:
pt = pan.addPoints(u)
pt.setLineColor(WHITE)
pt.setLineWidth(3)
if u2:
if s1:
pt2 = pan.addPoints(s1,u2)
else:
pt2 = pan.addPoints(u2)
pt2.setLineColor(WHITE)
pt2.setLineWidth(3)
pt2.setLineStyle(PointsView.Line.DASH)
if mx:
mx1,mx2,mxe1,mxe2 = mx
# est
amx1 = zerofloat(1)
amx2 = zerofloat(1)
amx1[0],amx2[0] = mx1,mx2
# max
index = zeroint(2)
mv = max(x,index)
ipx1,ipx2 = index
px1,px2 = ipx1*s1.delta+s1.first,ipx2*s2.delta+s2.first
apx1 = zerofloat(1)
apx2 = zerofloat(1)
apx1[0],apx2[0] = px1,px2
# exact
aex1 = zerofloat(1)
aex2 = zerofloat(1)
aex1[0],aex2[0] = mxe1,mxe2
pv1 = pan.addPoints(amx1,amx2)
pv2 = pan.addPoints(apx1,apx2)
pv3 = pan.addPoints(aex1,aex2)
pv1.setStyle("wo")
pv2.setStyle("wx")
pv3.setStyle("wO")
pv1.setMarkSize(20)
pv2.setMarkSize(20)
pv3.setMarkSize(15)
if cmppts:
ix = cmppts
cmpl = fillfloat(s2.getValue(ix),1)
x1 = fillfloat(s1.delta*15,1)
pts = pan.addPoints(x1,cmpl)
pts.setMarkSize(10)
pts.setStyle("rO")
if cmin<cmax:
pix.setClips(cmin,cmax)
if perc<100:
pix.setPercentiles(100-perc,perc)
if lims:
pan.setLimits(lims[0],lims[1],lims[2],lims[3])
elif s1 and s2:
pan.setLimits(s2.first,s1.first,s2.last,s1.last)
frame(pan,name,paper=paper,size=size,colm=colm)
def plot3(x,cmap=jet,s1=None,s2=None,s3=None,u=None,\
cmin=0,cmax=0,perc=100,cbar=None,name=None,paper=None):
world = World()
if s1 and s2 and s3:
ipg = ImagePanelGroup(s1,s2,s3,x)
else:
ipg = ImagePanelGroup(x)
ipg.setColorModel(cmap)
if cmin<cmax:
ipg.setClips(cmin,cmax)
if perc<100:
ipg.setPercentiles(100-perc,perc)
world.addChild(ipg)
if u:
lg = makePoints(u,YELLOW,8)
world.addChild(lg)
if s1:
nt = s1.count
us = zerofloat(nt)
for i in range(nt):
us[i] = x[s3.indexOfNearest(u[0][i])][s2.indexOfNearest(u[1][i])][i]
plot1(us,s1,name="semblance along path "+name)
sf = SimpleFrame(world)
ov = sf.getOrbitView()
ov.setAxesScale(0.5,2.0,0.1)
ov.setScale(14.843407)
ov.setAzimuth(123.91459)
ov.setElevation(26.942446)
ipg.setSlices(\
round((7.008-s1.first)/s1.delta),\
round((0.112-s2.first)/s2.delta),\
round((1.659-s3.first)/s3.delta))
if name:
sf.setTitle(name)
sf.setVisible(setvis)
if paper and papers:
sf.paintToFile(pngDir+paper+".png")
if slides:
sf.paintToFile(pngDir+name+".png")
def makePoints(u,color,size):
x1,x2,x3 = u
n = len(x1)
xyz = zerofloat(3*n)
copy(n,0,1,x1,0,3,xyz)
copy(n,0,1,x2,1,3,xyz)
copy(n,0,1,x3,2,3,xyz)
rgb = None
pg = LineGroup(xyz,rgb)
ls = LineState()
ls.setWidth(size)
ls.setSmooth(False)
ss = StateSet()
ss.add(ls)
#pg = PointGroup(xyz,rgb)
#ps = PointState()
#ps.setSize(8)
#ps.setSmooth(False)
#ss = StateSet()
#ss.add(ps)
cs=ColorState()
cs.setColor(color)
ss.add(cs)
pg.setStates(ss)
return pg
def panel(cbar=None):
p = PlotPanel(PlotPanel.Orientation.X1DOWN_X2RIGHT)
cb = p.addColorBar()
#cb.setWidthMinimum(100)
if cbar:
cb.setLabel(cbar)
return p
def frame(panel,name,size=[600,800],paper=None,colm=None):
#panel.setVLabel('time (s)')
frame = PlotFrame(panel)
#frame.setBackground(Color(204,204,204,255))
#frame.setFontSizeForSlide(1.0,1.0)
frame.setSize(size[0],size[1])
if name:
frame.setTitle(name)
frame.setVisible(setvis)
if paper and papers:
if paper: name=paper
if colm==2:
frame.setFontSizeForPrint(8.0,469.0) # 2 column
frame.paintToPng(720,6.51,pngDir+name+'.png')
elif colm==3:
frame.setFontSizeForPrint(8.0,165.3) # 1/3 column
frame.paintToPng(720,2.17,pngDir+name+'.png')
else:
frame.setFontSizeForPrint(8.0,222.0) # 1 column
frame.paintToPng(720,3.08,pngDir+name+'.png')
if slides:
ar = 16.0/9.0;
fw = 0.9; fh = 0.90;
if colm==2:
fw = 0.45; fh = 0.90;
if colm==3:
fw = 0.25; fh = 0.90;
printSlide(frame,pngDir+name,fw,fh,ar)
"""
Method to print slides.
@param frame a PlotFrame class
@param fname the png file name
@param fw the fraction of the slide width that the figure occupies
@param fh the fraction of the slide height that the figure occupies
@param ar the aspect ratio of the slide (use keynote default pixel widths)
@param sc scalar for frame size that is dependent on screen resolution
(my MBP 15" needs sc=2)
"""
def printSlide(frame,fname,fw,fh,ar,sc=2):
swp = 1920 # 16/9 keynote slide default
if ar==4.0/3.0: swp = 1024 # 4/3 keynote slide default
fwi = int(swp*fw/sc)+1
fhi = int(swp/ar*fh/sc)+1
frame.setSize(fwi,fhi)
frame.setFontSizeForSlide(fw,fh,ar)
frame.paintToPng(swp*fw,1.0,fname+".png")
def plotModelsCmp(g,s1,so,v,e,name,clims=None,perc=99):
pp = PlotPanel(1,3,PlotPanel.Orientation.X1DOWN_X2RIGHT)
pg = pp.addPixels(0,0,s1,so,g)
pv = pp.addPoints(0,1,s1,v)
pe = pp.addPoints(0,2,s1,e)
pp.setVLabel(0,"Time (s)")
pp.setHLabel(0,"Offset (km)")
pp.setHLabel(1,"Velocity (km/s)")
pp.setHLabel(2,"Eta")
pg.setPercentiles(100-perc,perc)
if clims:
pp.setHLimits(0,clims[0],clims[1])
frame = PlotFrame(pp)
frame.setSize(800,400)
frame.setTitle(name)
frame.setVisible(setvis)
if papers:
frame.setFontSizeForPrint(8.0,469.0) # 2 column
frame.paintToPng(720,6.51,pngDir+name+'.png')
if slides:
frame.setFontSizeForSlide(1.0,0.9,16.0/9.0) # 2 column
frame.paintToPng(720,2.4,pngDir+name+'.png')
def plot1comp(s1,v1,v2,dv,name,clims=None,clims2=None,vel=None,eta=None):
pp = PlotPanel(1,2,PlotPanel.Orientation.X1DOWN_X2RIGHT)
pv1 = pp.addPoints(0,0,s1,v1)
pv2 = pp.addPoints(0,0,s1,v2)
pv2.setStyle("r-")
pd = pp.addPoints(0,1,s1,dv)
pp.setVLabel(0,"Time (s)")
if vel:
pp.setHLabel(0,"Velocity (km/s)")
pp.setHLabel(1,"% Difference")
if eta:
pp.setHLabel(0,"Eta")
pp.setHLabel(1,"Difference")
if clims:
pp.setHLimits(0,clims[0],clims[1])
frame = PlotFrame(pp)
if clims2:
pp.setHLimits(1,clims2[0],clims2[1])
#frame.setSize(646,600)
frame.setSize(658,555)
frame.setTitle(name)
frame.setVisible(setvis)
if papers:
frame.setFontSizeForPrint(8.0,469.0) # 2 column
frame.paintToPng(720,6.51,pngDir+name+'.png')
if slides:
frame.setFontSizeForSlide(0.65,0.9,16.0/9.0) # 2 column
frame.paintToPng(720,2.4,pngDir+name+'.png')
def plot1special(ver,eer):
pp = PlotPanel(1,2,PlotPanel.Orientation.X1DOWN_X2RIGHT)
pv1 = pp.addPoints(0,0,ver)
pv2 = pp.addPoints(0,1,eer)
pp.setHLabel(0,"Total eta error")
pp.setHLabel(1,"Total vnmo error")
pp.setVLabel(0,"CMP")
frame = PlotFrame(pp)
frame.setSize(658,555)
frame.setVisible(True)
frame.setFontSizeForSlide(0.65,0.9,16.0/9.0) # 2 column
frame.paintToPng(720,2.4,'./rms.png')
"""
Converts 1D array to 1D float array
"""
def floats(x):
n = len(x)
xd = zerofloat(n)
for i in range(n):
xd[i] = float(x[i])
return xd
def inro(x):
return int(round(x))
def ince(x):
return int(ceil(x))
def infl(x):
return int(floor(x))
#------------------------------------------------------------------------------#
class RunMain(Runnable):
def run(self):
main(sys.argv)
SwingUtilities.invokeLater(RunMain())
|
# pylint: disable=protected-access,exec-used
import math
import os
import re
import json
import tempfile
from typing import List, Tuple, Callable, BinaryIO, Optional
from abc import ABC, abstractmethod
from . import Type, Value, Expr, Env, Error
from ._util import byte_size_units, chmod_R_plus
class Base:
"""
    Base class for standard library implementations. An instance has an
    attribute named for each available function, holding a ``Function``
    object that provides the type-checking logic and implementation.
Subclasses may replace these objects with custom context-dependent logic,
or add new ones. For example, ``stdout()`` is only meaningful in task
output sections.
"""
_write_dir: str # directory in which write_* functions create files
def __init__(self, write_dir: str = ""):
self._write_dir = write_dir if write_dir else tempfile.gettempdir()
# language built-ins
self._at = _At()
self._land = _And()
self._lor = _Or()
self._negate = StaticFunction(
"_negate", [Type.Boolean()], Type.Boolean(), lambda x: Value.Boolean(not x.value)
)
self._add = _AddOperator()
self._sub = _ArithmeticOperator("-", lambda l, r: l - r)
self._mul = _ArithmeticOperator("*", lambda l, r: l * r)
self._div = _ArithmeticOperator("/", lambda l, r: l // r)
self._rem = StaticFunction(
"_rem", [Type.Int(), Type.Int()], Type.Int(), lambda l, r: Value.Int(l.value % r.value)
)
self._eqeq = _ComparisonOperator("==", lambda l, r: l == r)
self._neq = _ComparisonOperator("!=", lambda l, r: l != r)
self._lt = _ComparisonOperator("<", lambda l, r: l < r)
self._lte = _ComparisonOperator("<=", lambda l, r: l <= r)
self._gt = _ComparisonOperator(">", lambda l, r: l > r)
self._gte = _ComparisonOperator(">=", lambda l, r: l >= r)
# static stdlib functions
def static(
argument_types: List[Type.Base], return_type: Type.Base, name: Optional[str] = None
):
"""
helper/decorator to create a static function from type signature and a lambda
"""
return lambda F: setattr(
self,
name or F.__name__,
StaticFunction(name or F.__name__, argument_types, return_type, F),
)
static([Type.Float()], Type.Int(), "floor")(lambda v: Value.Int(math.floor(v.value)))
static([Type.Float()], Type.Int(), "ceil")(lambda v: Value.Int(math.ceil(v.value)))
static([Type.Float()], Type.Int(), "round")(lambda v: Value.Int(round(v.value)))
static([Type.Array(Type.Any())], Type.Int(), "length")(lambda v: Value.Int(len(v.value)))
@static([Type.String(), Type.String(), Type.String()], Type.String())
def sub(input: Value.String, pattern: Value.String, replace: Value.String) -> Value.String:
return Value.String(re.compile(pattern.value).sub(replace.value, input.value))
static([Type.String(), Type.String(optional=True)], Type.String())(basename)
@static([Type.Any(optional=True)], Type.Boolean())
def defined(v: Value.Base):
return Value.Boolean(not isinstance(v, Value.Null))
# write_*
static([Type.Array(Type.String())], Type.File(), "write_lines")(
self._write(_serialize_lines)
)
static([Type.Array(Type.Array(Type.String()))], Type.File(), "write_tsv")(
self._write(_serialize_tsv)
)
static([Type.Map((Type.Any(), Type.Any()))], Type.File(), "write_map")(
self._write(_serialize_map)
)
static([Type.Any()], Type.File(), "write_json")(
self._write(lambda v, outfile: outfile.write(json.dumps(v.json).encode("utf-8")))
)
# read_*
static([Type.File()], Type.Int(), "read_int")(self._read(lambda s: Value.Int(int(s))))
static([Type.File()], Type.Boolean(), "read_boolean")(self._read(_parse_boolean))
static([Type.File()], Type.String(), "read_string")(
self._read(lambda s: Value.String(s[:-1] if s.endswith("\n") else s))
)
static([Type.File()], Type.Float(), "read_float")(
self._read(lambda s: Value.Float(float(s)))
)
static([Type.File()], Type.Map((Type.String(), Type.String())), "read_map")(
self._read(_parse_map)
)
static([Type.File()], Type.Array(Type.String()), "read_lines")(self._read(_parse_lines))
static([Type.File()], Type.Array(Type.Array(Type.String())), "read_tsv")(
self._read(_parse_tsv)
)
static([Type.File()], Type.Any(), "read_json")(self._read(_parse_json))
# polymorphically typed stdlib functions which require specialized
# infer_type logic
self.range = _Range()
self.prefix = _Prefix()
self.size = _Size(self)
self.select_first = _SelectFirst()
self.select_all = _SelectAll()
self.zip = _Zip()
self.cross = _Cross()
self.flatten = _Flatten()
self.transpose = _Transpose()
def _read(self, parse: Callable[[str], Value.Base]) -> Callable[[Value.File], Value.Base]:
"generate read_* function implementation based on parse"
def f(file: Value.File) -> Value.Base:
with open(self._devirtualize_filename(file.value), "r") as infile:
return parse(infile.read())
return f
def _devirtualize_filename(self, filename: str) -> str:
"""
'devirtualize' filename passed to a read_* function: return a filename that can be open()ed
on the local host. Subclasses may further wish to forbid access to files outside of a
designated directory or whitelist (by raising an exception)
"""
raise NotImplementedError()
def _write(
self, serialize: Callable[[Value.Base, BinaryIO], None]
) -> Callable[[Value.Base], Value.File]:
"generate write_* function implementation based on serialize"
def _f(v: Value.Base) -> Value.File:
os.makedirs(self._write_dir, exist_ok=True)
with tempfile.NamedTemporaryFile(dir=self._write_dir, delete=False) as outfile:
outfile: BinaryIO = outfile # pyre-ignore
serialize(v, outfile)
filename = outfile.name
chmod_R_plus(filename, file_bits=0o660)
vfn = self._virtualize_filename(filename)
return Value.File(vfn)
return _f
def _virtualize_filename(self, filename: str) -> str:
"""
from a local path in write_dir, 'virtualize' into the filename as it should present in a
File value
"""
raise NotImplementedError()
def _override_static(self, name: str, f: Callable) -> None:
# replace the implementation lambda of a StaticFunction (keeping its
# types etc. the same)
sf = getattr(self, name)
assert isinstance(sf, StaticFunction)
setattr(sf, "F", f)
class Function(ABC):
# Abstract interface to a standard library function implementation
@abstractmethod
def infer_type(self, expr: "Expr.Apply") -> Type.Base:
# Typecheck the Apply expression (including the argument expressions);
# raise an exception or return the function's return type, which may
# depend on the argument types.
pass
@abstractmethod
def __call__(
self, expr: "Expr.Apply", env: Env.Bindings[Value.Base], stdlib: Base
) -> Value.Base:
# Invoke the function, evaluating the arguments as needed
pass
class EagerFunction(Function):
# Function helper providing boilerplate for eager argument evaluation.
# Implementation is responsible for any appropriate type coercion of
# argument and return values.
@abstractmethod
def _call_eager(self, expr: "Expr.Apply", arguments: List[Value.Base]) -> Value.Base:
pass
def __call__(
self, expr: "Expr.Apply", env: Env.Bindings[Value.Base], stdlib: Base
) -> Value.Base:
return self._call_eager(expr, [arg.eval(env, stdlib=stdlib) for arg in expr.arguments])
class StaticFunction(EagerFunction):
# Function helper for static argument and return types.
# In this case the boilerplate can handle the coercions.
name: str
argument_types: List[Type.Base]
return_type: Type.Base
F: Callable
def __init__(
self, name: str, argument_types: List[Type.Base], return_type: Type.Base, F: Callable
) -> None:
self.name = name
self.argument_types = argument_types
self.return_type = return_type
self.F = F
def infer_type(self, expr: "Expr.Apply") -> Type.Base:
min_args = len(self.argument_types)
for ty in reversed(self.argument_types):
if ty.optional:
min_args = min_args - 1
else:
break
if len(expr.arguments) > len(self.argument_types) or len(expr.arguments) < min_args:
raise Error.WrongArity(expr, len(self.argument_types))
for i in range(len(expr.arguments)):
try:
expr.arguments[i].typecheck(self.argument_types[i])
except Error.StaticTypeMismatch:
raise Error.StaticTypeMismatch(
expr.arguments[i],
self.argument_types[i],
expr.arguments[i].type,
"for {} argument #{}".format(self.name, i + 1),
) from None
return self.return_type
def _call_eager(self, expr: "Expr.Apply", arguments: List[Value.Base]) -> Value.Base:
argument_values = [arg.coerce(ty) for arg, ty in zip(arguments, self.argument_types)]
try:
ans: Value.Base = self.F(*argument_values)
except Exception as exn:
msg = "function evaluation failed"
if str(exn):
msg += ", " + str(exn)
raise Error.EvalError(expr, msg) from exn
return ans.coerce(self.return_type)
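# Illustrative sketch of the StaticFunction contract wired up throughout __init__
# above ("sq" below is a hypothetical function, not part of the WDL stdlib):
# infer_type() checks arity and argument types against the declared signature, and
# _call_eager() coerces arguments, runs F, and coerces the result to return_type.
_square_sketch = StaticFunction(
    "sq", [Type.Int()], Type.Int(), lambda v: Value.Int(v.value * v.value)
)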
def _notimpl(*args, **kwargs) -> None:
exec("raise NotImplementedError('function not available in this context')")
class TaskOutputs(Base):
"""
Defines type signatures for functions only available in task output sections.
(Implementations are left to be overridden by the task runtime.)
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
for (name, argument_types, return_type, F) in [
("stdout", [], Type.File(), _notimpl),
("stderr", [], Type.File(), _notimpl),
("glob", [Type.String()], Type.Array(Type.File()), _notimpl),
]:
setattr(self, name, StaticFunction(name, argument_types, return_type, F))
def basename(*args) -> Value.String:
assert len(args) in (1, 2)
assert isinstance(args[0], Value.String)
path = args[0].value
if len(args) > 1:
assert isinstance(args[1], Value.String)
suffix = args[1].value
if path.endswith(suffix):
path = path[: -len(suffix)]
return Value.String(os.path.basename(path))
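# Behavior sketch for basename() as implemented above (comments only):
#   basename(Value.String("/a/b/report.txt"))                          -> "report.txt"
#   basename(Value.String("/a/b/report.txt"), Value.String(".txt"))    -> "report"  (suffix stripped first)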
def _parse_lines(s: str) -> Value.Array:
ans = []
if s:
ans = [Value.String(line) for line in (s[:-1] if s.endswith("\n") else s).split("\n")]
return Value.Array(Type.String(), ans)
def _parse_boolean(s: str) -> Value.Boolean:
s = s.rstrip()
if s == "true":
return Value.Boolean(True)
if s == "false":
return Value.Boolean(False)
raise Error.InputError('read_boolean(): file content is not "true" or "false"')
def _parse_tsv(s: str) -> Value.Array:
# TODO: should a blank line parse as [] or ['']?
ans = [
Value.Array(
Type.Array(Type.String()), [Value.String(field) for field in line.value.split("\t")]
)
for line in _parse_lines(s).value
]
# pyre-ignore
return Value.Array(Type.Array(Type.String()), ans)
def _parse_map(s: str) -> Value.Map:
keys = set()
ans = []
for line in _parse_tsv(s).value:
assert isinstance(line, Value.Array)
if len(line.value) != 2:
raise Error.InputError("read_map(): each line must have two fields")
if line.value[0].value in keys:
raise Error.InputError("read_map(): duplicate key")
keys.add(line.value[0].value)
ans.append((line.value[0], line.value[1]))
return Value.Map((Type.String(), Type.String()), ans)
def _parse_json(s: str) -> Value.Base:
# TODO: parse int/float/boolean inside map or list as such
j = json.loads(s)
if isinstance(j, dict):
ans = []
for k in j:
ans.append((Value.String(str(k)), Value.String(str(j[k]))))
return Value.Map((Type.String(), Type.String()), ans)
if isinstance(j, list):
return Value.Array(Type.String(), [Value.String(str(v)) for v in j])
if isinstance(j, bool):
return Value.Boolean(j)
if isinstance(j, int):
return Value.Int(j)
if isinstance(j, float):
return Value.Float(j)
if j is None:
return Value.Null()
raise Error.InputError("parse_json()")
def _serialize_lines(array: Value.Array, outfile: BinaryIO) -> None:
for item in array.value:
outfile.write(item.coerce(Type.String()).value.encode("utf-8"))
outfile.write(b"\n")
def _serialize_tsv(v: Value.Array, outfile: BinaryIO) -> None:
return _serialize_lines(
Value.Array(
Type.String(),
[
Value.String("\t".join([part.coerce(Type.String()).value for part in parts.value]))
for parts in v.value
],
),
outfile,
)
def _serialize_map(map: Value.Map, outfile: BinaryIO) -> None:
lines = []
for (k, v) in map.value:
k = k.coerce(Type.String()).value
v = v.coerce(Type.String()).value
if "\n" in k or "\t" in k or "\n" in v or "\t" in v:
raise ValueError(
"write_map(): keys & values must not contain tab or newline characters"
)
lines.append(Value.String(k + "\t" + v))
_serialize_lines(Value.Array(Type.String(), lines), outfile)
class _At(EagerFunction):
# Special function for array access arr[index], returning the element type
# or map access map[key], returning the value type
def infer_type(self, expr: "Expr.Apply") -> Type.Base:
assert len(expr.arguments) == 2
lhs = expr.arguments[0]
rhs = expr.arguments[1]
if isinstance(lhs.type, Type.Array):
if isinstance(lhs, Expr.Array) and not lhs.items:
# the user wrote: [][idx]
raise Error.OutOfBounds(expr)
try:
rhs.typecheck(Type.Int())
except Error.StaticTypeMismatch:
raise Error.StaticTypeMismatch(rhs, Type.Int(), rhs.type, "Array index") from None
return lhs.type.item_type
if isinstance(lhs.type, Type.Map):
if lhs.type.item_type is None:
raise Error.OutOfBounds(expr)
try:
rhs.typecheck(lhs.type.item_type[0])
except Error.StaticTypeMismatch:
raise Error.StaticTypeMismatch(
rhs, lhs.type.item_type[0], rhs.type, "Map key"
) from None
return lhs.type.item_type[1]
raise Error.NotAnArray(lhs)
def _call_eager(self, expr: "Expr.Apply", arguments: List[Value.Base]) -> Value.Base:
assert len(expr.arguments) == 2 and len(arguments) == 2
lhs = arguments[0]
rhs = arguments[1]
if isinstance(lhs, Value.Map):
mty = expr.arguments[0].type
assert isinstance(mty, Type.Map)
key = rhs.coerce(mty.item_type[0])
ans = None
for k, v in lhs.value:
if key == k:
ans = v
if ans is None:
raise Error.OutOfBounds(expr.arguments[1]) # TODO: KeyNotFound
return ans
else:
lhs = lhs.coerce(Type.Array(Type.Any()))
rhs = rhs.coerce(Type.Int())
if rhs.value < 0 or rhs.value >= len(lhs.value):
raise Error.OutOfBounds(expr.arguments[1])
return lhs.value[rhs.value]
class _And(Function):
# logical && with short-circuit evaluation
def infer_type(self, expr: "Expr.Apply") -> Type.Base:
assert len(expr.arguments) == 2
for arg in expr.arguments:
if not isinstance(arg.type, Type.Boolean):
raise Error.IncompatibleOperand(arg, "non-Boolean operand to &&")
if expr._check_quant and arg.type.optional:
raise Error.IncompatibleOperand(arg, "optional Boolean? operand to &&")
return Type.Boolean()
def __call__(
self, expr: "Expr.Apply", env: Env.Bindings[Value.Base], stdlib: Base
) -> Value.Base:
lhs = expr.arguments[0].eval(env, stdlib=stdlib).expect(Type.Boolean()).value
if not lhs:
return Value.Boolean(False)
return expr.arguments[1].eval(env, stdlib=stdlib).expect(Type.Boolean())
class _Or(Function):
# logical || with short-circuit evaluation
def infer_type(self, expr: "Expr.Apply") -> Type.Base:
assert len(expr.arguments) == 2
for arg in expr.arguments:
if not isinstance(arg.type, Type.Boolean):
raise Error.IncompatibleOperand(arg, "non-Boolean operand to ||")
if expr._check_quant and arg.type.optional:
raise Error.IncompatibleOperand(arg, "optional Boolean? operand to ||")
return Type.Boolean()
def __call__(
self, expr: "Expr.Apply", env: Env.Bindings[Value.Base], stdlib: Base
) -> Value.Base:
lhs = expr.arguments[0].eval(env, stdlib=stdlib).expect(Type.Boolean()).value
if lhs:
return Value.Boolean(True)
return expr.arguments[1].eval(env, stdlib=stdlib).expect(Type.Boolean())
class _ArithmeticOperator(EagerFunction):
# arithmetic infix operators
# operands may be Int or Float; return Float iff either operand is Float
name: str
op: Callable
def __init__(self, name: str, op: Callable) -> None:
self.name = name
self.op = op
def infer_type(self, expr: "Expr.Apply") -> Type.Base:
assert len(expr.arguments) == 2
rt = Type.Int()
if isinstance(expr.arguments[0].type, Type.Float) or isinstance(
expr.arguments[1].type, Type.Float
):
rt = Type.Float()
try:
expr.arguments[0].typecheck(rt)
expr.arguments[1].typecheck(rt)
except Error.StaticTypeMismatch:
raise Error.IncompatibleOperand(
expr, "Non-numeric operand to " + self.name + " operator"
) from None
return rt
def _call_eager(self, expr: "Expr.Apply", arguments: List[Value.Base]) -> Value.Base:
ans_type = self.infer_type(expr)
ans = self.op(arguments[0].coerce(ans_type).value, arguments[1].coerce(ans_type).value)
if ans_type == Type.Int():
assert isinstance(ans, int)
return Value.Int(ans)
assert isinstance(ans, float)
return Value.Float(ans)
class _AddOperator(_ArithmeticOperator):
# + operator can also serve as concatenation for String.
def __init__(self) -> None:
super().__init__("+", lambda l, r: l + r)
def infer_type(self, expr: "Expr.Apply") -> Type.Base:
assert len(expr.arguments) == 2
t2 = None
if isinstance(expr.arguments[0].type, Type.String):
t2 = expr.arguments[1].type
elif isinstance(expr.arguments[1].type, Type.String):
t2 = expr.arguments[0].type
if t2 is None:
# neither operand is a string; defer to _ArithmeticOperator
return super().infer_type(expr)
if not t2.coerces(Type.String(optional=not expr._check_quant)):
raise Error.IncompatibleOperand(
expr,
"Cannot add/concatenate {} and {}".format(
str(expr.arguments[0].type), str(expr.arguments[1].type)
),
)
return Type.String()
def _call_eager(self, expr: "Expr.Apply", arguments: List[Value.Base]) -> Value.Base:
ans_type = self.infer_type(expr)
if not isinstance(ans_type, Type.String):
return super()._call_eager(expr, arguments)
ans = self.op(
str(arguments[0].coerce(Type.String()).value),
str(arguments[1].coerce(Type.String()).value),
)
assert isinstance(ans, str)
return Value.String(ans)
class InterpolationAddOperator(_AddOperator):
# + operator within an interpolation; accepts String? operands, evaluating to None if either
# operand is None.
def infer_type(self, expr: "Expr.Apply") -> Type.Base:
either_string = sum(1 for arg in expr.arguments if isinstance(arg.type, Type.String)) > 0
either_optional = sum(1 for arg in expr.arguments if arg.type.optional) > 0
both_stringifiable = (
sum(1 for arg in expr.arguments if arg.type.coerces(Type.String(optional=True))) > 1
)
return (
Type.String(optional=True)
if either_string and either_optional and both_stringifiable
else super().infer_type(expr)
)
def _call_eager(self, expr: "Expr.Apply", arguments: List[Value.Base]) -> Value.Base:
if sum(1 for arg in arguments if isinstance(arg, Value.Null)):
return Value.Null()
return super()._call_eager(expr, arguments)
class _ComparisonOperator(EagerFunction):
# Comparison operators can compare any two operands of the same type.
# Furthermore, given one Int and one Float, coerces the Int to Float for
# comparison.
name: str
op: Callable
def __init__(self, name: str, op: Callable) -> None:
self.name = name
self.op = op
def infer_type(self, expr: "Expr.Apply") -> Type.Base:
assert len(expr.arguments) == 2
if (
(
expr._check_quant
and expr.arguments[0].type.optional != expr.arguments[1].type.optional
)
or (
self.name not in ["==", "!="]
and (expr.arguments[0].type.optional or expr.arguments[1].type.optional)
)
or (
not (
expr.arguments[0].type.copy(optional=False)
== expr.arguments[1].type.copy(optional=False)
or (
isinstance(expr.arguments[0].type, Type.Int)
and isinstance(expr.arguments[1].type, Type.Float)
)
or (
isinstance(expr.arguments[0].type, Type.Float)
and isinstance(expr.arguments[1].type, Type.Int)
)
)
)
):
raise Error.IncompatibleOperand(
expr,
"Cannot compare {} and {}".format(
str(expr.arguments[0].type), str(expr.arguments[1].type)
),
)
return Type.Boolean()
def _call_eager(self, expr: "Expr.Apply", arguments: List[Value.Base]) -> Value.Base:
assert len(arguments) == 2
return Value.Boolean(self.op(arguments[0].value, arguments[1].value))
class _Size(EagerFunction):
# size(): first argument can be File? or Array[File?]
stdlib: Base
def __init__(self, stdlib: Base) -> None:
self.stdlib = stdlib
def infer_type(self, expr: "Expr.Apply") -> Type.Base:
if not expr.arguments:
raise Error.WrongArity(expr, 1)
arg0ty = expr.arguments[0].type
if not arg0ty.coerces(Type.File(optional=True)):
if isinstance(arg0ty, Type.Array):
if arg0ty.optional or not arg0ty.item_type.coerces(Type.File(optional=True)):
raise Error.StaticTypeMismatch(
expr.arguments[0], Type.Array(Type.File(optional=True)), arg0ty
)
else:
raise Error.StaticTypeMismatch(expr.arguments[0], Type.File(optional=True), arg0ty)
if len(expr.arguments) == 2:
if expr.arguments[1].type != Type.String():
raise Error.StaticTypeMismatch(
expr.arguments[1], Type.String(), expr.arguments[1].type
)
elif len(expr.arguments) > 2:
raise Error.WrongArity(expr, 2)
return Type.Float()
def _call_eager(self, expr: "Expr.Apply", arguments: List[Value.Base]) -> Value.Base:
# this default implementation attempts os.path.getsize() on the argument(s)
files = arguments[0].coerce(Type.Array(Type.File()))
unit = arguments[1].coerce(Type.String()) if len(arguments) > 1 else None
ans = []
for file in files.value:
ans.append(os.path.getsize(self.stdlib._devirtualize_filename(file.value)))
ans = float(sum(ans))
if unit:
try:
ans /= float(byte_size_units[unit.value])
except KeyError:
raise Error.EvalError(expr, "size(): invalid unit " + unit.value)
return Value.Float(ans)
class _SelectFirst(EagerFunction):
def infer_type(self, expr: "Expr.Apply") -> Type.Base:
if len(expr.arguments) != 1:
raise Error.WrongArity(expr, 1)
arg0ty = expr.arguments[0].type
if not isinstance(arg0ty, Type.Array) or (
expr.arguments[0]._check_quant and arg0ty.optional
):
raise Error.StaticTypeMismatch(expr.arguments[0], Type.Array(Type.Any()), arg0ty)
if isinstance(arg0ty.item_type, Type.Any):
raise Error.IndeterminateType(expr.arguments[0], "can't infer item type of empty array")
return arg0ty.item_type.copy(optional=False)
def _call_eager(self, expr: "Expr.Apply", arguments: List[Value.Base]) -> Value.Base:
arr = arguments[0].coerce(Type.Array(Type.Any()))
assert isinstance(arr, Value.Array)
for arg in arr.value:
if not isinstance(arg, Value.Null):
return arg
raise Error.NullValue(expr)
class _SelectAll(EagerFunction):
def infer_type(self, expr: "Expr.Apply") -> Type.Base:
if len(expr.arguments) != 1:
raise Error.WrongArity(expr, 1)
arg0ty = expr.arguments[0].type
if not isinstance(arg0ty, Type.Array) or (
expr.arguments[0]._check_quant and arg0ty.optional
):
raise Error.StaticTypeMismatch(expr.arguments[0], Type.Array(Type.Any()), arg0ty)
if isinstance(arg0ty.item_type, Type.Any):
raise Error.IndeterminateType(expr.arguments[0], "can't infer item type of empty array")
return Type.Array(arg0ty.item_type.copy(optional=False))
def _call_eager(self, expr: "Expr.Apply", arguments: List[Value.Base]) -> Value.Base:
arr = arguments[0].coerce(Type.Array(Type.Any()))
assert isinstance(arr, Value.Array)
arrty = arr.type
assert isinstance(arrty, Type.Array)
return Value.Array(
arrty.item_type, [arg for arg in arr.value if not isinstance(arg, Value.Null)]
)
class _ZipOrCross(EagerFunction):
# 'a array -> 'b array -> ('a,'b) array
def infer_type(self, expr: "Expr.Apply") -> Type.Base:
if len(expr.arguments) != 2:
raise Error.WrongArity(expr, 2)
arg0ty: Type.Base = expr.arguments[0].type
if not isinstance(arg0ty, Type.Array) or (expr._check_quant and arg0ty.optional):
raise Error.StaticTypeMismatch(expr.arguments[0], Type.Array(Type.Any()), arg0ty)
if isinstance(arg0ty.item_type, Type.Any):
raise Error.IndeterminateType(expr.arguments[0], "can't infer item type of empty array")
arg1ty: Type.Base = expr.arguments[1].type
if not isinstance(arg1ty, Type.Array) or (expr._check_quant and arg1ty.optional):
raise Error.StaticTypeMismatch(expr.arguments[1], Type.Array(Type.Any()), arg1ty)
if isinstance(arg1ty.item_type, Type.Any):
raise Error.IndeterminateType(expr.arguments[1], "can't infer item type of empty array")
return Type.Array(
Type.Pair(arg0ty.item_type, arg1ty.item_type),
nonempty=(arg0ty.nonempty or arg1ty.nonempty),
)
def _coerce_args(
self, expr: "Expr.Apply", arguments: List[Value.Base]
) -> Tuple[Type.Array, Value.Array, Value.Array]:
ty = self.infer_type(expr)
assert isinstance(ty, Type.Array) and isinstance(ty.item_type, Type.Pair)
lhs = arguments[0].coerce(Type.Array(ty.item_type.left_type))
rhs = arguments[1].coerce(Type.Array(ty.item_type.right_type))
assert isinstance(lhs, Value.Array) and isinstance(rhs, Value.Array)
return (ty, lhs, rhs)
class _Zip(_ZipOrCross):
def _call_eager(self, expr: "Expr.Apply", arguments: List[Value.Base]) -> Value.Array:
ty, lhs, rhs = self._coerce_args(expr, arguments)
assert isinstance(ty, Type.Array) and isinstance(ty.item_type, Type.Pair)
if len(lhs.value) != len(rhs.value):
raise Error.EvalError(expr, "zip(): input arrays must have equal length")
return Value.Array(
ty.item_type,
[
Value.Pair(
ty.item_type.left_type, ty.item_type.right_type, (lhs.value[i], rhs.value[i])
)
for i in range(len(lhs.value))
],
)
class _Cross(_ZipOrCross):
def _call_eager(self, expr: "Expr.Apply", arguments: List[Value.Base]) -> Value.Array:
ty, lhs, rhs = self._coerce_args(expr, arguments)
assert isinstance(ty, Type.Array) and isinstance(ty.item_type, Type.Pair)
return Value.Array(
ty.item_type,
[
Value.Pair(ty.item_type.left_type, ty.item_type.right_type, (lhs_item, rhs_item))
for lhs_item in lhs.value
for rhs_item in rhs.value
],
)
class _Flatten(EagerFunction):
# t array array -> t array
# TODO: if any of the input arrays are statically nonempty then so is output
def infer_type(self, expr: "Expr.Apply") -> Type.Base:
if len(expr.arguments) != 1:
raise Error.WrongArity(expr, 1)
expr.arguments[0].typecheck(Type.Array(Type.Any()))
# TODO: won't handle implicit coercion from T to Array[T]
arg0ty = expr.arguments[0].type
assert isinstance(arg0ty, Type.Array)
if isinstance(arg0ty.item_type, Type.Any):
return Type.Array(Type.Any())
if not isinstance(arg0ty.item_type, Type.Array) or (
expr._check_quant and arg0ty.item_type.optional
):
raise Error.StaticTypeMismatch(
expr.arguments[0], Type.Array(Type.Array(Type.Any())), arg0ty
)
return Type.Array(arg0ty.item_type.item_type)
def _call_eager(self, expr: "Expr.Apply", arguments: List[Value.Base]) -> Value.Base:
ty = self.infer_type(expr)
assert isinstance(ty, Type.Array)
ans = []
for row in arguments[0].coerce(Type.Array(ty)).value:
ans.extend(row.value)
return Value.Array(ty.item_type, ans)
class _Transpose(EagerFunction):
# t array array -> t array array
# TODO: if any of the input arrays are statically nonempty then so is output
def infer_type(self, expr: "Expr.Apply") -> Type.Base:
if len(expr.arguments) != 1:
raise Error.WrongArity(expr, 1)
expr.arguments[0].typecheck(Type.Array(Type.Any()))
# TODO: won't handle implicit coercion from T to Array[T]
arg0ty = expr.arguments[0].type
assert isinstance(arg0ty, Type.Array)
if isinstance(arg0ty.item_type, Type.Any):
return Type.Array(Type.Any())
if not isinstance(arg0ty.item_type, Type.Array) or (
expr._check_quant and arg0ty.item_type.optional
):
raise Error.StaticTypeMismatch(
expr.arguments[0], Type.Array(Type.Array(Type.Any())), arg0ty
)
return Type.Array(Type.Array(arg0ty.item_type.item_type))
def _call_eager(self, expr: "Expr.Apply", arguments: List[Value.Base]) -> Value.Base:
ty = self.infer_type(expr)
assert isinstance(ty, Type.Array) and isinstance(ty.item_type, Type.Array)
mat = arguments[0].coerce(ty)
assert isinstance(mat, Value.Array)
n = None
ans = []
for row in mat.value:
assert isinstance(row, Value.Array)
if n is None:
n = len(row.value)
ans = [Value.Array(ty.item_type, []) for _ in row.value]
if len(row.value) != n:
raise Error.EvalError(expr, "transpose(): ragged input matrix")
for i in range(len(row.value)):
ans[i].value.append(row.value[i])
return Value.Array(ty.item_type, ans)
class _Range(EagerFunction):
# int -> int array
# with special case: if the argument is a positive integer literal or
# length(a_nonempty_array), then we can say the returned array is nonempty.
def infer_type(self, expr: "Expr.Apply") -> Type.Base:
if len(expr.arguments) != 1:
raise Error.WrongArity(expr, 1)
expr.arguments[0].typecheck(Type.Int())
nonempty = False
arg0 = expr.arguments[0]
if isinstance(arg0, Expr.Int) and arg0.value > 0:
nonempty = True
if isinstance(arg0, Expr.Apply) and arg0.function_name == "length":
arg00ty = arg0.arguments[0].type
if isinstance(arg00ty, Type.Array) and arg00ty.nonempty:
nonempty = True
return Type.Array(Type.Int(), nonempty=nonempty)
def _call_eager(self, expr: "Expr.Apply", arguments: List[Value.Base]) -> Value.Base:
arg0 = arguments[0].coerce(Type.Int())
assert isinstance(arg0, Value.Int)
if arg0.value < 0:
raise Error.EvalError(expr, "range() got negative argument")
return Value.Array(Type.Int(), [Value.Int(x) for x in range(arg0.value)])
class _Prefix(EagerFunction):
# string -> t array -> string array
# if input array is nonempty then so is output
def infer_type(self, expr: "Expr.Apply") -> Type.Base:
if len(expr.arguments) != 2:
raise Error.WrongArity(expr, 2)
expr.arguments[0].typecheck(Type.String())
expr.arguments[1].typecheck(Type.Array(Type.String()))
arg1ty = expr.arguments[1].type
return Type.Array(
Type.String(), nonempty=(isinstance(arg1ty, Type.Array) and arg1ty.nonempty)
)
def _call_eager(self, expr: "Expr.Apply", arguments: List[Value.Base]) -> Value.Base:
pfx = arguments[0].coerce(Type.String()).value
return Value.Array(
Type.String(),
[Value.String(pfx + s.coerce(Type.String()).value) for s in arguments[1].value],
)
|
import numpy as np
import emcee
import sys
import os
from os.path import join as osjoin
from pc_path import definir_path
path_git, path_datos_global = definir_path()
os.chdir(path_git)
sys.path.append('./Software/Funcionales/')
from funciones_parametros_derivados import parametros_derivados
# Fill in here:
model='EXP'
datasets = 'CC+SN+AGN'
num_params = '4params'
#root_directory=path_datos_global+'/Resultados_cadenas/Paper/'+model
root_directory=path_datos_global+'/Resultados_cadenas'
root_directory
os.chdir(root_directory)
filename = 'sample_'+model+'_'+datasets+'_'+num_params
filename_h5 = filename+'.h5'
reader = emcee.backends.HDFBackend(filename_h5)
nwalkers, ndim = reader.shape # number of walkers and number of parameters
#%%%
samples = reader.get_chain()
burnin = int(0.2*len(samples[:,0])) # 20% burn-in
thin = 1
#%% Define the burn-in and thin from tau, or set them by hand
tau = reader.get_autocorr_time()
#burnin = int(2 * np.max(tau))
#thin = int(0.5 * np.min(tau))
#%%
samples = reader.get_chain(discard=burnin, flat=True, thin=thin)
print(len(samples)) # number of effective steps
print('Estimated time: {} min'.format(len(samples)/60))
new_samples = parametros_derivados(reader,discard=burnin,thin=thin,model=model)
#%%
np.savez(filename+'_deriv', new_samples=new_samples)
#dir = path_datos_global+'/Resultados_cadenas/posprocesado'
os.chdir(root_directory)
with np.load(filename+'_deriv.npz') as data:
ns = data['new_samples']
|
#coding:utf-8
#
# id: bugs.core_5273
# title: Crash when attempt to create database with running trace ( internal Firebird consistency check (cannot find tip page (165), file: tra.cpp line: 2233) )
# description:
# 1. Get the content of firebird.log before test.
# 2. Make config file and launch trace session, with separate logging of its STDOUT and STDERR.
# 3. Make DDLfile and run ISQL, with separate logging of its STDOUT and STDERR.
# 4. Stop trace session
# 5. Get the content of firebird.log after test.
# 6. Ensure that files which should store STDERR results are empty.
# 7. Ensure that there is no difference in the content of firebird.log.
#
# Confirmed on 4.0.0.254 (SS, SC):
# 1) unexpected STDERR logs:
# + Unexpected STDERR, file ... mp_5273_ddl.err: Statement failed, SQLSTATE = 08006
# + Unexpected STDERR, file ... mp_5273_ddl.err: Error reading data from the connection.
# + Unexpected STDERR, file ... mp_5273_ddl.err: After line 3 in file ... mp_5273_ddl.sql
# + Unexpected STDERR, file ... mp_trace_5273.err: Error reading data from the connection.
# 2) diff in firebird.log:
# +CSPROG Thu Jun 16 14:17:13 2016
# + Database: C:\\MIX\\FIREBIRD\\QA\\FBT-REPO\\TMP\\TMP_5273.FDB
# + internal Firebird consistency check (cannot find tip page (165), file: tra.cpp line: 2233)
# +
# +
# +CSPROG Thu Jun 16 14:17:13 2016
# + INET/inet_error: read errno = 10054, server host = localhost, address = 127.0.0.1/3430
#
# Works fine on 4.0.0.256.
#
# tracker_id: CORE-5273
# min_versions: ['4.0']
# versions: 4.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 4.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
# import os
# import time
# import subprocess
# import difflib
# from subprocess import Popen
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
# db_conn.close()
#
# #--------------------------------------------
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
# if type(f_names_list[i]) == file:
# del_name = f_names_list[i].name
# elif type(f_names_list[i]) == str:
# del_name = f_names_list[i]
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
# #--------------------------------------------
#
# def svc_get_fb_log( f_fb_log ):
#
# global subprocess
#
# subprocess.call([ context['fbsvcmgr_path'],
# "localhost:service_mgr",
# "action_get_fb_log"
# ],
# stdout=f_fb_log,
# stderr=subprocess.STDOUT
# )
#
# return
#
#
#
#
# tmpfdb1=os.path.join(context['temp_directory'],'tmp_5273.fdb')
# cleanup( tmpfdb1, )
#
# sql_ddl=''' set list on;
# set bail on;
# create database 'localhost:%(tmpfdb1)s';
# select mon$database_name from mon$database;
# commit;
# drop database;
# ''' % locals()
#
# trace_options = '''# Trace config, format for 3.0 and above. Generated auto, do not edit!
# database=%[\\\\\\\\/]tmp_5273.fdb
# {
# enabled = true
# log_sweep = true
# log_errors = true
# time_threshold = 0
# log_connections = true
# log_transactions = true
# log_statement_prepare = true
# log_statement_start = true
# log_statement_finish = true
# log_statement_free = true
# log_trigger_start = true
# log_trigger_finish = true
# print_perf = true
# max_sql_length = 16384
# max_log_size = 5000000
# }
# services
# {
# enabled = false
# log_services = true
# log_service_query = true
# log_errors = true
# }
# '''
# f_trccfg=open( os.path.join(context['temp_directory'],'tmp_trace_5273.cfg'), 'w')
# f_trccfg.write(trace_options)
# flush_and_close( f_trccfg )
#
# # Get content of firebird.log BEFORE test:
# ##########################################
# f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_5273_fblog_before.txt'), 'w')
# svc_get_fb_log( f_fblog_before )
# flush_and_close( f_fblog_before )
#
#
# # Starting trace session in new child process (async.):
# #######################################################
#
# f_trclog=open( os.path.join(context['temp_directory'],'tmp_trace_5273.log'), 'w')
# f_trcerr=open( os.path.join(context['temp_directory'],'tmp_trace_5273.err'), 'w')
#
# # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT:
# p_trace=Popen([context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_trace_start",
# "trc_cfg", f_trccfg.name],
# stdout=f_trclog,
# stderr=f_trcerr
# )
#
# # Wait! Trace session is initialized not instantly!
# time.sleep(2)
#
# f_create_db_sql = open( os.path.join(context['temp_directory'],'tmp_5273_ddl.sql'), 'w')
# f_create_db_sql.write(sql_ddl)
# flush_and_close( f_create_db_sql )
#
#
# # CREATE DATABASE
# #################
#
# f_create_db_log = open( os.path.join(context['temp_directory'],'tmp_5273_ddl.log'), 'w')
# f_create_db_err = open( os.path.join(context['temp_directory'],'tmp_5273_ddl.err'), 'w')
# subprocess.call( [context['isql_path'], "-q", "-i", f_create_db_sql.name ],
# stdout = f_create_db_log,
# stderr = f_create_db_err
# )
# flush_and_close( f_create_db_log )
# flush_and_close( f_create_db_err )
#
#
# #####################################################
# # Getting ID of launched trace session and STOP it:
#
# # Save active trace session info into file for further parsing it and obtain session_id back (for stop):
# f_trclst=open( os.path.join(context['temp_directory'],'tmp_trace_5273.lst'), 'w')
# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_trace_list"],
# stdout=f_trclst,
# stderr=subprocess.STDOUT
# )
# flush_and_close( f_trclst )
#
# trcssn=0
# with open( f_trclst.name,'r') as f:
# for line in f:
# i=1
# if 'Session ID' in line:
# for word in line.split():
# if i==3:
# trcssn=word
# i=i+1
# break
#
# # Result: `trcssn` is ID of active trace session. Now we have to terminate it:
# f_trclst=open(f_trclst.name,'a')
# f_trclst.seek(0,2)
# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_trace_stop",
# "trc_id",trcssn],
# stdout=f_trclst, stderr=subprocess.STDOUT
# )
# flush_and_close( f_trclst )
#
# # 23.02.2021. DELAY FOR AT LEAST 1 SECOND REQUIRED HERE!
# # On windows preliminary termination of running trace before it completes 'stop' request leads to:
# # INET/inet_error: read errno = 10054, client host = ..., address = ::1/62963, user = ...
# time.sleep(1)
#
# # Terminate child process of launched trace session (though it should already be killed):
# p_trace.terminate()
# flush_and_close( f_trclog )
# flush_and_close( f_trcerr )
#
#
# # Get content of firebird.log AFTER test:
# #########################################
#
# f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_5273_fblog_after.txt'), 'w')
# svc_get_fb_log( f_fblog_after )
# flush_and_close( f_fblog_after )
#
# # STDERR for ISQL (that created DB) and trace session - they both must be EMPTY:
# #################
# f_list=[f_create_db_err, f_trcerr]
# for i in range(len(f_list)):
# f_name=f_list[i].name
# if os.path.getsize(f_name) > 0:
# with open( f_name,'r') as f:
# for line in f:
# print("Unexpected STDERR, file "+f_name+": "+line)
#
# # DIFFERENCE in the content of firebird.log should be EMPTY:
# ####################
#
# oldfb=open(f_fblog_before.name, 'r')
# newfb=open(f_fblog_after.name, 'r')
#
# difftext = ''.join(difflib.unified_diff(
# oldfb.readlines(),
# newfb.readlines()
# ))
# oldfb.close()
# newfb.close()
#
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5273_diff.txt'), 'w')
# f_diff_txt.write(difftext)
# flush_and_close( f_diff_txt )
#
# with open( f_diff_txt.name,'r') as f:
# for line in f:
# print("Unexpected DIFF in firebird.log: "+line)
#
#
# # Cleanup:
# ##########
# time.sleep(1)
# cleanup( (f_create_db_sql, f_create_db_log, f_create_db_err, f_trccfg, f_trclst, f_trclog, f_trcerr,f_fblog_before,f_fblog_after,f_diff_txt, tmpfdb1) )
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
@pytest.mark.version('>=4.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
|
# Copyright 2021 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from appimagebuilder.context import Context
class Command:
"""Represent a single action in the AppImage creation process"""
def __init__(self, context: Context, description):
self.context = context
self.description = description
def id(self):
pass
def __call__(self, *args, **kwargs):
pass
|
from tkinter import *
from Switch import encrypt_input
window = Tk()
def encrypt():
text = encrypt_input(txt1.get())
txt1.delete(0, "end")
txt1.insert(0, str(text))
v = ""
window.title("SwapEncryptor")
lbl1 = Label(window, text="Enter text")
txt1 = Entry(window, width=30, textvariable=v)
btn1 = Button(window, text="Encrypt/Decrypt", command=encrypt)
lbl1.grid(column=1, row=1)
txt1.grid(column=1, row=2)
btn1.grid(column=1, row=3)
window.grid_columnconfigure(0, weight=1)
window.grid_columnconfigure(2, weight=1)
window.grid_rowconfigure(0, weight=1)
window.grid_rowconfigure(4, weight=1)
window.geometry('350x200')
window.mainloop()
|
def sum(n, x): # iterative: sum of (1/x)**i for i = 1..n (note: shadows the built-in sum)
total = 0
for i in range(1, n+1):
total = total + (1 / x)**i
return total
x = int(input())
n = int(input())
print(round(sum(n, x), 2))
def solutionRec(n,x):
if n == 1:
return 1/x
else:
return (1/x)**n + solutionRec(n-1,x)
print(round(solutionRec(n, x), 2))
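# Optional sanity check (a sketch, assuming the intended series is the sum of
# (1/x)**i for i = 1..n): the same value has the geometric closed form below.
def _series_closed_form(n, x):
    r = 1.0 / x
    return r * (1 - r ** n) / (1 - r) # valid for x != 1
# e.g. n=3, x=2: 1/2 + 1/4 + 1/8 = 0.875 == round(_series_closed_form(3, 2), 3)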
|
import math
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmseg.ops import resize
from ..builder import HEADS
from .aspp_head import ASPPHead, ASPPModule
def get_freq_indices(method):
assert method in ['top1','top2','top4','top8','top16','top32',
'bot1','bot2','bot4','bot8','bot16','bot32',
'low1','low2','low4','low8','low16','low32']
num_freq = int(method[3:])
if 'top' in method:
all_top_indices_x = [0,0,6,0,0,1,1,4,5,1,3,0,0,0,3,2,4,6,3,5,5,2,6,5,5,3,3,4,2,2,6,1]
all_top_indices_y = [0,1,0,5,2,0,2,0,0,6,0,4,6,3,5,2,6,3,3,3,5,1,1,2,4,2,1,1,3,0,5,3]
mapper_x = all_top_indices_x[:num_freq]
mapper_y = all_top_indices_y[:num_freq]
elif 'low' in method:
all_low_indices_x = [0,0,1,1,0,2,2,1,2,0,3,4,0,1,3,0,1,2,3,4,5,0,1,2,3,4,5,6,1,2,3,4]
all_low_indices_y = [0,1,0,1,2,0,1,2,2,3,0,0,4,3,1,5,4,3,2,1,0,6,5,4,3,2,1,0,6,5,4,3]
mapper_x = all_low_indices_x[:num_freq]
mapper_y = all_low_indices_y[:num_freq]
elif 'bot' in method:
all_bot_indices_x = [6,1,3,3,2,4,1,2,4,4,5,1,4,6,2,5,6,1,6,2,2,4,3,3,5,5,6,2,5,5,3,6]
all_bot_indices_y = [6,4,4,6,6,3,1,4,4,5,6,5,2,2,5,1,4,3,5,0,3,1,1,2,4,2,1,1,5,3,3,3]
mapper_x = all_bot_indices_x[:num_freq]
mapper_y = all_bot_indices_y[:num_freq]
else:
raise NotImplementedError
return mapper_x, mapper_y
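# Illustration (comments only): 'top4' selects the first four (x, y) index pairs
# from the lists above, i.e. mapper_x = [0, 0, 6, 0] and mapper_y = [0, 1, 0, 5];
# MultiSpectralAttentionLayer later rescales these 7x7-grid indices to the actual
# DCT tile size via the (dct_h // 7, dct_w // 7) multipliers.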
class MultiSpectralDCTLayer(nn.Module):
"""
Generate dct filters
"""
def __init__(self, height, width, mapper_x, mapper_y, channel):
super(MultiSpectralDCTLayer, self).__init__()
assert len(mapper_x) == len(mapper_y)
assert channel % len(mapper_x) == 0
self.num_freq = len(mapper_x)
# fixed DCT init
self.register_buffer('weight', self.get_dct_filter(height, width, mapper_x, mapper_y, channel))
# fixed random init
# self.register_buffer('weight', torch.rand(channel, height, width))
# learnable DCT init
# self.register_parameter('weight', self.get_dct_filter(height, width, mapper_x, mapper_y, channel))
# learnable random init
# self.register_parameter('weight', torch.rand(channel, height, width))
# num_freq, h, w
def forward(self, x):
assert len(x.shape) == 4, 'x must be 4-dimensional, but got ' + str(len(x.shape))
# n, c, h, w = x.shape
x = x * self.weight
result = torch.sum(x, dim=[2, 3])
return result
def build_filter(self, pos, freq, POS):
result = math.cos(math.pi * freq * (pos + 0.5) / POS) / math.sqrt(POS)
if freq == 0:
return result
else:
return result * math.sqrt(2)
def get_dct_filter(self, tile_size_x, tile_size_y, mapper_x, mapper_y, channel):
dct_filter = torch.zeros(channel, tile_size_x, tile_size_y)
c_part = channel // len(mapper_x)
for i, (u_x, v_y) in enumerate(zip(mapper_x, mapper_y)):
for t_x in range(tile_size_x):
for t_y in range(tile_size_y):
dct_filter[i * c_part: (i + 1) * c_part, t_x, t_y] = self.build_filter(t_x, u_x,
tile_size_x) * self.build_filter(
t_y, v_y, tile_size_y)
return dct_filter
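# Note on the filter built above: build_filter() evaluates the 1D DCT-II basis
#   B_u(t) = cos(pi * u * (t + 0.5) / N) / sqrt(N), scaled by sqrt(2) when u != 0,
# and get_dct_filter() takes the outer product B_u(t_x) * B_v(t_y) for each selected
# frequency pair, assigning it to a block of channel // len(mapper_x) channels.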
class MultiSpectralAttentionLayer(torch.nn.Module):
def __init__(self, channel, dct_h, dct_w, reduction = 16, freq_sel_method = 'top16'):
super(MultiSpectralAttentionLayer, self).__init__()
self.reduction = reduction
self.dct_h = dct_h
self.dct_w = dct_w
mapper_x, mapper_y = get_freq_indices(freq_sel_method)
self.num_split = len(mapper_x)
mapper_x = [temp_x * (dct_h // 7) for temp_x in mapper_x]
mapper_y = [temp_y * (dct_w // 7) for temp_y in mapper_y]
# rescale the frequency indices so that different input sizes map onto the same 7x7 frequency space
# e.g., (2,2) in a 14x14 tile is identical to (1,1) in a 7x7 tile
self.dct_layer = MultiSpectralDCTLayer(dct_h, dct_w, mapper_x, mapper_y, channel)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
nn.Sigmoid()
)
def forward(self, x):
n,c,h,w = x.shape
x_pooled = x
if h != self.dct_h or w != self.dct_w:
x_pooled = torch.nn.functional.adaptive_avg_pool2d(x, (self.dct_h, self.dct_w))
# If you have concerns about one-line-change, don't worry. :)
# In the ImageNet models, this line will never be triggered.
# This is for compatibility in instance segmentation and object detection.
y = self.dct_layer(x_pooled)
y = self.fc(y).view(n, c, 1, 1)
return x * y.expand_as(x)
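# Shape sketch for the attention layer above (comments only; sizes are examples):
#   att = MultiSpectralAttentionLayer(channel=2048, dct_h=7, dct_w=7)
#   out = att(x) with x of shape (N, 2048, H, W) -> out of shape (N, 2048, H, W)
# Inputs whose spatial size differs from (dct_h, dct_w) are adaptive-avg-pooled
# before the DCT; the result is the input rescaled channel-wise by fc(dct_layer(x)).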
class DepthwiseSeparableASPPModule(ASPPModule):
"""Atrous Spatial Pyramid Pooling (ASPP) Module with depthwise separable
conv."""
def __init__(self, **kwargs):
super(DepthwiseSeparableASPPModule, self).__init__(**kwargs)
for i, dilation in enumerate(self.dilations):
if dilation > 1:
self[i] = DepthwiseSeparableConvModule(
self.in_channels,
self.channels,
3,
dilation=dilation,
padding=dilation,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
@HEADS.register_module()
class DepthwiseSeparableASPPHead(ASPPHead):
"""Encoder-Decoder with Atrous Separable Convolution for Semantic Image
Segmentation.
This head is the implementation of `DeepLabV3+
<https://arxiv.org/abs/1802.02611>`_.
Args:
c1_in_channels (int): The input channels of c1 decoder. If it is 0,
no decoder will be used.
c1_channels (int): The intermediate channels of c1 decoder.
"""
def __init__(self, c1_in_channels, c1_channels, **kwargs):
super(DepthwiseSeparableASPPHead, self).__init__(**kwargs)
assert c1_in_channels >= 0
self.c2_channels = 512
self.c3_channels = 1024
self.c4_channels = 2048
self.aspp_modules = DepthwiseSeparableASPPModule(
dilations=self.dilations,
in_channels=self.in_channels,
channels=self.channels,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
if c1_in_channels > 0:
self.c1_fca_att = MultiSpectralAttentionLayer(c1_in_channels, 56, 56)
else:
self.c1_fca_att = None
self.c2_fca_att = MultiSpectralAttentionLayer(self.c2_channels, 28, 28)
self.c3_fca_att = MultiSpectralAttentionLayer(self.c3_channels, 14, 14)
self.c4_fca_att = MultiSpectralAttentionLayer(self.c4_channels, 7, 7)
self.bottleneck2 = ConvModule(
self.channels + self.c2_channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.bottleneck3 = ConvModule(
self.channels + self.c3_channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.bottleneck4 = ConvModule(
self.channels + self.c4_channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.sep_bottleneck = nn.Sequential(
DepthwiseSeparableConvModule(
self.channels + c1_channels,
self.channels,
3,
padding=1,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg),
DepthwiseSeparableConvModule(
self.channels,
self.channels,
3,
padding=1,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
def forward(self, inputs):
"""Forward function."""
x = self._transform_inputs(inputs)
aspp_outs = [
resize(
self.image_pool(x),
size=x.size()[2:],
mode='bilinear',
align_corners=self.align_corners)
]
aspp_outs.extend(self.aspp_modules(x))
aspp_outs = torch.cat(aspp_outs, dim=1)
output = self.bottleneck(aspp_outs) # 3x3conv channels = 512
# print("aspp_outs bottleneck: {}".format(output.shape))
# c4 skip connection
c4_output = self.c4_fca_att(inputs[3])
output = torch.cat([output, c4_output], dim=1) # channels = 2048+512
output = self.bottleneck4(output) # 3x3conv channels = 512
# print("bottleneck4: {}".format(output.shape))
# c3 skip connection, 2x upsampling
c3_output = self.c3_fca_att(inputs[2])
output = resize(
input=output,
size=inputs[2].shape[2:],
mode='bilinear',
align_corners=self.align_corners)
output = torch.cat([output, c3_output], dim=1)
output = self.bottleneck3(output) # 3x3conv channels = 512
# print("bottleneck3: {}".format(output.shape))
# c2 skip connection, 2x upsampling
c2_output = self.c2_fca_att(inputs[1])
output = resize(
input=output,
size=inputs[1].shape[2:],
mode='bilinear',
align_corners=self.align_corners)
output = torch.cat([output, c2_output], dim=1)
output = self.bottleneck2(output) # 3x3conv channels = 512
# print("bottleneck2: {}".format(output.shape))
# c1 skip connection, 2x upsampling
# output = resize(
# input=output,
# size=inputs[0].shape[2:],
# mode='bilinear',
# align_corners=self.align_corners)
# output = torch.cat([output, inputs[0]], dim=1)
if self.c1_fca_att is not None:
c1_output = self.c1_fca_att(inputs[0])
output = resize(
input=output,
size=c1_output.shape[2:],
mode='bilinear',
align_corners=self.align_corners)
output = torch.cat([output, c1_output], dim=1)
output = self.sep_bottleneck(output)
# print("sep_bottleneck: {}".format(output.shape))
output = self.cls_seg(output)
# print("cls_seg: {}".format(output.shape))
return output
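# Decoder data-flow sketch for forward() above (comments only; "512" refers to
# self.channels as noted in the inline comments, c2/c3/c4 channel counts as set
# in __init__):
#   ASPP(x) -> bottleneck (512) -> cat FCA(c4) [2048] -> bottleneck4 (512)
#   upsample to c3 -> cat FCA(c3) [1024] -> bottleneck3 (512)
#   upsample to c2 -> cat FCA(c2) [512]  -> bottleneck2 (512)
#   upsample to c1 -> cat FCA(c1)        -> sep_bottleneck -> cls_seg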
|
import functools
import flux
from flask import g
from flask_security import current_user
def updates_last_active(func):
from . import models
@functools.wraps(func)
def new_func(*args, **kwargs):
if hasattr(g, 'token_user'):
u = g.token_user
elif current_user.is_authenticated:
u = models.User.query.get(current_user.id)
else:
u = None
if u is not None:
u.last_activity = flux.current_timeline.time() # pylint: disable=no-member
models.db.session.add(u)
return func(*args, **kwargs)
return new_func
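# Hypothetical usage sketch (the route and view below are illustrative, not part
# of this project): the decorator stamps the acting user's last_activity with the
# flux timeline clock before the wrapped view runs.
#
# @app.route('/api/ping')
# @updates_last_active
# def ping():
#     return 'pong'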
|
from .model.model import Model
from .aggregator.aggregator import Aggregator
class Ensemble(object):
def __init__(self):
self.models = []
self.aggregator = None
def add_models(self, models):
for model in models:
self.add_model(model)
def add_model(self, model):
if not isinstance(model, Model):
raise TypeError("model must be of type pyml_ensemble.model.Model")
self.models.append(model)
def set_aggregator(self, aggregator):
if not isinstance(aggregator, Aggregator):
raise TypeError("aggregator must be of type pyml_ensemble.aggregator.Aggregator")
self.aggregator = aggregator
def train(self, x, y):
"""
par x - a list of training data samples, one list entry for each model.
par y - a list of training data targets, one list entry for each model.
Note: x[i] <-> y[i]
"""
for i in range(0, len(self.models)):
self.models[i].train(x[i], y[i])
def predict(self, x):
if self.aggregator is None:
raise RuntimeError("no aggregator defined for ensemble")
predictions = []
for model in self.models:
predictions.append(model.get_prediction(x))
return self.aggregator.combine(predictions)
def call_all(self, function_name):
"""
Calls the method with the given name on each model in the ensemble and
returns a list of the return values.
"""
return_list = []
for model in self.models:
return_list.append(getattr(model, function_name)())
return return_list
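# Hypothetical usage sketch (MyModel and MeanAggregator stand for user-defined
# subclasses of Model and Aggregator; they are not shipped with this file):
#
# ensemble = Ensemble()
# ensemble.add_models([MyModel(), MyModel()])
# ensemble.set_aggregator(MeanAggregator())
# ensemble.train([x_part_1, x_part_2], [y_part_1, y_part_2])  # one slice per model
# y_hat = ensemble.predict(x_new)  # aggregator.combine() over per-model predictions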
|
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
""" Local UT test, run with `sh test_python_ut.sh`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import graphlearn as gl
import graphlearn.python.tests.utils as utils
from graphlearn.python.tests.test_edge import EdgeTestCase
class EdgeIterateTestCase(EdgeTestCase):
def test_edge_iterate(self):
file_path = self.gen_test_data([utils.WEIGHTED], False)
decoder = gl.Decoder(weighted=True)
g = gl.Graph() \
.edge(source=file_path, edge_type=self.edge_tuple_, decoder=decoder)
g.init(tracker=utils.TRACKER_PATH)
batch_size = 4
sampler = g.edge_sampler('first',
batch_size=batch_size, strategy="by_order")
res_src = []
res_dst = []
max_iter = 100
for _ in range(max_iter):
try:
edges = sampler.get()
utils.check_edge_weights(edges)
res_src.extend(list(edges.src_ids))
res_dst.extend(list(edges.dst_ids))
except gl.OutOfRangeError:
break
src_ids = range(self.src_range_[0], self.src_range_[1])
dst_ids = range(self.dst_range_[0], self.dst_range_[1])
utils.check_sorted_equal(res_src, src_ids)
utils.check_sorted_equal(res_dst, dst_ids)
sampler = g.edge_sampler('first', batch_size=batch_size,
strategy="random")
max_iter = 10
src_ids = range(self.src_range_[0], self.src_range_[1])
dst_ids = range(self.dst_range_[0], self.dst_range_[1])
for i in range(max_iter):
edges = sampler.get()
utils.check_edge_weights(edges)
utils.check_subset(edges.src_ids, src_ids)
utils.check_subset(edges.dst_ids, dst_ids)
g.close()
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
"""
File defining the global variables used in the main program
and all subfunctions.
"""
# --------------------------------------------------------
# --------------------- USER NAMELIST --------------------
# --------------------------------------------------------
# Output control
#-------------------------------------------------
out_fname = 'output' # file name of output
iout = 360 # write every iout-th time-step into the output file
iiniout = 1 # write initial field (0 = no, 1 = yes)
# Domain size
#-------------------------------------------------
xl = 500000. # domain size [m]
nx = 100 # number of grid points in horizontal direction
dx = xl/nx # horizontal resolution [m]
thl = 60. # domain depth [K]
nz = 60 # vertical resolution
dt = 10 # time step [s]
diff = 0.2 # (horizontal) diffusion coefficient
time = 6*60*60 # integration time [s]
# Topography
#-------------------------------------------------
topomx = 1500 # mountain height [m]
topowd = 50000 # mountain half width [m]
topotim = 1800 # mountain growth time [s]
# Initial atmosphere
#-------------------------------------------------
u00 = 15. # initial velocity [m/s]
bv00 = 0.01 # Brunt-Vaisalla frequency [1/s]
th00 = 280. # potential temperature at surface
ishear = 0 # wind shear simulation (0 = no shear, 1 = shear)
k_shl = 5 # bottom level of wind shear layer (ishear = 1)
# bottom level of wind layer is 0 (index)
k_sht = 8 # top level of wind shear layer (ishear = 1)
# top level of wind layer is nz-1 (index)
u00_sh = 10. # initial velocity below shear layer [m/s] (ishear = 1)
# u00 is speed above shear layer [m/s] #orig 0.
# Boundaries
#-------------------------------------------------
nab = 30 # number of grid points in absorber
diffabs = 1. # maximum value of absorber
irelax = 1 # lateral boundaries (0 = periodic, 1 = relax)
nb = 2 # number of boundary points on each side
# Print options
#-------------------------------------------------
idbg = 0 # print debugging text (0 = not print, 1 = print)
iprtcfl = 1 # print Courant number (0 = not print, 1 = print)
itime = 1 # print computation time (0 = not print, 1 = print)
# Physics: Moisture
#-------------------------------------------------
imoist = 1 # include moisture (0 = dry, 1 = moist)
imoist_diff = 1 # apply diffusion to qv, qc, qr (0 = off, 1 = on)
imicrophys = 1 # include microphysics (0 = off, 1 = kessler, 2 = two moment)
idthdt = 0 # couple physics to dynamics (0 = off, 1 = on)
iern = 0 # evaporation of rain droplets (0 = off, 1 = on)
# Options for Kessler scheme
#-------------------------------------------------
vt_mult = 1. # multiplication factor for terminal fall velocity
autoconv_th = 0.0001 # critical cloud water mixing ratio for the onset
# of autoconversion [kg/kg]
autoconv_mult = 1. # multiplication factor for autoconversion
sediment_on = 1 # switch to turn on / off sedimentation
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
# Physical constants
#--------------------------
g = 9.81 # gravity
cp = 1004. # specific heat of air at constant pressure
r = 287. # gas constant of air [J/kgK]
r_v = 461. # gas constant of vapor [J/kgK]
rdcp = r/cp # short cut for R/Cp
cpdr = cp/r # short cut for Cp/R
pref = 100*1000. # reference pressure in SI units (Pa, not hPa!)
z00 = 0. # surface height
prs00 = pref # upstream surface pressure (= ref. pressure)
exn00 = cp*(prs00/pref)**rdcp #
# compute input parameters
#--------------------------
dth = thl/nz # spacing between vertical layers [K]
nts = round(time/dt,0) # number of iterations
nout = int(nts/iout) # number of output steps
nx1 = nx + 1 # number of staggered gridpoints in x
nz1 = nz + 1 # number of staggered gridpoints in z
nxb = nx + 2*nb # x range of unstaggered variable
nxb1 = nx1 + 2*nb # x range of staggered variable
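# Worked values implied by the namelist above (for a quick sanity check):
#   dx = 500000/100 = 5000 m           dth = 60/60 = 1 K per layer
#   nts = 21600/10 = 2160 time steps   nout = 2160/360 = 6 output steps
#   nxb = 100 + 2*2 = 104 unstaggered points, nxb1 = 101 + 2*2 = 105 staggered points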
# END OF NAMELIST.PY
|
from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/heat-3d_chill_openmp/tmp_files/7096.c')
procedure('kernel_heat_3d')
loop(0)
known('n>3')
pragma(0,2,"omp parallel for private(t4,t6,t8,t10,t12)")
tile(0,2,16,2)
tile(0,4,64,4)
pragma(1,2,"omp parallel for private(t4,t6,t8,t10,t12)")
tile(1,2,16,2)
tile(1,4,64,4)
|
import tensorflow as tf
# Declare two variables and compute their sum
v1 = tf.Variable(tf.constant(1.0, shape=[1]), name='v1')
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name='v2')
result = v1 + v2
init_op = tf.global_variables_initializer()
# Create a tf.train.Saver instance for saving the model
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(init_op)
    # Save the model to the file Saved_model/model.ckpt
saver.save(sess, "Saved_model/model.ckpt")
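# A minimal restore sketch (not part of the original snippet): reload the
# checkpoint that was just saved into a fresh session and evaluate the sum.
with tf.Session() as sess:
    saver.restore(sess, "Saved_model/model.ckpt")
    print(sess.run(result))  # prints [3.]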
|
# ----------------------------------------------------------------------------------
# Electrum plugin for the Digital Bitbox hardware wallet by Shift Devices AG
# digitalbitbox.com
#
import base64
import binascii
import hashlib
import hmac
import json
import math
import os
import re
import struct
import sys
import time
import copy
from electrum_but.crypto import sha256d, EncodeAES_base64, EncodeAES_bytes, DecodeAES_bytes, hmac_oneshot
from electrum_but.bitcoin import public_key_to_p2pkh
from electrum_but.bip32 import BIP32Node, convert_bip32_intpath_to_strpath, is_all_public_derivation
from electrum_but import ecc
from electrum_but.ecc import msg_magic
from electrum_but.wallet import Standard_Wallet
from electrum_but import constants
from electrum_but.transaction import Transaction, PartialTransaction, PartialTxInput
from electrum_but.i18n import _
from electrum_but.keystore import Hardware_KeyStore
from electrum_but.util import to_string, UserCancelled, UserFacingException, bfh
from electrum_but.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from electrum_but.network import Network
from electrum_but.logging import get_logger
from electrum_but.plugin import runs_in_hwd_thread, run_in_hwd_thread
from ..hw_wallet import HW_PluginBase, HardwareClientBase
_logger = get_logger(__name__)
try:
import hid
DIGIBOX = True
except ImportError as e:
DIGIBOX = False
# ----------------------------------------------------------------------------------
# USB HID interface
#
def to_hexstr(s):
return binascii.hexlify(s).decode('ascii')
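# derive_keys: the input is double-SHA256'd, then SHA-512'd; the first 32
# bytes become the AES encryption key and the last 32 bytes the HMAC
# authentication key used by hid_send_encrypt below.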
def derive_keys(x):
h = sha256d(x)
h = hashlib.sha512(h).digest()
return (h[:32],h[32:])
MIN_MAJOR_VERSION = 5
ENCRYPTION_PRIVKEY_KEY = 'encryptionprivkey'
CHANNEL_ID_KEY = 'comserverchannelid'
class DigitalBitbox_Client(HardwareClientBase):
def __init__(self, plugin, hidDevice):
HardwareClientBase.__init__(self, plugin=plugin)
self.dbb_hid = hidDevice
self.opened = True
self.password = None
self.isInitialized = False
self.setupRunning = False
self.usbReportSize = 64 # firmware > v2.0.0
@runs_in_hwd_thread
def close(self):
if self.opened:
try:
self.dbb_hid.close()
except:
pass
self.opened = False
def is_pairable(self):
return True
def is_initialized(self):
return self.dbb_has_password()
def is_paired(self):
return self.password is not None
def has_usable_connection_with_device(self):
try:
self.dbb_has_password()
except BaseException:
return False
return True
def _get_xpub(self, bip32_path):
if self.check_device_dialog():
return self.hid_send_encrypt(('{"xpub": "%s"}' % bip32_path).encode('utf8'))
def get_xpub(self, bip32_path, xtype):
assert xtype in self.plugin.SUPPORTED_XTYPES
reply = self._get_xpub(bip32_path)
if reply:
xpub = reply['xpub']
# Change type of xpub to the requested type. The firmware
# only ever returns the mainnet standard type, but it is agnostic
# to the type when signing.
if xtype != 'standard' or constants.net.TESTNET:
node = BIP32Node.from_xkey(xpub, net=constants.BitcoinMainnet)
xpub = node._replace(xtype=xtype).to_xpub()
return xpub
else:
raise Exception('no reply')
def dbb_has_password(self):
reply = self.hid_send_plain(b'{"ping":""}')
if 'ping' not in reply:
raise UserFacingException(_('Device communication error. Please unplug and replug your Digital Bitbox.'))
if reply['ping'] == 'password':
return True
return False
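    # stretch_key: PBKDF2-HMAC-SHA512 over the password with a fixed
    # 'Digital Bitbox' salt and 20480 iterations, returned as a hex string.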
def stretch_key(self, key: bytes):
return to_hexstr(hashlib.pbkdf2_hmac('sha512', key, b'Digital Bitbox', iterations = 20480))
def backup_password_dialog(self):
msg = _("Enter the password used when the backup was created:")
while True:
password = self.handler.get_passphrase(msg, False)
if password is None:
return None
if len(password) < 4:
msg = _("Password must have at least 4 characters.") \
+ "\n\n" + _("Enter password:")
elif len(password) > 64:
msg = _("Password must have less than 64 characters.") \
+ "\n\n" + _("Enter password:")
else:
return password.encode('utf8')
def password_dialog(self, msg):
while True:
password = self.handler.get_passphrase(msg, False)
if password is None:
return False
if len(password) < 4:
msg = _("Password must have at least 4 characters.") + \
"\n\n" + _("Enter password:")
elif len(password) > 64:
msg = _("Password must have less than 64 characters.") + \
"\n\n" + _("Enter password:")
else:
self.password = password.encode('utf8')
return True
def check_device_dialog(self):
        match = re.search(r'v([0-9]+)\.[0-9]+\.[0-9]+',
run_in_hwd_thread(self.dbb_hid.get_serial_number_string))
if match is None:
raise Exception("error detecting firmware version")
major_version = int(match.group(1))
if major_version < MIN_MAJOR_VERSION:
raise Exception("Please upgrade to the newest firmware using the BitBox Desktop app: https://shiftcrypto.ch/start")
# Set password if fresh device
if self.password is None and not self.dbb_has_password():
if not self.setupRunning:
return False # A fresh device cannot connect to an existing wallet
msg = _("An uninitialized Digital Bitbox is detected.") + " " + \
_("Enter a new password below.") + "\n\n" + \
_("REMEMBER THE PASSWORD!") + "\n\n" + \
_("You cannot access your coins or a backup without the password.") + "\n" + \
_("A backup is saved automatically when generating a new wallet.")
if self.password_dialog(msg):
reply = self.hid_send_plain(b'{"password":"' + self.password + b'"}')
else:
return False
# Get password from user if not yet set
msg = _("Enter your Digital Bitbox password:")
while self.password is None:
if not self.password_dialog(msg):
raise UserCancelled()
reply = self.hid_send_encrypt(b'{"led":"blink"}')
if 'error' in reply:
self.password = None
if reply['error']['code'] == 109:
msg = _("Incorrect password entered.") + "\n\n" + \
reply['error']['message'] + "\n\n" + \
_("Enter your Digital Bitbox password:")
else:
# Should never occur
msg = _("Unexpected error occurred.") + "\n\n" + \
reply['error']['message'] + "\n\n" + \
_("Enter your Digital Bitbox password:")
# Initialize device if not yet initialized
if not self.setupRunning:
self.isInitialized = True # Wallet exists. Electrum code later checks if the device matches the wallet
elif not self.isInitialized:
reply = self.hid_send_encrypt(b'{"device":"info"}')
if reply['device']['id'] != "":
self.recover_or_erase_dialog() # Already seeded
else:
self.seed_device_dialog() # Seed if not initialized
self.mobile_pairing_dialog()
return self.isInitialized
def recover_or_erase_dialog(self):
msg = _("The Digital Bitbox is already seeded. Choose an option:") + "\n"
choices = [
(_("Create a wallet using the current seed")),
(_("Load a wallet from the micro SD card (the current seed is overwritten)")),
(_("Erase the Digital Bitbox"))
]
reply = self.handler.query_choice(msg, choices)
if reply is None:
return # user cancelled
if reply == 2:
self.dbb_erase()
elif reply == 1:
if not self.dbb_load_backup():
return
else:
if self.hid_send_encrypt(b'{"device":"info"}')['device']['lock']:
raise UserFacingException(_("Full 2FA enabled. This is not supported yet."))
# Use existing seed
self.isInitialized = True
def seed_device_dialog(self):
msg = _("Choose how to initialize your Digital Bitbox:") + "\n"
choices = [
(_("Generate a new random wallet")),
(_("Load a wallet from the micro SD card"))
]
reply = self.handler.query_choice(msg, choices)
if reply is None:
return # user cancelled
if reply == 0:
self.dbb_generate_wallet()
else:
if not self.dbb_load_backup(show_msg=False):
return
self.isInitialized = True
def mobile_pairing_dialog(self):
dbb_user_dir = None
if sys.platform == 'darwin':
dbb_user_dir = os.path.join(os.environ.get("HOME", ""), "Library", "Application Support", "DBB")
elif sys.platform == 'win32':
dbb_user_dir = os.path.join(os.environ["APPDATA"], "DBB")
else:
dbb_user_dir = os.path.join(os.environ["HOME"], ".dbb")
if not dbb_user_dir:
return
try:
# Python 3.5+
jsonDecodeError = json.JSONDecodeError
except AttributeError:
jsonDecodeError = ValueError
try:
with open(os.path.join(dbb_user_dir, "config.dat")) as f:
dbb_config = json.load(f)
except (FileNotFoundError, jsonDecodeError):
return
if ENCRYPTION_PRIVKEY_KEY not in dbb_config or CHANNEL_ID_KEY not in dbb_config:
return
choices = [
_('Do not pair'),
_('Import pairing from the Digital Bitbox desktop app'),
]
reply = self.handler.query_choice(_('Mobile pairing options'), choices)
if reply is None:
return # user cancelled
if reply == 0:
if self.plugin.is_mobile_paired():
del self.plugin.digitalbitbox_config[ENCRYPTION_PRIVKEY_KEY]
del self.plugin.digitalbitbox_config[CHANNEL_ID_KEY]
elif reply == 1:
# import pairing from dbb app
self.plugin.digitalbitbox_config[ENCRYPTION_PRIVKEY_KEY] = dbb_config[ENCRYPTION_PRIVKEY_KEY]
self.plugin.digitalbitbox_config[CHANNEL_ID_KEY] = dbb_config[CHANNEL_ID_KEY]
self.plugin.config.set_key('digitalbitbox', self.plugin.digitalbitbox_config)
def dbb_generate_wallet(self):
key = self.stretch_key(self.password)
filename = ("Butcoin-Electrum-" + time.strftime("%Y-%m-%d-%H-%M-%S") + ".pdf")
msg = ('{"seed":{"source": "create", "key": "%s", "filename": "%s", "entropy": "%s"}}' % (key, filename, to_hexstr(os.urandom(32)))).encode('utf8')
reply = self.hid_send_encrypt(msg)
if 'error' in reply:
raise UserFacingException(reply['error']['message'])
def dbb_erase(self):
self.handler.show_message(_("Are you sure you want to erase the Digital Bitbox?") + "\n\n" +
_("To continue, touch the Digital Bitbox's light for 3 seconds.") + "\n\n" +
_("To cancel, briefly touch the light or wait for the timeout."))
hid_reply = self.hid_send_encrypt(b'{"reset":"__ERASE__"}')
self.handler.finished()
if 'error' in hid_reply:
raise UserFacingException(hid_reply['error']['message'])
else:
self.password = None
raise UserFacingException('Device erased')
def dbb_load_backup(self, show_msg=True):
backups = self.hid_send_encrypt(b'{"backup":"list"}')
if 'error' in backups:
raise UserFacingException(backups['error']['message'])
f = self.handler.query_choice(_("Choose a backup file:"), backups['backup'])
if f is None:
return False # user cancelled
key = self.backup_password_dialog()
if key is None:
raise Exception('Canceled by user')
key = self.stretch_key(key)
if show_msg:
self.handler.show_message(_("Loading backup...") + "\n\n" +
_("To continue, touch the Digital Bitbox's light for 3 seconds.") + "\n\n" +
_("To cancel, briefly touch the light or wait for the timeout."))
msg = ('{"seed":{"source": "backup", "key": "%s", "filename": "%s"}}' % (key, backups['backup'][f])).encode('utf8')
hid_reply = self.hid_send_encrypt(msg)
self.handler.finished()
if 'error' in hid_reply:
raise UserFacingException(hid_reply['error']['message'])
return True
@runs_in_hwd_thread
def hid_send_frame(self, data):
HWW_CID = 0xFF000000
HWW_CMD = 0x80 + 0x40 + 0x01
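        # U2F-style HID framing: the INIT frame carries a zero report id,
        # the 4-byte channel id (CID), a command byte and a 2-byte
        # big-endian payload length followed by data; CONT frames carry the
        # report id, CID and a sequence byte; each frame is padded to
        # usbReportSize with 0xEE bytes.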
data_len = len(data)
        seq = 0
        idx = 0
write = []
while idx < data_len:
if idx == 0:
# INIT frame
write = data[idx : idx + min(data_len, self.usbReportSize - 7)]
self.dbb_hid.write(b'\0' + struct.pack(">IBH", HWW_CID, HWW_CMD, data_len & 0xFFFF) + write + b'\xEE' * (self.usbReportSize - 7 - len(write)))
else:
# CONT frame
write = data[idx : idx + min(data_len, self.usbReportSize - 5)]
self.dbb_hid.write(b'\0' + struct.pack(">IB", HWW_CID, seq) + write + b'\xEE' * (self.usbReportSize - 5 - len(write)))
seq += 1
idx += len(write)
@runs_in_hwd_thread
def hid_read_frame(self):
# INIT response
read = bytearray(self.dbb_hid.read(self.usbReportSize))
cid = ((read[0] * 256 + read[1]) * 256 + read[2]) * 256 + read[3]
cmd = read[4]
data_len = read[5] * 256 + read[6]
data = read[7:]
        idx = len(read) - 7
while idx < data_len:
# CONT response
read = bytearray(self.dbb_hid.read(self.usbReportSize))
data += read[5:]
idx += len(read) - 5
return data
@runs_in_hwd_thread
def hid_send_plain(self, msg):
reply = ""
try:
serial_number = self.dbb_hid.get_serial_number_string()
if "v2.0." in serial_number or "v1." in serial_number:
hidBufSize = 4096
self.dbb_hid.write('\0' + msg + '\0' * (hidBufSize - len(msg)))
r = bytearray()
while len(r) < hidBufSize:
r += bytearray(self.dbb_hid.read(hidBufSize))
else:
self.hid_send_frame(msg)
r = self.hid_read_frame()
r = r.rstrip(b' \t\r\n\0')
r = r.replace(b"\0", b'')
r = to_string(r, 'utf8')
reply = json.loads(r)
except Exception as e:
_logger.info(f'Exception caught {repr(e)}')
return reply
@runs_in_hwd_thread
def hid_send_encrypt(self, msg):
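        # Outgoing: the JSON command is AES-encrypted with the encryption
        # key, an HMAC-SHA256 of the ciphertext (authentication key) is
        # appended, and the result is base64-encoded. Incoming: the base64
        # 'ciphertext' is split into payload and trailing HMAC, the HMAC is
        # verified with a constant-time compare, then the payload is
        # decrypted and parsed as JSON.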
sha256_byte_len = 32
reply = ""
try:
encryption_key, authentication_key = derive_keys(self.password)
msg = EncodeAES_bytes(encryption_key, msg)
hmac_digest = hmac_oneshot(authentication_key, msg, hashlib.sha256)
authenticated_msg = base64.b64encode(msg + hmac_digest)
reply = self.hid_send_plain(authenticated_msg)
if 'ciphertext' in reply:
b64_unencoded = bytes(base64.b64decode(''.join(reply["ciphertext"])))
reply_hmac = b64_unencoded[-sha256_byte_len:]
hmac_calculated = hmac_oneshot(authentication_key, b64_unencoded[:-sha256_byte_len], hashlib.sha256)
if not hmac.compare_digest(reply_hmac, hmac_calculated):
raise Exception("Failed to validate HMAC")
reply = DecodeAES_bytes(encryption_key, b64_unencoded[:-sha256_byte_len])
reply = to_string(reply, 'utf8')
reply = json.loads(reply)
if 'error' in reply:
self.password = None
except Exception as e:
_logger.info(f'Exception caught {repr(e)}')
return reply
# ----------------------------------------------------------------------------------
#
#
class DigitalBitbox_KeyStore(Hardware_KeyStore):
hw_type = 'digitalbitbox'
device = 'DigitalBitbox'
plugin: 'DigitalBitboxPlugin'
def __init__(self, d):
Hardware_KeyStore.__init__(self, d)
self.force_watching_only = False
self.maxInputs = 14 # maximum inputs per single sign command
def give_error(self, message, clear_client = False):
if clear_client:
self.client = None
raise Exception(message)
def decrypt_message(self, pubkey, message, password):
raise RuntimeError(_('Encryption and decryption are currently not supported for {}').format(self.device))
def sign_message(self, sequence, message, password):
sig = None
try:
message = message.encode('utf8')
inputPath = self.get_derivation_prefix() + "/%d/%d" % sequence
msg_hash = sha256d(msg_magic(message))
inputHash = to_hexstr(msg_hash)
hasharray = []
hasharray.append({'hash': inputHash, 'keypath': inputPath})
hasharray = json.dumps(hasharray)
msg = ('{"sign":{"meta":"sign message", "data":%s}}' % hasharray).encode('utf8')
dbb_client = self.plugin.get_client(self)
if not dbb_client.is_paired():
raise Exception(_("Could not sign message."))
reply = dbb_client.hid_send_encrypt(msg)
self.handler.show_message(_("Signing message ...") + "\n\n" +
_("To continue, touch the Digital Bitbox's blinking light for 3 seconds.") + "\n\n" +
_("To cancel, briefly touch the blinking light or wait for the timeout."))
reply = dbb_client.hid_send_encrypt(msg) # Send twice, first returns an echo for smart verification (not implemented)
self.handler.finished()
if 'error' in reply:
raise Exception(reply['error']['message'])
if 'sign' not in reply:
raise Exception(_("Could not sign message."))
if 'recid' in reply['sign'][0]:
# firmware > v2.1.1
sig_string = binascii.unhexlify(reply['sign'][0]['sig'])
recid = int(reply['sign'][0]['recid'], 16)
sig = ecc.construct_sig65(sig_string, recid, True)
pubkey, compressed = ecc.ECPubkey.from_signature65(sig, msg_hash)
addr = public_key_to_p2pkh(pubkey.get_public_key_bytes(compressed=compressed))
if ecc.verify_message_with_address(addr, sig, message) is False:
raise Exception(_("Could not sign message"))
elif 'pubkey' in reply['sign'][0]:
# firmware <= v2.1.1
for recid in range(4):
sig_string = binascii.unhexlify(reply['sign'][0]['sig'])
sig = ecc.construct_sig65(sig_string, recid, True)
try:
addr = public_key_to_p2pkh(binascii.unhexlify(reply['sign'][0]['pubkey']))
if ecc.verify_message_with_address(addr, sig, message):
break
except Exception:
continue
else:
raise Exception(_("Could not sign message"))
except BaseException as e:
self.give_error(e)
return sig
def sign_transaction(self, tx, password):
if tx.is_complete():
return
try:
p2pkhTransaction = True
inputhasharray = []
hasharray = []
pubkeyarray = []
# Build hasharray from inputs
for i, txin in enumerate(tx.inputs()):
if txin.is_coinbase_input():
self.give_error("Coinbase not supported") # should never happen
if txin.script_type != 'p2pkh':
p2pkhTransaction = False
my_pubkey, inputPath = self.find_my_pubkey_in_txinout(txin)
if not inputPath:
self.give_error("No matching pubkey for sign_transaction") # should never happen
inputPath = convert_bip32_intpath_to_strpath(inputPath)
inputHash = sha256d(bfh(tx.serialize_preimage(i)))
hasharray_i = {'hash': to_hexstr(inputHash), 'keypath': inputPath}
hasharray.append(hasharray_i)
inputhasharray.append(inputHash)
# Build pubkeyarray from outputs
for txout in tx.outputs():
assert txout.address
if txout.is_change:
changePubkey, changePath = self.find_my_pubkey_in_txinout(txout)
assert changePath
changePath = convert_bip32_intpath_to_strpath(changePath)
changePubkey = changePubkey.hex()
pubkeyarray_i = {'pubkey': changePubkey, 'keypath': changePath}
pubkeyarray.append(pubkeyarray_i)
# Special serialization of the unsigned transaction for
# the mobile verification app.
# At the moment, verification only works for p2pkh transactions.
if p2pkhTransaction:
tx_copy = copy.deepcopy(tx)
# monkey-patch method of tx_copy instance to change serialization
def input_script(self, txin: PartialTxInput, *, estimate_size=False):
if txin.script_type == 'p2pkh':
return Transaction.get_preimage_script(txin)
raise Exception("unsupported type %s" % txin.script_type)
tx_copy.input_script = input_script.__get__(tx_copy, PartialTransaction)
tx_dbb_serialized = tx_copy.serialize_to_network()
else:
# We only need this for the signing echo / verification.
tx_dbb_serialized = None
# Build sign command
dbb_signatures = []
steps = math.ceil(1.0 * len(hasharray) / self.maxInputs)
for step in range(int(steps)):
hashes = hasharray[step * self.maxInputs : (step + 1) * self.maxInputs]
msg = {
"sign": {
"data": hashes,
"checkpub": pubkeyarray,
},
}
if tx_dbb_serialized is not None:
msg["sign"]["meta"] = to_hexstr(sha256d(tx_dbb_serialized))
msg = json.dumps(msg).encode('ascii')
dbb_client = self.plugin.get_client(self)
if not dbb_client.is_paired():
raise Exception("Could not sign transaction.")
reply = dbb_client.hid_send_encrypt(msg)
if 'error' in reply:
raise Exception(reply['error']['message'])
if 'echo' not in reply:
raise Exception("Could not sign transaction.")
if self.plugin.is_mobile_paired() and tx_dbb_serialized is not None:
reply['tx'] = tx_dbb_serialized
self.plugin.comserver_post_notification(reply)
if steps > 1:
self.handler.show_message(_("Signing large transaction. Please be patient ...") + "\n\n" +
_("To continue, touch the Digital Bitbox's blinking light for 3 seconds.") + " " +
_("(Touch {} of {})").format((step + 1), steps) + "\n\n" +
_("To cancel, briefly touch the blinking light or wait for the timeout.") + "\n\n")
else:
self.handler.show_message(_("Signing transaction...") + "\n\n" +
_("To continue, touch the Digital Bitbox's blinking light for 3 seconds.") + "\n\n" +
_("To cancel, briefly touch the blinking light or wait for the timeout."))
# Send twice, first returns an echo for smart verification
reply = dbb_client.hid_send_encrypt(msg)
self.handler.finished()
if 'error' in reply:
if reply["error"].get('code') in (600, 601):
# aborted via LED short touch or timeout
raise UserCancelled()
raise Exception(reply['error']['message'])
if 'sign' not in reply:
raise Exception("Could not sign transaction.")
dbb_signatures.extend(reply['sign'])
# Fill signatures
if len(dbb_signatures) != len(tx.inputs()):
raise Exception("Incorrect number of transactions signed.") # Should never occur
for i, txin in enumerate(tx.inputs()):
for pubkey_bytes in txin.pubkeys:
if txin.is_complete():
break
signed = dbb_signatures[i]
if 'recid' in signed:
# firmware > v2.1.1
recid = int(signed['recid'], 16)
s = binascii.unhexlify(signed['sig'])
h = inputhasharray[i]
pk = ecc.ECPubkey.from_sig_string(s, recid, h)
pk = pk.get_public_key_hex(compressed=True)
elif 'pubkey' in signed:
# firmware <= v2.1.1
pk = signed['pubkey']
if pk != pubkey_bytes.hex():
continue
sig_r = int(signed['sig'][:64], 16)
sig_s = int(signed['sig'][64:], 16)
sig = ecc.der_sig_from_r_and_s(sig_r, sig_s)
sig = to_hexstr(sig) + '01'
tx.add_signature_to_txin(txin_idx=i, signing_pubkey=pubkey_bytes.hex(), sig=sig)
except UserCancelled:
raise
except BaseException as e:
self.give_error(e, True)
else:
_logger.info(f"Transaction is_complete {tx.is_complete()}")
class DigitalBitboxPlugin(HW_PluginBase):
libraries_available = DIGIBOX
keystore_class = DigitalBitbox_KeyStore
client = None
DEVICE_IDS = [
(0x03eb, 0x2402) # Digital Bitbox
]
SUPPORTED_XTYPES = ('standard', )
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS, plugin=self)
self.digitalbitbox_config = self.config.get('digitalbitbox', {})
@runs_in_hwd_thread
def get_dbb_device(self, device):
dev = hid.device()
dev.open_path(device.path)
return dev
def create_client(self, device, handler):
if device.interface_number == 0 or device.usage_page == 0xffff:
if handler:
self.handler = handler
client = self.get_dbb_device(device)
if client is not None:
client = DigitalBitbox_Client(self, client)
return client
else:
return None
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if purpose == HWD_SETUP_NEW_WALLET:
client.setupRunning = True
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub("m/44'/5'", 'standard'))
return client
def is_mobile_paired(self):
return ENCRYPTION_PRIVKEY_KEY in self.digitalbitbox_config
def comserver_post_notification(self, payload):
assert self.is_mobile_paired(), "unexpected mobile pairing error"
url = 'https://digitalbitbox.com/smartverification/index.php'
key_s = base64.b64decode(self.digitalbitbox_config[ENCRYPTION_PRIVKEY_KEY])
args = 'c=data&s=0&dt=0&uuid=%s&pl=%s' % (
self.digitalbitbox_config[CHANNEL_ID_KEY],
EncodeAES_base64(key_s, json.dumps(payload).encode('ascii')).decode('ascii'),
)
try:
text = Network.send_http_on_proxy('post', url, body=args.encode('ascii'), headers={'content-type': 'application/x-www-form-urlencoded'})
_logger.info(f'digitalbitbox reply from server {text}')
except Exception as e:
self.handler.show_error(repr(e)) # repr because str(Exception()) == ''
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
if is_all_public_derivation(derivation):
raise Exception(f"The {self.device} does not reveal xpubs corresponding to non-hardened paths. (path: {derivation})")
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
client.check_device_dialog()
xpub = client.get_xpub(derivation, xtype)
return xpub
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True):
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
if client is not None:
client.check_device_dialog()
return client
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
if type(wallet) is not Standard_Wallet:
keystore.handler.show_error(_('This function is only available for standard wallets when using {}.').format(self.device))
return
if not self.is_mobile_paired():
keystore.handler.show_error(_('This function is only available after pairing your {} with a mobile device.').format(self.device))
return
if wallet.get_txin_type(address) != 'p2pkh':
keystore.handler.show_error(_('This function is only available for p2pkh keystores when using {}.').format(self.device))
return
change, index = wallet.get_address_index(address)
keypath = '%s/%d/%d' % (keystore.get_derivation_prefix(), change, index)
xpub = self.get_client(keystore)._get_xpub(keypath)
verify_request_payload = {
"type": 'p2pkh',
"echo": xpub['echo'],
}
self.comserver_post_notification(verify_request_payload)
|
# -*- coding: utf-8 -*-
from .basetypes import WidgetParameterItem, SimpleParameter
from ...Qt import QtCore
from ...colormap import ColorMap
from ...widgets.GradientWidget import GradientWidget
class ColorMapParameterItem(WidgetParameterItem):
"""Registered parameter type which displays a :class:`GradientWidget <pyqtgraph.GradientWidget>`"""
def makeWidget(self):
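        # Adapt GradientWidget to the WidgetParameterItem interface by
        # aliasing its gradient-change signals and its colormap
        # getter/setter onto the generic sigChanged/sigChanging/value/
        # setValue attributes.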
w = GradientWidget(orientation='bottom')
w.sizeHint = lambda: QtCore.QSize(300, 35)
w.sigChanged = w.sigGradientChangeFinished
w.sigChanging = w.sigGradientChanged
w.value = w.colorMap
w.setValue = w.setColorMap
self.hideWidget = False
self.asSubItem = True
return w
class ColorMapParameter(SimpleParameter):
itemClass = ColorMapParameterItem
def _interpretValue(self, v):
if v is not None and not isinstance(v, ColorMap):
raise TypeError("Cannot set colormap parameter from object %r" % v)
return v
|
#!/usr/bin/python3
"""Test BaseModel for expected behavior and documentation"""
from datetime import datetime
import inspect
import models
import pep8 as pycodestyle
import time
import unittest
from unittest import mock
BaseModel = models.base_model.BaseModel
module_doc = models.base_model.__doc__
class TestBaseModelDocs(unittest.TestCase):
"""Tests to check the documentation and style of BaseModel class"""
@classmethod
def setUpClass(self):
"""Set up for docstring tests"""
self.base_funcs = inspect.getmembers(BaseModel, inspect.isfunction)
def test_pep8_conformance(self):
"""Test that models/base_model.py conforms to PEP8."""
for path in ['models/base_model.py',
'tests/test_models/test_base_model.py']:
with self.subTest(path=path):
errors = pycodestyle.Checker(path).check_all()
self.assertEqual(errors, 0)
def test_module_docstring(self):
"""Test for the existence of module docstring"""
self.assertIsNot(module_doc, None,
"base_model.py needs a docstring")
self.assertTrue(len(module_doc) > 1,
"base_model.py needs a docstring")
def test_class_docstring(self):
"""Test for the BaseModel class docstring"""
self.assertIsNot(BaseModel.__doc__, None,
"BaseModel class needs a docstring")
self.assertTrue(len(BaseModel.__doc__) >= 1,
"BaseModel class needs a docstring")
def test_func_docstrings(self):
"""Test for the presence of docstrings in BaseModel methods"""
for func in self.base_funcs:
with self.subTest(function=func):
self.assertIsNot(
func[1].__doc__,
None,
"{:s} method needs a docstring".format(func[0])
)
self.assertTrue(
len(func[1].__doc__) > 1,
"{:s} method needs a docstring".format(func[0])
)
class TestBaseModel(unittest.TestCase):
"""Test the BaseModel class"""
@mock.patch('models.storage')
def test_instantiation(self, mock_storage):
"""Test that object is correctly created"""
inst = BaseModel()
self.assertIs(type(inst), BaseModel)
inst.name = "Holberton"
inst.number = 89
attrs_types = {
"id": str,
"created_at": datetime,
"updated_at": datetime,
"name": str,
"number": int
}
for attr, typ in attrs_types.items():
with self.subTest(attr=attr, typ=typ):
self.assertIn(attr, inst.__dict__)
self.assertIs(type(inst.__dict__[attr]), typ)
self.assertTrue(mock_storage.new.called)
self.assertEqual(inst.name, "Holberton")
self.assertEqual(inst.number, 89)
def test_datetime_attributes(self):
"""Test that two BaseModel instances have different datetime objects
and that upon creation have identical updated_at and created_at
value."""
tic = datetime.now()
inst1 = BaseModel()
toc = datetime.now()
self.assertTrue(tic <= inst1.created_at <= toc)
time.sleep(1e-4)
tic = datetime.now()
inst2 = BaseModel()
toc = datetime.now()
self.assertTrue(tic <= inst2.created_at <= toc)
self.assertEqual(inst1.created_at, inst1.updated_at)
self.assertEqual(inst2.created_at, inst2.updated_at)
self.assertNotEqual(inst1.created_at, inst2.created_at)
self.assertNotEqual(inst1.updated_at, inst2.updated_at)
def test_uuid(self):
"""Test that id is a valid uuid"""
inst1 = BaseModel()
inst2 = BaseModel()
for inst in [inst1, inst2]:
uuid = inst.id
with self.subTest(uuid=uuid):
self.assertIs(type(uuid), str)
self.assertRegex(uuid,
'^[0-9a-f]{8}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{12}$')
self.assertNotEqual(inst1.id, inst2.id)
def test_to_dict(self):
"""Test conversion of object attributes to dictionary for json"""
my_model = BaseModel()
my_model.name = "Holberton"
my_model.my_number = 89
d = my_model.to_dict()
expected_attrs = ["id",
"created_at",
"updated_at",
"name",
"my_number",
"__class__"]
self.assertCountEqual(d.keys(), expected_attrs)
self.assertEqual(d['__class__'], 'BaseModel')
self.assertEqual(d['name'], "Holberton")
self.assertEqual(d['my_number'], 89)
def test_to_dict_values(self):
"""test that values in dict returned from to_dict are correct"""
t_format = "%Y-%m-%dT%H:%M:%S.%f"
bm = BaseModel()
new_d = bm.to_dict()
self.assertEqual(new_d["__class__"], "BaseModel")
self.assertEqual(type(new_d["created_at"]), str)
self.assertEqual(type(new_d["updated_at"]), str)
self.assertEqual(new_d["created_at"], bm.created_at.strftime(t_format))
self.assertEqual(new_d["updated_at"], bm.updated_at.strftime(t_format))
def test_str(self):
"""test that the str method has the correct output"""
inst = BaseModel()
string = "[BaseModel] ({}) {}".format(inst.id, inst.__dict__)
self.assertEqual(string, str(inst))
@mock.patch('models.storage')
def test_save(self, mock_storage):
"""Test that save method updates `updated_at` and calls
`storage.save`"""
inst = BaseModel()
old_created_at = inst.created_at
old_updated_at = inst.updated_at
inst.save()
new_created_at = inst.created_at
new_updated_at = inst.updated_at
self.assertNotEqual(old_updated_at, new_updated_at)
self.assertEqual(old_created_at, new_created_at)
self.assertTrue(mock_storage.save.called)
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.hardening_status_status import HardeningStatusStatus
class TestHardeningStatusStatus(unittest.TestCase):
""" HardeningStatusStatus unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testHardeningStatusStatus(self):
"""
Test HardeningStatusStatus
"""
model = swagger_client.models.hardening_status_status.HardeningStatusStatus()
if __name__ == '__main__':
unittest.main()
|
import argparse
from myo_sign_language.data_saver.myo_connector import run as myo_listen
from myo_sign_language.data_saver.recorder import run as run_recording
default_port = 3002
def check_positive_integer(value):
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError("%s is an invalid positive int value" % value)
return ivalue
parser = argparse.ArgumentParser(
description='Run server to get data by OSC from MYO '
'and server that listen on data from osc server and save it on disk')
parser.add_argument('-a', '--address', dest='address', type=check_positive_integer,
                    help='Port number to which the data is sent;'
                         f' if not defined, the default is {default_port}')
parser.add_argument('-m', '--myo', dest='myo', action='store_true',
                    help='listen for MYO data and send it to the port defined with -a')
parser.add_argument('-r', '--recording', dest='recording', action='store_true',
                    help='listen to the OSC server which sends data to the port defined with -a')
def get_port(user_args):
if user_args.address:
return user_args.address
else:
return default_port
def run_server_by_params(port, user_args):
if user_args.myo:
try:
myo_listen(port)
except OSError as error:
print(error)
elif user_args.recording:
try:
run_recording(port)
except OSError:
print(f"Please use another port than {port}"
f" to listen on data from myo served by osc server. Because this one is busy.")
else:
        print('Please specify which script to run: -m (--myo) or -r (--recording).')
parser.print_help()
if __name__ == '__main__':
user_args = parser.parse_args()
port = get_port(user_args)
run_server_by_params(port, user_args)
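# Example invocations (a sketch; the actual script/module name may differ):
#   python main.py --myo --address 3002    # stream MYO data to OSC port 3002
#   python main.py --recording             # record data from the default port (3002)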
|
import json
import http.client as httplib  # Python 3 name for the Python 2 httplib module
def user_request(access_token):
method = "GET"
endpoint = "api.github.com"
url = "/user"
headers = {
"Authorization": "token " + access_token, # https://developer.github.com/v3/#oauth2-token-sent-in-a-header
"Content-Type": "application/json",
"User-Agent": "Localehub" # https://developer.github.com/v3/#user-agent-required
}
print(method, endpoint + url, 'token ' + access_token)
conn = httplib.HTTPSConnection(endpoint)
conn.request(method, url, None, headers)
return conn.getresponse()
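# lambda_handler below expects an API Gateway event whose custom authorizer
# context exposes the GitHub access token under the 'githob' key.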
def lambda_handler(event, context):
github_token = event['requestContext']['authorizer']['githob']
response = user_request(github_token)
status = response.status
    data = response.read().decode('utf-8')  # raw JSON body as a string
#print(response.status, response.reason) # 200 OK
#print(response.read())
return {
"statusCode": status,
"body": data,
"headers": {
"Access-Control-Allow-Origin": "*",
"Content-Type": "application/json"
}
}
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
from cloudferry.lib.scheduler.task import Task
class EndTask(Task):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
super(EndTask, self).__init__()
|
# coding: utf-8
"""
Gitea API.
This documentation describes the Gitea API. # noqa: E501
OpenAPI spec version: 1.16.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class StopWatch(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'created': 'datetime',
'duration': 'str',
'issue_index': 'int',
'issue_title': 'str',
'repo_name': 'str',
'repo_owner_name': 'str',
'seconds': 'int'
}
attribute_map = {
'created': 'created',
'duration': 'duration',
'issue_index': 'issue_index',
'issue_title': 'issue_title',
'repo_name': 'repo_name',
'repo_owner_name': 'repo_owner_name',
'seconds': 'seconds'
}
def __init__(self, created=None, duration=None, issue_index=None, issue_title=None, repo_name=None, repo_owner_name=None, seconds=None): # noqa: E501
"""StopWatch - a model defined in Swagger""" # noqa: E501
self._created = None
self._duration = None
self._issue_index = None
self._issue_title = None
self._repo_name = None
self._repo_owner_name = None
self._seconds = None
self.discriminator = None
if created is not None:
self.created = created
if duration is not None:
self.duration = duration
if issue_index is not None:
self.issue_index = issue_index
if issue_title is not None:
self.issue_title = issue_title
if repo_name is not None:
self.repo_name = repo_name
if repo_owner_name is not None:
self.repo_owner_name = repo_owner_name
if seconds is not None:
self.seconds = seconds
@property
def created(self):
"""Gets the created of this StopWatch. # noqa: E501
:return: The created of this StopWatch. # noqa: E501
:rtype: datetime
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this StopWatch.
:param created: The created of this StopWatch. # noqa: E501
:type: datetime
"""
self._created = created
@property
def duration(self):
"""Gets the duration of this StopWatch. # noqa: E501
:return: The duration of this StopWatch. # noqa: E501
:rtype: str
"""
return self._duration
@duration.setter
def duration(self, duration):
"""Sets the duration of this StopWatch.
:param duration: The duration of this StopWatch. # noqa: E501
:type: str
"""
self._duration = duration
@property
def issue_index(self):
"""Gets the issue_index of this StopWatch. # noqa: E501
:return: The issue_index of this StopWatch. # noqa: E501
:rtype: int
"""
return self._issue_index
@issue_index.setter
def issue_index(self, issue_index):
"""Sets the issue_index of this StopWatch.
:param issue_index: The issue_index of this StopWatch. # noqa: E501
:type: int
"""
self._issue_index = issue_index
@property
def issue_title(self):
"""Gets the issue_title of this StopWatch. # noqa: E501
:return: The issue_title of this StopWatch. # noqa: E501
:rtype: str
"""
return self._issue_title
@issue_title.setter
def issue_title(self, issue_title):
"""Sets the issue_title of this StopWatch.
:param issue_title: The issue_title of this StopWatch. # noqa: E501
:type: str
"""
self._issue_title = issue_title
@property
def repo_name(self):
"""Gets the repo_name of this StopWatch. # noqa: E501
:return: The repo_name of this StopWatch. # noqa: E501
:rtype: str
"""
return self._repo_name
@repo_name.setter
def repo_name(self, repo_name):
"""Sets the repo_name of this StopWatch.
:param repo_name: The repo_name of this StopWatch. # noqa: E501
:type: str
"""
self._repo_name = repo_name
@property
def repo_owner_name(self):
"""Gets the repo_owner_name of this StopWatch. # noqa: E501
:return: The repo_owner_name of this StopWatch. # noqa: E501
:rtype: str
"""
return self._repo_owner_name
@repo_owner_name.setter
def repo_owner_name(self, repo_owner_name):
"""Sets the repo_owner_name of this StopWatch.
:param repo_owner_name: The repo_owner_name of this StopWatch. # noqa: E501
:type: str
"""
self._repo_owner_name = repo_owner_name
@property
def seconds(self):
"""Gets the seconds of this StopWatch. # noqa: E501
:return: The seconds of this StopWatch. # noqa: E501
:rtype: int
"""
return self._seconds
@seconds.setter
def seconds(self, seconds):
"""Sets the seconds of this StopWatch.
:param seconds: The seconds of this StopWatch. # noqa: E501
:type: int
"""
self._seconds = seconds
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(StopWatch, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StopWatch):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
from .erd_cycle_state import ErdCycleState, ErdCycleStateRaw
CYCLE_STATE_RAW_MAP = {
ErdCycleStateRaw.PREWASH: ErdCycleState.PRE_WASH,
ErdCycleStateRaw.PREWASH1: ErdCycleState.PRE_WASH,
ErdCycleStateRaw.AUTO_HOT_START1: ErdCycleState.PRE_WASH,
ErdCycleStateRaw.AUTO_HOT_START2: ErdCycleState.PRE_WASH,
ErdCycleStateRaw.AUTO_HOT_START3: ErdCycleState.PRE_WASH,
ErdCycleStateRaw.END_PREWASH1: ErdCycleState.PRE_WASH,
ErdCycleStateRaw.SENSING: ErdCycleState.SENSING,
ErdCycleStateRaw.MAIN_WASH: ErdCycleState.MAIN_WASH,
ErdCycleStateRaw.DIVERTER_CAL: ErdCycleState.MAIN_WASH,
ErdCycleStateRaw.DRYING: ErdCycleState.DRYING,
ErdCycleStateRaw.SANITIZING: ErdCycleState.SANITIZING,
ErdCycleStateRaw.RINSING: ErdCycleState.RINSING,
ErdCycleStateRaw.TURNIDITY_CAL: ErdCycleState.RINSING,
ErdCycleStateRaw.FINAL_RINSE: ErdCycleState.RINSING,
ErdCycleStateRaw.FINAL_RINSE_FILL: ErdCycleState.RINSING,
ErdCycleStateRaw.PAUSE: ErdCycleState.PAUSE,
ErdCycleStateRaw.STATE_17: ErdCycleState.NA,
ErdCycleStateRaw.STATE_18: ErdCycleState.NA,
ErdCycleStateRaw.CYCLE_INACTIVE: ErdCycleState.NA,
ErdCycleStateRaw.MAX: ErdCycleState.NA,
ErdCycleStateRaw.INVALID: ErdCycleState.NA
}
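# A minimal usage sketch (hypothetical helper, not part of the original module):
# unknown or unmapped raw states fall back to ErdCycleState.NA.
def to_cycle_state(raw: ErdCycleStateRaw) -> ErdCycleState:
    return CYCLE_STATE_RAW_MAP.get(raw, ErdCycleState.NA)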
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import argparse
import imp
import logging
import sys
import six
from lxml import etree
from . import namespaces as ns, xsd
from .py2xsd import generate_xsdspec
from .soap import SOAP_HTTP_Transport
from .utils import uncapitalize
from .wsdl import get_wsdl_classes
logger = logging.getLogger('soapfish')
# --- Helpers -----------------------------------------------------------------
def build_service(wsdl, definitions, service):
wsdl_port = wsdl.Port()
wsdl_port.name = service.name + 'Port'
wsdl_port.binding = 'tns:' + service.name + 'Binding'
wsdl_port.address = wsdl.SOAP_Address(location=service.location)
wsdl_service = wsdl.Service()
wsdl_service.name = service.name
wsdl_service.ports.append(wsdl_port)
definitions.services.append(wsdl_service)
def build_bindings(wsdl, definitions, service):
binding = wsdl.Binding()
binding.name = service.name + 'Binding'
binding.type = 'tns:' + service.name + 'PortType'
binding.binding = wsdl.SOAP_Binding()
binding.binding.style = 'document'
binding.binding.transport = SOAP_HTTP_Transport
for method in service.methods:
operation = wsdl.Operation()
operation.name = method.operationName
operation.operation = wsdl.SOAP_Operation()
operation.operation.soapAction = method.soapAction
operation.input = wsdl.Input(body=wsdl.SOAP_Body(use='literal'))
operation.output = wsdl.Output(body=wsdl.SOAP_Body(use='literal'))
operation.operation.style = method.style
binding.operations.append(operation)
definitions.bindings.append(binding)
def build_portTypes(wsdl, definitions, service):
portType = wsdl.PortType()
portType.name = service.name + 'PortType'
for method in service.methods:
operation = wsdl.Operation()
operation.name = method.operationName
operation.input = wsdl.Input(message='tns:' + method.operationName + 'Input')
operation.output = wsdl.Output(message='tns:' + method.operationName + 'Output')
portType.operations.append(operation)
definitions.portTypes.append(portType)
def build_messages(wsdl, definitions, service):
for method in service.methods:
inputMessage = wsdl.Message(name=method.operationName + 'Input')
part = wsdl.Part(name='body')
if isinstance(method.input, six.string_types):
part.element = 'sns:' + method.input
else:
part.type = 'sns:' + uncapitalize(method.input.__name__)
inputMessage.parts = [part]
definitions.messages.append(inputMessage)
outputMessage = wsdl.Message(name=method.operationName + 'Output')
part = wsdl.Part(name='body')
if isinstance(method.output, six.string_types):
part.element = 'sns:' + method.output
else:
part.type = 'sns:' + uncapitalize(method.output.__name__)
outputMessage.parts = [part]
definitions.messages.append(outputMessage)
def build_types(wsdl, definitions, service):
schemas = [generate_xsdspec(schema) for schema in service.schemas]
definitions.types = wsdl.Types(schemas=schemas)
def generate_wsdl(service):
wsdl = get_wsdl_classes(service.version.BINDING_NAMESPACE)
definitions = wsdl.Definitions(targetNamespace=service.targetNamespace)
build_types(wsdl, definitions, service)
build_service(wsdl, definitions, service)
build_bindings(wsdl, definitions, service)
build_portTypes(wsdl, definitions, service)
build_messages(wsdl, definitions, service)
xmlelement = etree.Element(
'{%s}definitions' % ns.wsdl,
nsmap={
# FIXME: Look up properly if multiple schemas...
'sns': service.schemas[0].targetNamespace,
'soap': service.version.BINDING_NAMESPACE,
'tns': service.targetNamespace,
'wsdl': ns.wsdl,
'xsd': ns.xsd,
},
)
definitions.render(xmlelement,
definitions,
namespace=ns.wsdl,
elementFormDefault=xsd.ElementFormDefault.QUALIFIED)
return xmlelement
# --- Program -----------------------------------------------------------------
def main(argv=None):
stdout = getattr(sys.stdout, 'buffer', sys.stdout)
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Generates a WSDL document from a Python module.',
)
parser.add_argument('module', help='The path to a python module.')
parser.add_argument('output', help='Output path for WSDL document.',
nargs='?', type=argparse.FileType('wb'), default=stdout)
opt = parser.parse_args(sys.argv[1:] if argv is None else argv)
logger.info('Generating WSDL for Python module: %s' % opt.module)
module = imp.load_source('', opt.module)
tree = generate_wsdl(getattr(module, 'SERVICE'))
opt.output.write(etree.tostring(tree, pretty_print=True))
return 0
if __name__ == '__main__':
sys.exit(main())
|
''' Program to connect with database and store record of employee and display records.
'''
from sqlTor import SqlTor
import mysql.connector
from mysql.connector import errorcode
from tabulate import tabulate
from utils import clear_screen
def input_employee_details():
while True:
try:
name = input('name: ')
assert 5 < len(name) < 20
department = input('department: ')
assert len(department) < 20
salary = int(input('salary: '))
assert salary >= 0
except Exception as err:
print(f'Please enter valid details. {err}')
else:
break
return name, department, salary
def input_emp_id():
while True:
try:
emp_id = int(input('Enter employee id: '))
except ValueError:
print('Invalid Employee id. It must be integer.')
else:
break
return emp_id
def create_table(cursor):
''' Takes the cursor object and creates table '''
table_creation = ("CREATE TABLE employees(\
emp_id integer NOT NULL PRIMARY KEY,\
name char(20) NOT NULL,\
department char(20) NOT NULL,\
salary integer NOT NULL);")
try:
cursor.execute(table_creation)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
print('table already exists')
else:
print(err)
else:
print('Created table `employees` successfully')
def display_all(cursor):
''' Display all employees '''
query = "SELECT * FROM employees"
try:
cursor.execute(query)
except Exception as err:
print(err)
else:
employees = cursor.fetchall()
if employees:
print(f'''\n\nHere is the list of all employees
\n{tabulate(employees,tablefmt='fancy_grid',headers=['emp_id','name','department','salary'])}\n''')
else:
print('No employees recorded yet')
def record_new(cursor):
''' Record a new employee '''
print('Enter the details to add new employee.\n')
emp_id = input_emp_id()
name, department, salary = input_employee_details()
    insert_employee = "INSERT INTO employees VALUES (%s, %s, %s, %s)"
try:
        # Parameterized query: lets the driver handle quoting and avoids SQL injection
        cursor.execute(insert_employee, (emp_id, name, department, salary))
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_DUP_ENTRY:
            print('Duplicate entry. emp_id must be unique.')
        else:
            print(err)
    else:
        print('New employee added successfully 😃')
if __name__ == "__main__":
with SqlTor() as my_con:
cursor = my_con.cursor()
create_table(cursor)
while True:
clear_screen()
display_all(cursor)
print('RECORD NEW EMPLOYEES')
record_new(cursor)
my_con.commit()
|
# coding=utf-8
import threading
import traceback
from config import redis
from dark_listener import BaseListener
from dark_listener.BaseOperation import validate
from dark_listener.ListenerManagerLauncher import listener_manager_launcher
from lib.Logger import log
rlock = threading.RLock()
def lock(func):
def wrapper(*args, **kwargs):
rlock.acquire()
try:
return func(*args, **kwargs)
except:
log.error(traceback.format_exc())
raise
finally:
rlock.release()
return wrapper
class DarkListenerManager():
def __init__(self, listener_name):
self.listener_name = listener_name
self.listeners = {}
def get_dark_listener_session_name(self, user_id: str, chatbot_user_id: str):
return 'tianhao:dark_buddy:dark_listener:{0}:{1}'.format(self.listener_name,
user_id + '-*-' + chatbot_user_id)
def get_listener_session_choices(self, user_id: str, chatbot_user_id: str):
choices = redis.get(self.get_dark_listener_session_name(user_id, chatbot_user_id))
if not choices:
return None
return eval(redis.get(self.get_dark_listener_session_name(user_id, chatbot_user_id)).decode())
def clear_listener_session_choices(self, user_id: str, chatbot_user_id: str):
redis.delete(self.get_dark_listener_session_name(user_id, chatbot_user_id))
@lock
def put_new_listener(self, dark_listener: BaseListener) -> None:
tenant_id = dark_listener.chatbot_user_id
user_id = dark_listener.user_id
listeners_dict = self.listeners.get(tenant_id, {})
listeners_dict[user_id] = dark_listener
self.listeners[tenant_id] = listeners_dict
        # Register the tenant's current focus (this listener manager)
listener_manager_launcher.set_current_listener_manager(user_id, tenant_id, self)
dark_listener.initialize()
@lock
def get_listener(self, user_id: str, tenant_id: str) -> BaseListener:
listener_dict = self.listeners.get(tenant_id)
if not listener_dict:
return None
return listener_dict.get(user_id, None)
@lock
def delete(self, user_id, chatbot_user_id):
related_listener = self.get_listener(user_id, chatbot_user_id)
if related_listener:
redis.delete(self.get_dark_listener_session_name(user_id, chatbot_user_id))
related_listener.alive = False
del related_listener
@lock
def listen(self, request_json: dict) -> bool:
user_id = request_json['senderId']
chatbot_user_id = request_json['chatbotUserId']
related_listener = self.get_listener(user_id, chatbot_user_id)
if not related_listener:
return False
answer = request_json["text"]["content"].strip()
listen_words = self.get_listener_session_choices(user_id, chatbot_user_id)
if not listen_words:
return False
matched = validate(answer, listen_words)
if matched:
self.clear_listener_session_choices(user_id, chatbot_user_id)
related_listener.current_request = request_json
related_listener.current_answer = answer
return True
else:
return False
def get_listener_name(self):
return self.listener_name
|
import sys as _sys
from loguru import logger
from tqdm import tqdm as _tqdm
_sys.stdout.reconfigure(encoding='utf-8', errors='backslashreplace')
logger.remove() # removes the default console logger provided by Loguru.
# I find it to be too noisy with details more appropriate for file logging.
# INFO and messages of higher priority only shown on the console.
logger.add(lambda msg: _tqdm.write(msg, end=""), format="{message}", level="INFO")
# This creates a logging sink and handler that puts all messages at or above the TRACE level into a logfile for each run.
logger.add("file_{time}.log", level="TRACE", encoding="utf8") # Unicode instructions needed to avoid file write errors.
@logger.catch(
message=
"WHHoopssiee! Looks like script crashed! This shouldn't happen, although it often does haha :P\n"
"Most of the times, you should cut out the last printed file (it should be down there somehwere) "
"to some other folder, and continue\n"
"\n"
"If this doesn't help, and it keeps doing this after many cut-outs, you can check out issues tab:\n"
"https://github.com/TheLastGimbus/GooglePhotosTakeoutHelper/issues \n"
"to see if anyone has similar issue, or contact me other way:\n"
"https://github.com/TheLastGimbus/GooglePhotosTakeoutHelper/blob/master/README.md#contacterrors \n",
# Still tell the system that something bad happened
onerror=lambda e: _sys.exit(1)
) # wraps entire function in a trap to display enhanced error tracebacks after an exception occurs.
def main():
import argparse as _argparse
import json as _json
import os as _os
import re as _re
import shutil as _shutil
import hashlib as _hashlib
import functools as _functools
from collections import defaultdict as _defaultdict
from datetime import datetime as _datetime
from datetime import timedelta as _timedelta
from pathlib import Path as Path
try:
from google_photos_takeout_helper.__version__ import __version__
except ModuleNotFoundError:
from __version__ import __version__
import piexif as _piexif
from fractions import Fraction # piexif requires some values to be stored as rationals
import math
if _os.name == 'nt':
import win32_setctime as _windoza_setctime
parser = _argparse.ArgumentParser(
prog='Google Photos Takeout Helper',
usage='google-photos-takeout-helper -i [INPUT TAKEOUT FOLDER] -o [OUTPUT FOLDER]',
description=
"""This script takes all of your photos from Google Photos takeout,
fixes their exif DateTime data (when they were taken) and file creation date,
and then copies it all to one folder.
""",
)
parser.add_argument('--version', action='version', version=f"%(prog)s {__version__}")
parser.add_argument(
'-i', '--input-folder',
type=str,
required=True,
help='Input folder with all stuff from Google Photos takeout zip(s)'
)
parser.add_argument(
'-o', '--output-folder',
type=str,
required=False,
default='ALL_PHOTOS',
        help='Output folder in which all photos will be placed'
)
parser.add_argument(
'--skip-extras',
action='store_true',
        help='EXPERIMENTAL: Skips extra photos, such as those ending in "edited" or "EFFECTS".'
)
parser.add_argument(
'--skip-extras-harder', # Oh yeah, skip my extras harder daddy
action='store_true',
        help='EXPERIMENTAL: Skips extra photos such as duplicates like pic(1). Also implies --skip-extras.'
)
parser.add_argument(
"--divide-to-dates",
action='store_true',
help="Create folders and subfolders based on the date the photos were taken"
)
parser.add_argument(
'--albums',
type=str,
help="EXPERIMENTAL, MAY NOT WORK FOR EVERYONE: What kind of 'albums solution' you would like:\n"
"'json' - written in a json file\n"
)
args = parser.parse_args()
logger.info('Heeeere we go!')
PHOTOS_DIR = Path(args.input_folder)
FIXED_DIR = Path(args.output_folder)
TAG_DATE_TIME_ORIGINAL = _piexif.ExifIFD.DateTimeOriginal
TAG_DATE_TIME_DIGITIZED = _piexif.ExifIFD.DateTimeDigitized
TAG_DATE_TIME = 306
TAG_PREVIEW_DATE_TIME = 50971
photo_formats = ['.jpg', '.jpeg', '.png', '.webp', '.bmp', '.tif', '.tiff', '.svg', '.heic']
video_formats = ['.mp4', '.gif', '.mov', '.webm', '.avi', '.wmv', '.rm', '.mpg', '.mpe', '.mpeg', '.mkv', '.m4v',
'.mts', '.m2ts']
extra_formats = [
'-edited', '-effects', '-smile', '-mix', # EN/US
'-edytowane', # PL
# Add more "edited" flags in more languages if you want. They need to be lowercase.
]
# Album Multimap
album_mmap = _defaultdict(list)
# Duplicate by full hash multimap
files_by_full_hash = _defaultdict(list)
    # holds all the renamed files that clashed from their original names
rename_map = dict()
_all_jsons_dict = _defaultdict(dict)
# Statistics:
s_removed_duplicates_count = 0
s_copied_files = 0
s_cant_insert_exif_files = [] # List of files where inserting exif failed
s_date_from_folder_files = [] # List of files where date was set from folder name
s_skipped_extra_files = [] # List of extra files ("-edited" etc) which were skipped
s_no_json_found = [] # List of files where we couldn't find json
s_no_date_at_all = [] # List of files where there was absolutely no option to set correct date
FIXED_DIR.mkdir(parents=True, exist_ok=True)
def for_all_files_recursive(
dir: Path,
file_function=lambda fi: True,
folder_function=lambda fo: True,
filter_fun=lambda file: True
):
for file in dir.rglob("*"):
if file.is_dir():
folder_function(file)
continue
elif file.is_file():
if filter_fun(file):
file_function(file)
else:
logger.debug(f'Found something weird... {file}')
# This is required, because windoza crashes when timestamp is negative
# https://github.com/joke2k/faker/issues/460#issuecomment-308897287
# This (dynamic assigning a function) mayyy be a little faster than comparing it every time (?)
datetime_from_timestamp = (lambda t: _datetime(1970, 1, 1) + _timedelta(seconds=int(t))) \
if _os.name == 'nt' \
else _datetime.fromtimestamp
timestamp_from_datetime = (lambda dt: (dt - _datetime(1970, 1, 1)).total_seconds()) \
if _os.name == 'nt' \
else _datetime.timestamp
def is_photo(file: Path):
if file.suffix.lower() not in photo_formats:
return False
# skips the extra photo file, like edited or effects. They're kinda useless.
nonlocal s_skipped_extra_files
if args.skip_extras or args.skip_extras_harder: # if the file name includes something under the extra_formats, it skips it.
for extra in extra_formats:
if extra in file.name.lower():
s_skipped_extra_files.append(str(file.resolve()))
return False
if args.skip_extras_harder:
search = r"\(\d+\)\." # we leave the period in so it doesn't catch folders.
if bool(_re.search(search, file.name)):
# PICT0003(5).jpg -> PICT0003.jpg The regex would match "(5).", and replace it with a "."
# use file.name here - with_name() rejects values that contain path separators
plain_file = file.with_name(_re.sub(search, '.', file.name))
# if the original exists, it will ignore the (1) file, ensuring there is only one copy of each file.
if plain_file.is_file():
s_skipped_extra_files.append(str(file.resolve()))
return False
return True
def is_video(file: Path):
if file.suffix.lower() not in video_formats:
return False
return True
def chunk_reader(fobj, chunk_size=1024):
""" Generator that reads a file in chunks of bytes """
while True:
chunk = fobj.read(chunk_size)
if not chunk:
return
yield chunk
def get_hash(file: Path, first_chunk_only=False, hash_algo=_hashlib.sha1):
hashobj = hash_algo()
with open(file, "rb") as f:
if first_chunk_only:
hashobj.update(f.read(1024))
else:
for chunk in chunk_reader(f):
hashobj.update(chunk)
return hashobj.digest()
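# e.g. get_hash(some_path, first_chunk_only=True) hashes only the first 1024 bytes (cheap pre-filter)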
def populate_album_map(path: Path, filter_fun=lambda f: (is_photo(f) or is_video(f))):
if not path.is_dir():
raise NotADirectoryError('populate_album_map only handles directories not files')
meta_file_exists = find_album_meta_json_file(path)
if meta_file_exists is None or not meta_file_exists.exists():
return False
# means that we are processing an album so process
for file in path.rglob("*"):
if not (file.is_file() and filter_fun(file)):
continue
file_name = file.name
# If it's not in the output folder
if not (FIXED_DIR / file.name).is_file():
full_hash = None
try:
full_hash = get_hash(file, first_chunk_only=False)
except Exception as e:
logger.debug(e)
logger.debug(f"populate_album_map - couldn't get hash of {file}")
if full_hash is not None and full_hash in files_by_full_hash:
full_hash_files = files_by_full_hash[full_hash]
if len(full_hash_files) != 1:
logger.error("full_hash_files list should only be one after duplication removal, bad state")
exit(-5)
return False
file_name = full_hash_files[0].name
# check rename map in case there was an overlap namechange
if str(file) in rename_map:
file_name = rename_map[str(file)].name
album_mmap[file.parent.name].append(file_name)
# PART 3: removing duplicates
# THIS IS PARTLY COPIED FROM STACKOVERFLOW
# https://stackoverflow.com/questions/748675/finding-duplicate-files-and-removing-them
#
# We now use an optimized version linked from tfeldmann
# https://gist.github.com/tfeldmann/fc875e6630d11f2256e746f67a09c1ae
#
# THANK YOU Todor Minakov (https://github.com/tminakov) and Thomas Feldmann (https://github.com/tfeldmann)
#
# NOTE: defaultdict(list) is a multimap, all init array handling is done internally
# See: https://en.wikipedia.org/wiki/Multimap#Python
#
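# Rough sketch of the strategy used below (as in the linked gist):
#   1. group files by size (a cheap stat() call),
#   2. for sizes shared by more than one file, group by a hash of the first 1024 bytes,
#   3. for small-hash collisions, confirm duplicates with a full-file hash.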
def find_duplicates(path: Path, filter_fun=lambda file: True):
files_by_size = _defaultdict(list)
files_by_small_hash = _defaultdict(list)
for file in path.rglob("*"):
if file.is_file() and filter_fun(file):
try:
file_size = file.stat().st_size
except (OSError, FileNotFoundError):
# not accessible (permissions, etc) - pass on
continue
files_by_size[file_size].append(file)
# For all files with the same file size, get their hash on the first 1024 bytes
logger.info('Calculating small hashes...')
for file_size, files in _tqdm(files_by_size.items(), unit='files-by-size'):
if len(files) < 2:
continue # this file size is unique, no need to spend cpu cycles on it
for file in files:
try:
small_hash = get_hash(file, first_chunk_only=True)
except OSError:
# the file access might've changed till the exec point got here
continue
files_by_small_hash[(file_size, small_hash)].append(file)
# For all files with the hash on the first 1024 bytes, get their hash on the full
# file - if more than one file maps to the same full hash here, they are certainly duplicates
logger.info('Calculating full hashes...')
for files in _tqdm(files_by_small_hash.values(), unit='files-by-small-hash'):
if len(files) < 2:
# the hash of the first 1k bytes is unique -> skip this file
continue
for file in files:
try:
full_hash = get_hash(file, first_chunk_only=False)
except OSError:
# the file access might've changed till the exec point got here
continue
files_by_full_hash[full_hash].append(file)
# Removes all duplicates in folder
# ONLY RUN AFTER RUNNING find_duplicates()
def remove_duplicates():
nonlocal s_removed_duplicates_count
# Now we have populated the final multimap of absolute dups, We now can attempt to find the original file
# and remove all the other duplicates
for files in _tqdm(files_by_full_hash.values(), unit='duplicates'):
if len(files) < 2:
continue # only one file has this full hash, so it's not a duplicate
s_removed_duplicates_count += len(files) - 1
# TODO reconsider which duplicate we keep now that we're searching globally
# keep the first file and delete the rest; don't mutate the list while iterating over it
for file in files[1:]:
file.unlink()
del files[1:]
return True
# PART 1: Fixing metadata and date-related stuff
# Returns json dict
def find_json_for_file(file: Path):
parenthesis_regexp = r'\([0-9]+\)'
parenthesis = _re.findall(parenthesis_regexp, file.name)
if len(parenthesis) == 1:
# Fix for files that have as image/video IMG_1234(1).JPG with a json IMG_1234.JPG(1).json
stripped_filename = _re.sub(parenthesis_regexp, '', file.name)
potential_json = file.with_name(stripped_filename + parenthesis[0] + '.json')
else:
potential_json = file.with_name(file.name + '.json')
if potential_json.is_file():
try:
with open(potential_json, 'r') as f:
json_dict = _json.load(f)
return json_dict
except:
raise FileNotFoundError(f"Couldn't find json for file: {file}")
nonlocal _all_jsons_dict
# Check if we need to load this folder
if file.parent not in _all_jsons_dict:
for json_file in file.parent.rglob("*.json"):
try:
with json_file.open('r') as f:
json_dict = _json.load(f)
if "title" in json_dict:
# We found a JSON file with a proper title, store the file name
_all_jsons_dict[file.parent][json_dict["title"]] = json_dict
except:
logger.debug(f"Couldn't open json file {json_file}")
# Check if we have found the JSON file among all the loaded ones in the folder
if file.parent in _all_jsons_dict and file.name in _all_jsons_dict[file.parent]:
# Great we found a valid JSON file in this folder corresponding to this file
return _all_jsons_dict[file.parent][file.name]
else:
nonlocal s_no_json_found
s_no_json_found.append(str(file.resolve()))
raise FileNotFoundError(f"Couldn't find json for file: {file}")
# Returns date in 2019:01:01 23:59:59 format
def get_date_from_folder_meta(dir: Path):
file = find_album_meta_json_file(dir)
if not file:
logger.debug("Couldn't pull datetime from album meta")
return None
try:
with open(str(file), 'r') as fi:
album_dict = _json.load(fi)
# find_album_meta_json_file *should* give us "safe" file
time = int(album_dict["albumData"]["date"]["timestamp"])
return datetime_from_timestamp(time).strftime('%Y:%m:%d %H:%M:%S')
except KeyError:
logger.error(
"get_date_from_folder_meta - json doesn't have required stuff "
"- that probably means that either google fucked us again, or find_album_meta_json_file"
"is seriously broken"
)
return None
@_functools.lru_cache(maxsize=None)
def find_album_meta_json_file(dir: Path):
for file in dir.rglob("*.json"):
try:
with open(str(file), 'r') as f:
dict = _json.load(f)
if "albumData" in dict:
return file
except Exception as e:
logger.debug(e)
logger.debug(f"find_album_meta_json_file - Error opening file: {file}")
return None
def set_creation_date_from_str(file: Path, str_datetime):
try:
# Turns out exif can have different formats - YYYY:MM:DD, YYYY/..., YYYY-... etc
# Let's hope we never encounter something like MM-DD-YYYY
# The replace ': ' to ':0' fixes issues when it reads the string as 2006:11:09 10:54: 1.
# It replaces the extra whitespace with a 0 for proper parsing
str_datetime = str_datetime.replace('-', ':').replace('/', ':').replace('.', ':') \
.replace('\\', ':').replace(': ', ':0')[:19]
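# e.g. '2006/11/09 10:54: 1' becomes '2006:11:09 10:54:01' before strptime below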
timestamp = timestamp_from_datetime(
_datetime.strptime(
str_datetime,
'%Y:%m:%d %H:%M:%S'
)
)
_os.utime(file, (timestamp, timestamp))
if _os.name == 'nt':
_windoza_setctime.setctime(str(file), timestamp)
except Exception as e:
raise ValueError(f"Error setting creation date from string: {str_datetime}") from e
def set_creation_date_from_exif(file: Path):
try:
# Why do you need to be like that, Piexif...
exif_dict = _piexif.load(str(file))
except Exception as e:
raise IOError("Can't read file's exif!")
tags = [['0th', TAG_DATE_TIME], ['Exif', TAG_DATE_TIME_ORIGINAL], ['Exif', TAG_DATE_TIME_DIGITIZED]]
datetime_str = ''
date_set_success = False
for tag in tags:
try:
datetime_str = exif_dict[tag[0]][tag[1]].decode('UTF-8')
set_creation_date_from_str(file, datetime_str)
date_set_success = True
break
except KeyError:
pass # No such tag - continue searching :/
except ValueError:
logger.debug("Wrong date format in exif!")
logger.debug(datetime_str)
logger.debug("does not match '%Y:%m:%d %H:%M:%S'")
if not date_set_success:
raise IOError('No correct DateTime in given exif')
def set_file_exif_date(file: Path, creation_date):
try:
exif_dict = _piexif.load(str(file))
except: # Sorry but Piexif is too unpredictable
exif_dict = {'0th': {}, 'Exif': {}}
creation_date = creation_date.encode('UTF-8')
exif_dict['0th'][TAG_DATE_TIME] = creation_date
exif_dict['Exif'][TAG_DATE_TIME_ORIGINAL] = creation_date
exif_dict['Exif'][TAG_DATE_TIME_DIGITIZED] = creation_date
try:
_piexif.insert(_piexif.dump(exif_dict), str(file))
except Exception as e:
logger.debug("Couldn't insert exif!")
logger.debug(e)
nonlocal s_cant_insert_exif_files
s_cant_insert_exif_files.append(str(file.resolve()))
def get_date_str_from_json(json):
return datetime_from_timestamp(
int(json['photoTakenTime']['timestamp'])
).strftime('%Y:%m:%d %H:%M:%S')
# ========= THIS IS ALL GPS STUFF =========
def change_to_rational(number):
"""convert a number to rantional
Keyword arguments: number
return: tuple like (1, 2), (numerator, denominator)
"""
f = Fraction(str(number))
return f.numerator, f.denominator
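# e.g. change_to_rational(2.5) returns (5, 2) and change_to_rational(10) returns (10, 1)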
# got this here https://github.com/hMatoba/piexifjs/issues/1#issuecomment-260176317
def degToDmsRational(degFloat):
min_float = degFloat % 1 * 60
sec_float = min_float % 1 * 60
deg = math.floor(degFloat)
deg_min = math.floor(min_float)
sec = round(sec_float * 100)
return [(deg, 1), (deg_min, 1), (sec, 100)]
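# e.g. degToDmsRational(50.25) -> [(50, 1), (15, 1), (0, 100)], i.e. 50 deg 15 min 0.00 sec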
def set_file_geo_data(file: Path, json):
"""
Reads the geoData from google and saves it to the EXIF. This works assuming the geoData looks something like -100.12093, 50.213143.
Written by DalenW.
:param file:
:param json:
:return:
"""
# prevents crashes
try:
exif_dict = _piexif.load(str(file))
except:
exif_dict = {'0th': {}, 'Exif': {}}
# returns 0.0 for string input (i.e. a missing value), otherwise converts the number to a float
def _str_to_float(num):
if type(num) == str:
return 0.0
else:
return float(num)
# fallbacks to GeoData Exif if it wasn't set in the photos editor.
# https://github.com/TheLastGimbus/GooglePhotosTakeoutHelper/pull/5#discussion_r531792314
longitude = _str_to_float(json['geoData']['longitude'])
latitude = _str_to_float(json['geoData']['latitude'])
altitude = _str_to_float(json['geoData']['altitude'])
# Prioritise geoData set from GPhotos editor. If it's blank, fall back to geoDataExif
if longitude == 0 and latitude == 0:
longitude = _str_to_float(json['geoDataExif']['longitude'])
latitude = _str_to_float(json['geoDataExif']['latitude'])
altitude = _str_to_float(json['geoDataExif']['altitude'])
# latitude >= 0: North latitude -> "N"
# latitude < 0: South latitude -> "S"
# longitude >= 0: East longitude -> "E"
# longitude < 0: West longitude -> "W"
if longitude >= 0:
longitude_ref = 'E'
else:
longitude_ref = 'W'
longitude = longitude * -1
if latitude >= 0:
latitude_ref = 'N'
else:
latitude_ref = 'S'
latitude = latitude * -1
# referenced from https://gist.github.com/c060604/8a51f8999be12fc2be498e9ca56adc72
gps_ifd = {
_piexif.GPSIFD.GPSVersionID: (2, 0, 0, 0)
}
# skips it if it's empty
if latitude != 0 or longitude != 0:
gps_ifd.update({
_piexif.GPSIFD.GPSLatitudeRef: latitude_ref,
_piexif.GPSIFD.GPSLatitude: degToDmsRational(latitude),
_piexif.GPSIFD.GPSLongitudeRef: longitude_ref,
_piexif.GPSIFD.GPSLongitude: degToDmsRational(longitude)
})
if altitude != 0:
gps_ifd.update({
_piexif.GPSIFD.GPSAltitudeRef: 1,
_piexif.GPSIFD.GPSAltitude: change_to_rational(round(altitude))
})
gps_exif = {"GPS": gps_ifd}
exif_dict.update(gps_exif)
try:
_piexif.insert(_piexif.dump(exif_dict), str(file))
except Exception as e:
logger.debug("Couldn't insert geo exif!")
# local variable 'new_value' referenced before assignment means that one of the GPS values is incorrect
logger.debug(e)
# ============ END OF GPS STUFF ============
# Fixes ALL metadata, takes just file and dir and figures it out
def fix_metadata(file: Path):
# logger.info(file)
has_nice_date = False
try:
set_creation_date_from_exif(file)
has_nice_date = True
except (_piexif.InvalidImageDataError, ValueError, IOError) as e:
# covers unreadable exif as well as exif without a valid DateTime (both raised as IOError)
logger.debug(e)
logger.debug(f'No usable exif date for {file}')
try:
google_json = find_json_for_file(file)
date = get_date_str_from_json(google_json)
set_file_geo_data(file, google_json)
set_file_exif_date(file, date)
set_creation_date_from_str(file, date)
return True
except FileNotFoundError as e:
logger.debug(e)
if has_nice_date:
return True
logger.debug(f'Last option, copying folder meta as date for {file}')
date = get_date_from_folder_meta(file.parent)
if date is not None:
set_file_exif_date(file, date)
set_creation_date_from_str(file, date)
nonlocal s_date_from_folder_files
s_date_from_folder_files.append(str(file.resolve()))
return True
else:
logger.warning(f'There was literally no option to set date on {file}')
nonlocal s_no_date_at_all
s_no_date_at_all.append(str(file.resolve()))
return False
# PART 2: Copy all photos and videos to target folder
# Makes a new name like 'photo(1).jpg'
def new_name_if_exists(file: Path):
new_name = file
i = 1
while True:
if not new_name.is_file():
return new_name
else:
new_name = file.with_name(f"{file.stem}({i}){file.suffix}")
rename_map[str(file)] = new_name
i += 1
def copy_to_target(file: Path):
if is_photo(file) or is_video(file):
new_file = new_name_if_exists(FIXED_DIR / file.name)
_shutil.copy2(file, new_file)
nonlocal s_copied_files
s_copied_files += 1
return True
def copy_to_target_and_divide(file: Path):
creation_date = file.stat().st_mtime
date = datetime_from_timestamp(creation_date)
new_path = FIXED_DIR / f"{date.year}/{date.month:02}/"
new_path.mkdir(parents=True, exist_ok=True)
new_file = new_name_if_exists(new_path / file.name)
_shutil.copy2(file, new_file)
nonlocal s_copied_files
s_copied_files += 1
return True
# Python lambdas can't hold two statements, so we update the progress bar inside this helper
def _walk_with_tqdm(res, bar: _tqdm):
bar.update()
return res
# Count *all* photo and video files - this is hacky, and we should use .rglob altogether instead of is_photo
logger.info("Counting how many input files we have ahead...")
_input_files_count = 0
for ext in _tqdm(photo_formats + video_formats, unit='formats'):
_input_files_count += len(list(PHOTOS_DIR.rglob(f'**/*{ext}')))
logger.info(f'Input files: {_input_files_count}')
logger.info('=====================')
logger.info('Fixing files metadata and creation dates...')
# tqdm progress bar stuff
_metadata_bar = _tqdm(total=_input_files_count, unit='files')
for_all_files_recursive(
dir=PHOTOS_DIR,
file_function=lambda f: _walk_with_tqdm(fix_metadata(f), _metadata_bar),
# TODO (probably never, but should): Change this maybe to path.rglob
filter_fun=lambda f: (is_photo(f) or is_video(f))
)
_metadata_bar.close()
logger.info('=====================')
logger.info('=====================')
_copy_bar = _tqdm(total=_input_files_count, unit='files')
if args.divide_to_dates:
logger.info('Creating subfolders and dividing files based on date...')
for_all_files_recursive(
dir=PHOTOS_DIR,
file_function=lambda f: _walk_with_tqdm(copy_to_target_and_divide(f), _copy_bar)
)
else:
logger.info('Copying all files to one folder...')
logger.info('(If you want, you can get them organized in folders based on year and month.'
' Run with --divide-to-dates to do this)')
for_all_files_recursive(
dir=PHOTOS_DIR,
file_function=lambda f: _walk_with_tqdm(copy_to_target(f), _copy_bar),
filter_fun=lambda f: (is_photo(f) or is_video(f))
)
_copy_bar.close()
logger.info('=====================')
logger.info('=====================')
logger.info('Finding duplicates...')
find_duplicates(FIXED_DIR, lambda f: (is_photo(f) or is_video(f)))
logger.info('Removing duplicates...')
remove_duplicates()
logger.info('=====================')
if args.albums is not None:
if args.albums.lower() == 'json':
logger.info('=====================')
logger.info('Populate json file with albums...')
logger.info('=====================')
for_all_files_recursive(
dir=PHOTOS_DIR,
folder_function=populate_album_map
)
file = PHOTOS_DIR / 'albums.json'
with open(file, 'w', encoding="utf-8") as outfile:
_json.dump(album_mmap, outfile)
logger.info(str(file))
logger.info('')
logger.info('DONE! FREEEEEDOOOOM!!!')
logger.info('')
logger.info("Final statistics:")
logger.info(f"Files copied to target folder: {s_copied_files}")
logger.info(f"Removed duplicates: {s_removed_duplicates_count}")
logger.info(f"Files for which we couldn't find json: {len(s_no_json_found)}")
if len(s_no_json_found) > 0:
with open(PHOTOS_DIR / 'no_json_found.txt', 'w', encoding="utf-8") as f:
f.write("# This file contains list of files for which there was no corresponding .json file found\n")
f.write("# You might find it useful, but you can safely delete this :)\n")
f.write("\n".join(s_no_json_found))
logger.info(f" - you have full list in {f.name}")
logger.info(f"Files where inserting new exif failed: {len(s_cant_insert_exif_files)}")
if len(s_cant_insert_exif_files) > 0:
logger.info("(This is not necessary bad thing - pretty much all videos fail, "
"and your photos probably have their original exif already")
with open(PHOTOS_DIR / 'failed_inserting_exif.txt', 'w', encoding="utf-8") as f:
f.write("# This file contains list of files where setting right exif date failed\n")
f.write("# You might find it useful, but you can safely delete this :)\n")
f.write("\n".join(s_cant_insert_exif_files))
logger.info(f" - you have full list in {f.name}")
logger.info(f"Files where date was set from name of the folder: {len(s_date_from_folder_files)}")
if len(s_date_from_folder_files) > 0:
with open(PHOTOS_DIR / 'date_from_folder_name.txt', 'w', encoding="utf-8") as f:
f.write("# This file contains list of files where date was set from name of the folder\n")
f.write("# You might find it useful, but you can safely delete this :)\n")
f.write("\n".join(s_date_from_folder_files))
logger.info(f" - you have full list in {f.name}")
if args.skip_extras or args.skip_extras_harder:
# Remove duplicates: https://www.w3schools.com/python/python_howto_remove_duplicates.asp
s_skipped_extra_files = list(dict.fromkeys(s_skipped_extra_files))
logger.info(f"Extra files that were skipped: {len(s_skipped_extra_files)}")
with open(PHOTOS_DIR / 'skipped_extra_files.txt', 'w', encoding="utf-8") as f:
f.write("# This file contains list of extra files (ending with '-edited' etc) which were skipped because "
"you've used either --skip-extras or --skip-extras-harder\n")
f.write("# You might find it useful, but you can safely delete this :)\n")
f.write("\n".join(s_skipped_extra_files))
logger.info(f" - you have full list in {f.name}")
if len(s_no_date_at_all) > 0:
logger.info('')
logger.info(f"!!! There were {len(s_no_date_at_all)} files where there was absolutely no way to set "
f"a correct date! They will probably appear at the top of the others, as their 'last modified' "
f"value is set to moment of downloading your takeout :/")
with open(PHOTOS_DIR / 'unsorted.txt', 'w', encoding="utf-8") as f:
f.write("# This file contains list of files where there was no way to set correct date!\n")
f.write("# You probably want to set their dates manually - but you can delete this if you want\n")
f.write("\n".join(s_no_date_at_all))
logger.info(f" - you have full list in {f.name}")
logger.info('')
logger.info('Sooo... what now? See README.md for the nice Google Photos alternatives I found and recommend')
logger.info('')
logger.info('If I helped you, you can consider donating: https://www.paypal.me/TheLastGimbus')
logger.info('Have a nice day!')
if __name__ == '__main__':
main()
|
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import relationship
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import picklers
metadata = None
def step_numbering(step):
"""order in whole steps"""
def f(index, collection):
return step * index
return f
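# e.g. step_numbering(2) assigns positions 0, 2, 4, 6, ... to indexes 0, 1, 2, 3, ...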
def fibonacci_numbering(order_col):
"""
almost fibonacci- skip the first 2 steps
e.g. 1, 2, 3, 5, 8, ... instead of 0, 1, 1, 2, 3, ...
otherwise ordering of the elements at '1' is undefined... ;)
"""
def f(index, collection):
if index == 0:
return 1
elif index == 1:
return 2
else:
return getattr(collection[index - 1], order_col) + getattr(
collection[index - 2], order_col
)
return f
def alpha_ordering(index, collection):
"""
0 -> A, 1 -> B, ... 25 -> Z, 26 -> AA, 27 -> AB, ...
"""
s = ""
while index > 25:
d = index // 26
s += chr((d % 26) + 64)
index -= d * 26
s += chr(index + 65)
return s
class OrderingListTest(fixtures.MappedTest):
def setup_test(self):
global metadata, slides_table, bullets_table, Slide, Bullet
slides_table, bullets_table = None, None
Slide, Bullet = None, None
metadata = MetaData()
def _setup(self, test_collection_class):
"""Build a relationship situation using the given
test_collection_class factory"""
global metadata, slides_table, bullets_table, Slide, Bullet
slides_table = Table(
"test_Slides",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(128)),
)
bullets_table = Table(
"test_Bullets",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("slide_id", Integer, ForeignKey("test_Slides.id")),
Column("position", Integer),
Column("text", String(128)),
)
class Slide(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return '<Slide "%s">' % self.name
class Bullet(object):
def __init__(self, text):
self.text = text
def __repr__(self):
return '<Bullet "%s" pos %s>' % (self.text, self.position)
clear_mappers()
self.mapper_registry.map_imperatively(
Slide,
slides_table,
properties={
"bullets": relationship(
Bullet,
lazy="joined",
collection_class=test_collection_class,
backref="slide",
order_by=[bullets_table.c.position],
)
},
)
self.mapper_registry.map_imperatively(Bullet, bullets_table)
metadata.create_all(testing.db)
def teardown_test(self):
metadata.drop_all(testing.db)
def test_append_no_reorder(self):
self._setup(
ordering_list("position", count_from=1, reorder_on_append=False)
)
s1 = Slide("Slide #1")
self.assert_(not s1.bullets)
self.assert_(len(s1.bullets) == 0)
s1.bullets.append(Bullet("s1/b1"))
self.assert_(s1.bullets)
self.assert_(len(s1.bullets) == 1)
self.assert_(s1.bullets[0].position == 1)
s1.bullets.append(Bullet("s1/b2"))
self.assert_(len(s1.bullets) == 2)
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
bul = Bullet("s1/b100")
bul.position = 100
s1.bullets.append(bul)
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
self.assert_(s1.bullets[2].position == 100)
s1.bullets.append(Bullet("s1/b4"))
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
self.assert_(s1.bullets[2].position == 100)
self.assert_(s1.bullets[3].position == 4)
s1.bullets._reorder()
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
self.assert_(s1.bullets[2].position == 3)
self.assert_(s1.bullets[3].position == 4)
session = fixture_session()
session.add(s1)
session.flush()
id_ = s1.id
session.expunge_all()
del s1
srt = session.get(Slide, id_)
self.assert_(srt.bullets)
self.assert_(len(srt.bullets) == 4)
titles = ["s1/b1", "s1/b2", "s1/b100", "s1/b4"]
found = [b.text for b in srt.bullets]
self.assert_(titles == found)
def test_append_reorder(self):
self._setup(
ordering_list("position", count_from=1, reorder_on_append=True)
)
s1 = Slide("Slide #1")
self.assert_(not s1.bullets)
self.assert_(len(s1.bullets) == 0)
s1.bullets.append(Bullet("s1/b1"))
self.assert_(s1.bullets)
self.assert_(len(s1.bullets) == 1)
self.assert_(s1.bullets[0].position == 1)
s1.bullets.append(Bullet("s1/b2"))
self.assert_(len(s1.bullets) == 2)
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
bul = Bullet("s1/b100")
bul.position = 100
s1.bullets.append(bul)
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
self.assert_(s1.bullets[2].position == 3)
s1.bullets.append(Bullet("s1/b4"))
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
self.assert_(s1.bullets[2].position == 3)
self.assert_(s1.bullets[3].position == 4)
s1.bullets._reorder()
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
self.assert_(s1.bullets[2].position == 3)
self.assert_(s1.bullets[3].position == 4)
s1.bullets._raw_append(Bullet("raw"))
self.assert_(s1.bullets[4].position is None)
s1.bullets._reorder()
self.assert_(s1.bullets[4].position == 5)
session = fixture_session()
session.add(s1)
session.flush()
id_ = s1.id
session.expunge_all()
del s1
srt = session.get(Slide, id_)
self.assert_(srt.bullets)
self.assert_(len(srt.bullets) == 5)
titles = ["s1/b1", "s1/b2", "s1/b100", "s1/b4", "raw"]
found = [b.text for b in srt.bullets]
eq_(titles, found)
srt.bullets._raw_append(Bullet("raw2"))
srt.bullets[-1].position = 6
session.flush()
session.expunge_all()
srt = session.get(Slide, id_)
titles = ["s1/b1", "s1/b2", "s1/b100", "s1/b4", "raw", "raw2"]
found = [b.text for b in srt.bullets]
eq_(titles, found)
def test_insert(self):
self._setup(ordering_list("position"))
s1 = Slide("Slide #1")
s1.bullets.append(Bullet("1"))
s1.bullets.append(Bullet("2"))
s1.bullets.append(Bullet("3"))
s1.bullets.append(Bullet("4"))
self.assert_(s1.bullets[0].position == 0)
self.assert_(s1.bullets[1].position == 1)
self.assert_(s1.bullets[2].position == 2)
self.assert_(s1.bullets[3].position == 3)
s1.bullets.insert(2, Bullet("insert_at_2"))
self.assert_(s1.bullets[0].position == 0)
self.assert_(s1.bullets[1].position == 1)
self.assert_(s1.bullets[2].position == 2)
self.assert_(s1.bullets[3].position == 3)
self.assert_(s1.bullets[4].position == 4)
self.assert_(s1.bullets[1].text == "2")
self.assert_(s1.bullets[2].text == "insert_at_2")
self.assert_(s1.bullets[3].text == "3")
s1.bullets.insert(999, Bullet("999"))
self.assert_(len(s1.bullets) == 6)
self.assert_(s1.bullets[5].position == 5)
session = fixture_session()
session.add(s1)
session.flush()
id_ = s1.id
session.expunge_all()
del s1
srt = session.get(Slide, id_)
self.assert_(srt.bullets)
self.assert_(len(srt.bullets) == 6)
texts = ["1", "2", "insert_at_2", "3", "4", "999"]
found = [b.text for b in srt.bullets]
self.assert_(texts == found)
def test_slice(self):
self._setup(ordering_list("position"))
b = [
Bullet("1"),
Bullet("2"),
Bullet("3"),
Bullet("4"),
Bullet("5"),
Bullet("6"),
]
s1 = Slide("Slide #1")
# 1, 2, 3
s1.bullets[0:3] = b[0:3]
for i in 0, 1, 2:
self.assert_(s1.bullets[i].position == i)
self.assert_(s1.bullets[i] == b[i])
# 1, 4, 5, 6, 3
s1.bullets[1:2] = b[3:6]
for li, bi in (0, 0), (1, 3), (2, 4), (3, 5), (4, 2):
self.assert_(s1.bullets[li].position == li)
self.assert_(s1.bullets[li] == b[bi])
# 1, 6, 3
del s1.bullets[1:3]
for li, bi in (0, 0), (1, 5), (2, 2):
self.assert_(s1.bullets[li].position == li)
self.assert_(s1.bullets[li] == b[bi])
session = fixture_session()
session.add(s1)
session.flush()
id_ = s1.id
session.expunge_all()
del s1
srt = session.get(Slide, id_)
self.assert_(srt.bullets)
self.assert_(len(srt.bullets) == 3)
texts = ["1", "6", "3"]
for i, text in enumerate(texts):
self.assert_(srt.bullets[i].position == i)
self.assert_(srt.bullets[i].text == text)
def test_replace(self):
self._setup(ordering_list("position"))
s1 = Slide("Slide #1")
s1.bullets = [Bullet("1"), Bullet("2"), Bullet("3")]
self.assert_(len(s1.bullets) == 3)
self.assert_(s1.bullets[2].position == 2)
session = fixture_session()
session.add(s1)
session.flush()
new_bullet = Bullet("new 2")
self.assert_(new_bullet.position is None)
# mark existing bullet as db-deleted before replacement.
# session.delete(s1.bullets[1])
s1.bullets[1] = new_bullet
self.assert_(new_bullet.position == 1)
self.assert_(len(s1.bullets) == 3)
id_ = s1.id
session.flush()
session.expunge_all()
srt = session.get(Slide, id_)
self.assert_(srt.bullets)
self.assert_(len(srt.bullets) == 3)
self.assert_(srt.bullets[1].text == "new 2")
self.assert_(srt.bullets[2].text == "3")
def test_replace_two(self):
"""test #3191"""
self._setup(ordering_list("position", reorder_on_append=True))
s1 = Slide("Slide #1")
b1, b2, b3, b4 = Bullet("1"), Bullet("2"), Bullet("3"), Bullet("4")
s1.bullets = [b1, b2, b3]
eq_([b.position for b in s1.bullets], [0, 1, 2])
s1.bullets = [b4, b2, b1]
eq_([b.position for b in s1.bullets], [0, 1, 2])
def test_funky_ordering(self):
class Pos(object):
def __init__(self):
self.position = None
step_factory = ordering_list(
"position", ordering_func=step_numbering(2)
)
stepped = step_factory()
stepped.append(Pos())
stepped.append(Pos())
stepped.append(Pos())
stepped.append(Pos())
for li, pos in (0, 0), (1, 2), (2, 4), (3, 6):
self.assert_(stepped[li].position == pos)
fib_factory = ordering_list(
"position", ordering_func=fibonacci_numbering("position")
)
fibbed = fib_factory()
fibbed.append(Pos())
fibbed.append(Pos())
fibbed.append(Pos())
fibbed.append(Pos())
fibbed.append(Pos())
for li, pos in (0, 1), (1, 2), (2, 3), (3, 5), (4, 8):
self.assert_(fibbed[li].position == pos)
fibbed.insert(2, Pos())
fibbed.insert(4, Pos())
fibbed.insert(6, Pos())
for li, pos in (
(0, 1),
(1, 2),
(2, 3),
(3, 5),
(4, 8),
(5, 13),
(6, 21),
(7, 34),
):
self.assert_(fibbed[li].position == pos)
alpha_factory = ordering_list("position", ordering_func=alpha_ordering)
alpha = alpha_factory()
alpha.append(Pos())
alpha.append(Pos())
alpha.append(Pos())
alpha.insert(1, Pos())
for li, pos in (0, "A"), (1, "B"), (2, "C"), (3, "D"):
self.assert_(alpha[li].position == pos)
def test_picklability(self):
from sqlalchemy.ext.orderinglist import OrderingList
olist = OrderingList("order", reorder_on_append=True)
olist.append(DummyItem())
for loads, dumps in picklers():
pck = dumps(olist)
copy = loads(pck)
self.assert_(copy == olist)
self.assert_(copy.__dict__ == olist.__dict__)
class DummyItem(object):
def __init__(self, order=None):
self.order = order
def __eq__(self, other):
return self.order == other.order
def __ne__(self, other):
return not (self == other)
|
#!/usr/bin/python2
'''
=== PREREQUISITES ===
Run in Python 2
Install requests library, via macOS terminal:
sudo pip install requests
=== DESCRIPTION ===
This script finds all MS switchports that match the input search parameter, searching either by clients from a file listing MAC addresses (one per line), a specific tag in Dashboard currently applied to ports, or the specific access policy currently configured. It then changes the configuration of the port by applying the new access policy specified. Its counterpart script find_ports.py can be first used to check, as it does not change any configs.
=== USAGE ===
python update_ports.py -k <api_key> -o <org_id> -s <search_parameter> [-t <time>] -p <policy>
The -s parameter will be either a local file of MAC addresses (one per line), a currently configured port tag in Dashboard, or the currently configured access policy (number of policy slot) on the Switch > Access policy page. Option -t, if using input list of MACs, to only search for clients that were last seen within t minutes, default is 15. -p specifies the slot # of the new access policy to configure on matching ports.
'''
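# Example invocation (all values below are placeholders):
#   python update_ports.py -k 0123456789abcdef -o 123456 -s switch_macs.txt -t 30 -p 2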
import getopt
import json
import requests
import sys
from datetime import datetime
# Prints a line of text that is meant for the user to read
def printusertext(p_message):
print('# %s' % p_message)
# Prints help text
def printhelp():
printusertext('This script finds all MS switchports that match the input search parameter,')
printusertext('searching either by clients from a file listing MAC addresses (one per line),')
printusertext('a specific tag in Dashboard currently applied to ports, or the specific')
printusertext('access policy currently configured. It then changes the configuration of the')
printusertext('port by applying the new access policy specified. Its counterpart script')
printusertext('find_ports.py can be first used to check, as it does not change any configs.')
printusertext('')
printusertext('Usage:')
printusertext('python update_ports.py -k <api_key> -o <org_id> -s <search_parameter> [-t <time>] -p <policy>')
printusertext('The -s parameter will be either a local file of MAC addresses (one per line),')
printusertext('a currently configured port tag in Dashboard, or the currently configured')
printusertext('access policy (number of policy slot) on the Switch > Access policy page.')
printusertext('Option -t, if using input list of MACs, to only search for clients')
printusertext('that were last seen within t minutes, default is 15.')
printusertext('-p specifies the slot # of the new access policy to configure on matching ports.')
def list_networks(api_key, org_id):
url = 'https://dashboard.meraki.com/api/v0/organizations/{}/networks'.format(org_id)
try:
response = requests.get(url=url, headers={'X-Cisco-Meraki-API-Key': api_key, 'Content-Type': 'application/json'})
return json.loads(response.text)
except requests.exceptions.RequestException as e:
print('Error calling list_networks: {}'.format(e))
def get_inventory(api_key, org_id):
url = 'https://dashboard.meraki.com/api/v0/organizations/{}/inventory'.format(org_id)
try:
response = requests.get(url=url, headers={'X-Cisco-Meraki-API-Key': api_key, 'Content-Type': 'application/json'})
return json.loads(response.text)
except requests.exceptions.RequestException as e:
print('Error calling get_inventory: {}'.format(e))
def list_switch_ports(api_key, serial):
url = 'https://dashboard.meraki.com/api/v0/devices/{}/switchPorts'.format(serial)
try:
response = requests.get(url=url, headers={'X-Cisco-Meraki-API-Key': api_key, 'Content-Type': 'application/json'})
return json.loads(response.text)
except requests.exceptions.RequestException as e:
print('Error calling list_switch_ports with serial number {}: {}'.format(serial, e))
def get_port_details(api_key, serial, number):
url = 'https://dashboard.meraki.com/api/v0/devices/{}/switchPorts/{}'.format(serial, number)
try:
response = requests.get(url=url, headers={'X-Cisco-Meraki-API-Key': api_key, 'Content-Type': 'application/json'})
return json.loads(response.text)
except requests.exceptions.RequestException as e:
print('Error calling get_port_details with serial {} and port {}: {}'.format(serial, number, e))
def update_switch_port(api_key, serial, number, data):
url = 'https://dashboard.meraki.com/api/v0/devices/{}/switchPorts/{}'.format(serial, number)
try:
response = requests.put(url=url, data=data, headers={'X-Cisco-Meraki-API-Key': api_key, 'Content-Type': 'application/json'})
return json.loads(response.text)
except requests.exceptions.RequestException as e:
print('Error calling update_switch_port with serial {}, port {}, and data {}: {}'.format(serial, number, data, e))
def list_clients(api_key, serial, timestamp=86400): # timestamp in seconds
url = 'https://dashboard.meraki.com/api/v0/devices/{}/clients?timespan={}'.format(serial, timestamp)
try:
response = requests.get(url=url, headers={'X-Cisco-Meraki-API-Key': api_key, 'Content-Type': 'application/json'})
return json.loads(response.text)
except requests.exceptions.RequestException as e:
print ('Error calling list_clients with serial {}: {}'.format(serial, e))
def main(argv):
# Set default values for command line arguments
API_KEY = ORG_ID = ARG_SEARCH = ARG_TIME = ARG_POLICY = 'null'
# Get command line arguments
try:
opts, args = getopt.getopt(argv, 'hk:o:s:t:p:')
except getopt.GetoptError:
printhelp()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
printhelp()
sys.exit()
elif opt == '-k':
API_KEY = arg
elif opt == '-o':
ORG_ID = arg
elif opt == '-s':
ARG_SEARCH = arg
elif opt == '-t':
ARG_TIME = arg
elif opt == '-p':
ARG_POLICY = arg
# Check if all required parameters have been given
if API_KEY == 'null' or ORG_ID == 'null' or ARG_SEARCH == 'null' or ARG_POLICY == 'null':
printhelp()
sys.exit(2)
# Assign search parameter
search_file = search_policy = search_tag = None
try:
# Check if search parameter is file
search_file = open(ARG_SEARCH)
except IOError:
try:
# Check if search parameter is number
search_policy = int(ARG_SEARCH)
except ValueError:
search_tag = ARG_SEARCH
# Assign default time option if not specified
try:
search_time = int(ARG_TIME)
search_time *= 60
except ValueError:
search_time = 60*15
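# e.g. '-t 30' yields search_time = 1800 seconds, which is passed as the client timespan below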
# Check that new policy is a number
try:
new_policy = int(ARG_POLICY)
except ValueError:
printhelp()
sys.exit(2)
# Find all MS networks
session = requests.session()
inventory = get_inventory(API_KEY, ORG_ID)
switches = [device for device in inventory if device['model'][:2] == 'MS' and device['networkId'] is not None]
switch_networks = []
for switch in switches:
if switch['networkId'] not in switch_networks:
switch_networks.append(switch['networkId'])
print('Found a total of %d switches configured across %d networks in this organization.' % (len(switches), len(switch_networks)))
# Find all ports with search parameter
if search_file is not None:
# Searching on file with list of MAC addresses
macs = search_file.read().split('\n')
macs = [mac.upper() for mac in macs]
print('Searching on list of %d MACs in file %s, with first and last addresses being %s and %s, respectively.' % (len(macs), ARG_SEARCH, macs[0], macs[-1]))
tally_ports = 0
# Find all clients per switch that match list
for switch in switches:
# Find clients that were connected within the search window (default: last 15 minutes)
clients = list_clients(API_KEY, switch['serial'], search_time)
# Helper variable that is a list of all MAC addresses, in upper-case to compare with master input list
clients_macs = [client['mac'].upper() for client in clients]
# Helper variable that is a dict of MAC address keys to client information values
matching_dict = {}
for (mac, client) in zip(clients_macs, clients):
matching_dict[mac] = client
# Find matches between clients on switch to master input list
matches = set(clients_macs).intersection(macs)
# Find ports of matched clients
if len(matches) > 0:
matched_ports = {}
for match in matches:
port = matching_dict[match]['switchport']
if port not in matched_ports:
matched_ports[port] = 1
else:
matched_ports[port] += 1
print('Found %d matched MAC addresses on switch %s' % (len(matches), switch['serial']))
tally_ports += len(matched_ports.keys())
for port in matched_ports.keys():
switchport = get_port_details(API_KEY, switch['serial'], port)
if switchport['accessPolicyNumber'] == new_policy:
continue
else:
switchport['accessPolicyNumber'] = new_policy
update_switch_port(API_KEY, switch['serial'], switchport['number'], json.dumps(switchport))
print('Configured %d matched ports on switch %s' % (len(matched_ports), switch['serial']))
print('Configured %d total ports matching search criteria.' % (tally_ports))
elif search_policy is not None:
# Searching on access policy
print('Searching on switch ports configured with access policy %d.' % (search_policy))
tally_ports = 0
for switch in switches:
ports = list_switch_ports(API_KEY, switch['serial'])
matched_ports = [port for port in ports if port['accessPolicyNumber'] != None and search_policy == port['accessPolicyNumber']]
if len(matched_ports) > 0:
# Change access policy for all matched ports
for port in matched_ports:
if port['accessPolicyNumber'] == new_policy:
continue
else:
port['accessPolicyNumber'] = new_policy
update_switch_port(API_KEY, switch['serial'], port['number'], json.dumps(port))
print('Configured %d matched ports on switch %s' % (len(matched_ports), switch['serial']))
tally_ports += len(matched_ports)
print('Configured %d total ports matching search criteria.' % (tally_ports))
else:
# Searching on port tag
print('Searching on switch ports configured with tag %s.' % (search_tag))
tally_ports = 0
for switch in switches:
ports = list_switch_ports(API_KEY, switch['serial'])
matched_ports = [port for port in ports if port['tags'] != None and search_tag in port['tags']]
if len(matched_ports) > 0:
# Change access policy for all matched ports
for port in matched_ports:
if port['accessPolicyNumber'] == new_policy:
continue
else:
port['accessPolicyNumber'] = new_policy
update_switch_port(API_KEY, switch['serial'], port['number'], json.dumps(port))
print('Configured %d matched ports on switch %s' % (len(matched_ports), switch['serial']))
tally_ports += len(matched_ports)
print('Configured %d total ports matching search criteria.' % (tally_ports))
if __name__ == '__main__':
startTime = datetime.now()
print('Starting script at: %s' % startTime)
print('Arguments entered: %s' % sys.argv[1:])
main(sys.argv[1:])
print('Ending script at: %s' % datetime.now())
print('Total run time: %s' % (datetime.now() - startTime))
|
import os
import csv
import numpy as np
import re
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical
import tensorflow as tf
from deepac.utils import set_mem_growth
from Bio import SeqIO
from shap import DeepExplainer
from deepac.explain.rf_sizes import get_rf_size
from tqdm import tqdm
def get_filter_contribs(args, allow_eager=False):
"""Calculate DeepLIFT contribution scores for all neurons in the convolutional layer
and extract all motifs for which a filter neuron got a non-zero contribution score."""
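# Rough workflow (sketch of what follows): optionally mean-center the conv filter weights,
# locate the convolutional (or bidirectional LSTM) layer of interest, load the test reads
# (.npy plus the matching fasta files), build reference activations at that layer,
# run DeepSHAP (DeepExplainer) on the intermediate activations in chunks, and finally
# map non-zero filter contributions back to the corresponding subsequences (motifs).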
if tf.executing_eagerly() and not allow_eager:
print("Using SHAP. Disabling eager execution...")
tf.compat.v1.disable_v2_behavior()
set_mem_growth()
model = load_model(args.model)
max_only = args.partial or args.easy_partial or not args.all_occurrences
check_additivity = not args.no_check
if args.w_norm and not args.do_lstm:
print("Create model with mean-centered weight matrices ...")
conv_layer_idx = [idx for idx, layer in enumerate(model.layers) if "Conv1D" in str(layer)][0]
kernel_normed, bias_normed = normalize_filter_weights(model.get_layer(index=conv_layer_idx).get_weights()[0],
model.get_layer(index=conv_layer_idx).get_weights()[1])
model.get_layer(index=conv_layer_idx).set_weights([kernel_normed, bias_normed])
path = args.model
if re.search("\.h5$", path) is not None:
path = re.sub("\.h5$", "", path)
norm_path = path + "_w_norm.h5"
model.save(norm_path)
args.model = norm_path
# extract some model information
if args.do_lstm:
conv_layer_idx = [idx for idx, layer in enumerate(model.layers)
if "Bidirectional" in str(layer)][args.inter_layer - 1]
n_filters = model.get_layer(index=conv_layer_idx).get_output_at(0).shape[-1]
input_layer_id = [idx for idx, layer in enumerate(model.layers) if "Input" in str(layer)][0]
motif_length = model.get_layer(index=input_layer_id).get_output_at(0).shape[1]
pad_left = 0
pad_right = 0
else:
conv_layer_ids = [idx for idx, layer in enumerate(model.layers) if "Conv1D" in str(layer)]
conv_layer_idx = conv_layer_ids[args.inter_layer - 1]
motif_length = get_rf_size(model, conv_layer_idx)
n_filters = model.get_layer(index=conv_layer_idx).get_weights()[0].shape[-1]
pad_left = (motif_length - 1) // 2
pad_right = motif_length - 1 - pad_left
print(model.summary())
print("Loading test data (.npy) ...")
test_data_set_name = os.path.splitext(os.path.basename(args.test_data))[0]
samples = np.load(args.test_data, mmap_mode='r')
total_num_reads = samples.shape[0]
len_reads = samples.shape[1]
print("Loading test data (.fasta) ...")
nonpatho_reads = list(SeqIO.parse(args.nonpatho_test, "fasta"))
patho_reads = list(SeqIO.parse(args.patho_test, "fasta"))
reads = nonpatho_reads + patho_reads
for idx, r in enumerate(reads):
r.id = test_data_set_name + "_seq_" + str(idx) + "_" + os.path.basename(r.id)
r.description = test_data_set_name + "_seq_" + str(idx) + "_" + os.path.basename(r.description)
r.name = test_data_set_name + "_seq_" + str(idx) + "_" + os.path.basename(r.name)
print("Padding reads ...")
reads = ["N" * pad_left + r + "N" * pad_right for r in reads]
assert len(reads) == total_num_reads, \
"Test data in .npy-format and fasta files containing different number of reads!"
# create output directory and subdirectories
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
if not os.path.exists(args.out_dir + "/filter_scores/"):
os.makedirs(args.out_dir + "/filter_scores/")
if not os.path.exists(args.out_dir + "/fasta/"):
os.makedirs(args.out_dir + "/fasta/")
if (args.partial or args.easy_partial) and not os.path.exists(args.out_dir + "/nuc_scores/"):
os.makedirs(args.out_dir + "/nuc_scores/")
# load or create reference sequences
ref_samples = get_reference_seqs(args, len_reads)
num_ref_seqs = ref_samples.shape[0]
print("Running DeepSHAP ...")
chunk_size = args.chunk_size // num_ref_seqs
i = 0
if tf.executing_eagerly():
intermediate_model = tf.keras.Model(model.inputs,
(model.get_layer(index=conv_layer_idx).get_output_at(0),
model.get_layer(index=conv_layer_idx).get_output_at(1)))
def map2layer(input_samples):
out = intermediate_model(input_samples, training=False)
return out[0].numpy(), out[1].numpy()
intermediate_ref_fwd, intermediate_ref_rc = map2layer(ref_samples)
else:
def map2layer(input_samples, layer, out_node):
feed_dict = dict(zip([model.get_layer(index=0).input], [input_samples]))
return tf.compat.v1.keras.backend.get_session().run(model.get_layer(index=layer).get_output_at(out_node),
feed_dict)
intermediate_ref_fwd = map2layer(ref_samples, conv_layer_idx, 0)
intermediate_ref_rc = map2layer(ref_samples, conv_layer_idx, 1)
intermediate_ref_fwd = intermediate_ref_fwd.mean(axis=0, keepdims=True)
intermediate_ref_rc = intermediate_ref_rc.mean(axis=0, keepdims=True)
explainer = DeepExplainer(([model.get_layer(index=conv_layer_idx).get_output_at(0),
model.get_layer(index=conv_layer_idx).get_output_at(1)],
model.layers[-1].output),
[intermediate_ref_fwd,
intermediate_ref_rc])
filter_range = range(n_filters)
if args.inter_neuron is not None:
filter_range = [None]*n_filters
for n in args.inter_neuron:
filter_range[n] = n
while i < total_num_reads:
print("Done "+str(i)+" from "+str(total_num_reads)+" sequences")
samples_chunk = samples[i:i+chunk_size, :, :]
reads_chunk = reads[i:i+chunk_size]
if tf.executing_eagerly():
intermediate_fwd, intermediate_rc = map2layer(samples_chunk)  # use the current chunk, not the reference
else:
intermediate_fwd = map2layer(samples_chunk, conv_layer_idx, 0)
intermediate_rc = map2layer(samples_chunk, conv_layer_idx, 1)
inter_diff_fwd = intermediate_fwd - intermediate_ref_fwd
inter_diff_rc = intermediate_rc - intermediate_ref_rc
scores_filter = explainer.shap_values([intermediate_fwd,
intermediate_rc], check_additivity=check_additivity)
scores_fwd, scores_rc = scores_filter[0]
# shape: [num_reads, len_reads, n_filters]
print("Getting data ...")
# for each filter do:
if args.do_lstm:
dat_fwd = [get_lstm_data(i, scores_filter_avg=scores_fwd,
input_reads=reads_chunk, motif_len=motif_length) for i in filter_range]
dat_rc = [get_lstm_data(i, scores_filter_avg=scores_rc,
input_reads=reads_chunk, motif_len=motif_length,
rc=True) for i in filter_range]
else:
dat_fwd = [get_filter_data(i, scores_filter_avg=scores_fwd,
input_reads=reads_chunk, motif_len=motif_length,
max_only=max_only) for i in filter_range]
dat_rc = [get_filter_data(i, scores_filter_avg=scores_rc,
input_reads=reads_chunk, motif_len=motif_length, rc=True,
max_only=max_only) for i in filter_range]
if max_only:
dat_max = [get_max_strand(i, dat_fwd=dat_fwd, dat_rc=dat_rc) for i in filter_range]
contrib_dat_fwd, motif_dat_fwd, contrib_dat_rc, motif_dat_rc = list(zip(*dat_max))
else:
contrib_dat_fwd, motif_dat_fwd = list(zip(*dat_fwd))
contrib_dat_rc, motif_dat_rc = list(zip(*dat_rc))
print("Saving data ...")
if contrib_dat_fwd:
for f in filter_range:
write_filter_data(f, contribution_data=contrib_dat_fwd, motifs=motif_dat_fwd,
out_dir=args.out_dir, data_set_name=test_data_set_name)
if contrib_dat_rc:
for f in filter_range:
write_filter_data(f, contribution_data=contrib_dat_rc, motifs=motif_dat_rc,
out_dir=args.out_dir, data_set_name=test_data_set_name)
if args.partial:
print("Getting partial data ...")
partials_nt_fwd = [get_partials(i, model=model, conv_layer_idx=conv_layer_idx,
node=0, ref_samples=ref_samples,
contribution_data=contrib_dat_fwd, samples_chunk=samples_chunk,
input_reads=reads_chunk, intermediate_diff=inter_diff_fwd,
pad_left=pad_left, pad_right=pad_right, lstm=args.do_lstm,
check_additivity=check_additivity)
for i in filter_range]
partials_nt_rc = [get_partials(i, model=model, conv_layer_idx=conv_layer_idx,
node=1, ref_samples=ref_samples,
contribution_data=contrib_dat_rc, samples_chunk=samples_chunk,
input_reads=reads_chunk, intermediate_diff=inter_diff_rc,
pad_left=pad_left, pad_right=pad_right, lstm=args.do_lstm,
check_additivity=check_additivity)
for i in filter_range]
elif args.easy_partial:
print("Getting partial data ...")
partials_nt_fwd = [get_easy_partials(i, model=model, conv_layer_idx=conv_layer_idx, node=0,
contribution_data=contrib_dat_fwd, samples_chunk=samples_chunk,
input_reads=reads_chunk, intermediate_diff=inter_diff_fwd,
pad_left=pad_left, pad_right=pad_right) for i in filter_range]
partials_nt_rc = [get_easy_partials(i, model=model, conv_layer_idx=conv_layer_idx, node=1,
contribution_data=contrib_dat_rc, samples_chunk=samples_chunk,
input_reads=reads_chunk, intermediate_diff=inter_diff_rc,
pad_left=pad_left, pad_right=pad_right) for i in filter_range]
if args.partial or args.easy_partial:
scores_nt_fwd, read_ids_fwd = list(zip(*partials_nt_fwd))
scores_nt_rc, read_ids_rc = list(zip(*partials_nt_rc))
print("Saving partial data ...")
if scores_nt_fwd:
for f in filter_range:
write_partial_data(f, read_ids=read_ids_fwd, contribution_data=contrib_dat_fwd,
scores_input_pad=scores_nt_fwd, out_dir=args.out_dir,
data_set_name=test_data_set_name, motif_len=motif_length)
if scores_nt_rc:
for f in filter_range:
write_partial_data(f, read_ids=read_ids_rc, contribution_data=contrib_dat_rc,
scores_input_pad=scores_nt_rc, out_dir=args.out_dir,
data_set_name=test_data_set_name, motif_len=motif_length)
i += chunk_size
print("Done "+str(min(i, total_num_reads))+" from "+str(total_num_reads)+" sequences")
def get_max_strand(filter_id, dat_fwd, dat_rc):
if filter_id is None:
return [], [], [], []
i = filter_id
contrib_dat_fwd = []
motif_dat_fwd = []
contrib_dat_rc = []
motif_dat_rc = []
for seq_id in range(len(dat_fwd[i][0])):
record_fwd = dat_fwd[i]
record_rc = dat_rc[i]
# if any contributions at all
if len(record_fwd[0][seq_id][1]) > 0 and len(record_rc[0][seq_id][1]) > 0:
# if abs score on fwd higher than on rc
if np.abs(record_fwd[0][seq_id][2]) >= np.abs(record_rc[0][seq_id][2]):
contrib_dat_fwd.append(record_fwd[0][seq_id])
motif_dat_fwd.append(record_fwd[1][seq_id])
contrib_dat_rc.append([])
motif_dat_rc.append("")
else:
contrib_dat_rc.append(record_rc[0][seq_id])
motif_dat_rc.append(record_rc[1][seq_id])
contrib_dat_fwd.append([])
motif_dat_fwd.append("")
elif len(record_fwd[0][seq_id][1]) > 0 and not (len(record_rc[0][seq_id][1]) > 0):
contrib_dat_fwd.append(record_fwd[0][seq_id])
motif_dat_fwd.append(record_fwd[1][seq_id])
contrib_dat_rc.append([])
motif_dat_rc.append("")
elif not (len(record_fwd[0][seq_id][1]) > 0) and len(record_rc[0][seq_id][1]) > 0:
contrib_dat_rc.append(record_rc[0][seq_id])
motif_dat_rc.append(record_rc[1][seq_id])
contrib_dat_fwd.append([])
motif_dat_fwd.append("")
else:
contrib_dat_rc.append([])
motif_dat_rc.append("")
contrib_dat_fwd.append([])
motif_dat_fwd.append("")
return contrib_dat_fwd, motif_dat_fwd, contrib_dat_rc, motif_dat_rc
def get_filter_data(filter_id, scores_filter_avg, input_reads, motif_len, rc=False, max_only=True):
# determine non-zero contribution scores per read and filter
# and extract DNA-sequence of corresponding subreads
if filter_id is None:
return [], []
num_reads = len(input_reads)
contribution_data = []
motifs = []
for seq_id in range(num_reads):
if np.any(scores_filter_avg[seq_id, :, filter_id]):
if max_only:
max_id = np.argmax(np.abs(scores_filter_avg[seq_id, :, filter_id]))
non_zero_neurons = np.asarray([max_id]) if scores_filter_avg[seq_id, max_id, filter_id] \
else np.empty((0,), dtype=int)
else:
non_zero_neurons = np.nonzero(scores_filter_avg[seq_id, :, filter_id])[0]
scores = scores_filter_avg[seq_id, non_zero_neurons, filter_id]
contribution_data.append((input_reads[seq_id].id, non_zero_neurons, scores))
if not rc:
motifs.append([input_reads[seq_id][non_zero_neuron:(non_zero_neuron + motif_len)]
for non_zero_neuron in non_zero_neurons])
else:
# Assume all reads are the same length
non_zero_neurons = scores_filter_avg.shape[1] - 1 - non_zero_neurons
ms = [input_reads[seq_id][non_zero_neuron:(non_zero_neuron + motif_len)]
for non_zero_neuron in non_zero_neurons]
motifs.append([m.reverse_complement(id=m.id + "_rc", description=m.description + "_rc")
for m in ms])
else:
contribution_data.append((input_reads[seq_id].id, [], []))
motifs.append("")
return contribution_data, motifs
def get_lstm_data(filter_id, scores_filter_avg, input_reads, motif_len, rc=False):
# determine non-zero contribution scores per read and filter
# and extract DNA-sequence of corresponding subreads
if filter_id is None:
return [], []
num_reads = len(input_reads)
contribution_data = []
motifs = []
for seq_id in range(num_reads):
if scores_filter_avg[seq_id, filter_id] != 0:
non_zero_neurons = [0]
scores = [scores_filter_avg[seq_id, filter_id]]
contribution_data.append((input_reads[seq_id].id, non_zero_neurons, scores))
if not rc:
motifs.append([input_reads[seq_id][non_zero_neuron:(non_zero_neuron + motif_len)]
for non_zero_neuron in non_zero_neurons])
else:
ms = [input_reads[seq_id][non_zero_neuron:(non_zero_neuron + motif_len)]
for non_zero_neuron in non_zero_neurons]
motifs.append([m.reverse_complement(id=m.id + "_rc", description=m.description + "_rc")
for m in ms])
else:
contribution_data.append((input_reads[seq_id].id, [], []))
motifs.append("")
return contribution_data, motifs
def write_filter_data(filter_id, contribution_data, motifs, data_set_name, out_dir):
if filter_id is None:
return
if contribution_data[filter_id] is not None and motifs[filter_id] is not None and \
len(contribution_data[filter_id]) > 0 and len(motifs[filter_id]) > 0:
# save filter contribution scores
filter_rel_file = \
out_dir + "/filter_scores/" + data_set_name + "_rel_filter_%d.csv" % filter_id
with open(filter_rel_file, 'a') as csv_file:
file_writer = csv.writer(csv_file)
for dat in contribution_data[filter_id]:
if len(dat) > 0 and len(dat[1]) > 0:
file_writer.writerow([">" + dat[0]])
file_writer.writerow(dat[1])
file_writer.writerow(dat[2])
# save subreads which cause non-zero contribution scores
filter_motifs_file = \
out_dir + "/fasta/" + data_set_name + "_motifs_filter_%d.fasta" % filter_id
with open(filter_motifs_file, "a") as output_handle:
SeqIO.write([subread for motif in motifs[filter_id] for subread in motif], output_handle, "fasta")
def get_partials(filter_id, model, conv_layer_idx, node, ref_samples, contribution_data, samples_chunk,
input_reads, intermediate_diff, pad_left, pad_right, lstm=False, check_additivity=False):
num_reads = len(input_reads)
if filter_id is None:
return [], []
read_ids = []
scores_pt_all = []
print("Processing filter: {}".format(filter_id))
if contribution_data[filter_id] is None or not (len(contribution_data[filter_id]) > 0):
return [], []
for seq_id in tqdm(range(num_reads)):
read_id = re.search("seq_[0-9]+", input_reads[seq_id].id).group()
read_id = int(read_id.replace("seq_", ""))
read_ids.append(read_id)
if contribution_data[filter_id][seq_id] is None or not (len(contribution_data[filter_id][seq_id]) > 0):
scores_pt_all.append(None)
continue
out = model.get_layer(index=conv_layer_idx).get_output_at(node)
if lstm:
out = out[:, filter_id:filter_id+1]
else:
out = out[:, contribution_data[filter_id][seq_id][1][0], filter_id:filter_id+1]
explainer_nt = DeepExplainer((model.get_layer(index=0).input, out), ref_samples)
sample = samples_chunk[seq_id, :, :].reshape((1, ref_samples.shape[1], ref_samples.shape[2]))
# Get difference in activation of the intermediate neuron
if lstm:
diff = intermediate_diff[seq_id, filter_id]
else:
diff = intermediate_diff[seq_id, contribution_data[filter_id][seq_id][1][0], filter_id]
scores_nt = explainer_nt.shap_values(sample, check_additivity=check_additivity)[0]
partials = np.asarray([phi_i * contribution_data[filter_id][seq_id][2][0] for phi_i in scores_nt]) / diff
partials = partials.reshape(partials.shape[1], partials.shape[2])
# Sum along the channel (nt) axis and pad
scores_pt_pad = np.sum(partials, axis=1)
scores_pt_pad = np.pad(scores_pt_pad, (pad_left, pad_right), 'constant', constant_values=0.0)
if node == 1:
scores_pt_pad = scores_pt_pad[::-1]
scores_pt_all.append(scores_pt_pad)
return scores_pt_all, read_ids
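# A minimal, hedged illustration (toy numbers, not from any real model) of the rescaling step
# used above: per-nucleotide SHAP values toward an intermediate filter neuron are multiplied by
# the filter's contribution to the final output and divided by the neuron's activation
# difference, so the nucleotide-level scores conserve the filter's contribution.
def _rescale_example():
    filter_contribution = 0.5  # hypothetical contribution of the intermediate neuron to the output
    activation_diff = 2.0      # hypothetical activation difference of that neuron vs. the reference
    phi = np.array([0.8, -0.2, 1.0, 0.4])  # hypothetical per-nucleotide SHAP values toward the neuron
    # SHAP values toward the neuron sum (approximately) to its activation difference
    assert np.isclose(phi.sum(), activation_diff)
    partials = phi * filter_contribution / activation_diff
    # conservation: the nucleotide-level scores sum back to the filter's contribution
    assert np.isclose(partials.sum(), filter_contribution)
    return partials  # [0.2, -0.05, 0.25, 0.1]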
def get_easy_partials(filter_id, model, conv_layer_idx, node, contribution_data, samples_chunk,
input_reads, intermediate_diff, pad_left, pad_right):
if filter_id is None:
return [], []
num_reads = len(input_reads)
read_ids = []
scores_pt_all = []
if contribution_data[filter_id] is None or not (len(contribution_data[filter_id]) > 0):
return [], []
for seq_id in range(num_reads):
read_id = re.search("seq_[0-9]+", input_reads[seq_id].id).group()
read_id = int(read_id.replace("seq_", ""))
read_ids.append(read_id)
if contribution_data[filter_id][seq_id] is None or not (len(contribution_data[filter_id][seq_id]) > 0):
scores_pt_all.append(None)
continue
motif_length = get_rf_size(model, conv_layer_idx)
motif_start = contribution_data[filter_id][seq_id][1][0]
if node == 0:
sample = samples_chunk[seq_id, ::, ::]
else:
sample = samples_chunk[seq_id, ::-1, ::-1]
sample = np.pad(sample, ((pad_left, pad_right), (0, 0)), 'constant', constant_values=(0.0, 0.0))
sample = sample.reshape((1, sample.shape[0], sample.shape[1]))
sample = sample[:, motif_start:motif_start+motif_length, :]
        # Get difference in activation of the intermediate neuron
diff = intermediate_diff[seq_id, contribution_data[filter_id][seq_id][1][0], filter_id]
# Assuming: first layer only, all-zero reference, no nonlinearity, one-hot encoded nucleotides
# Then: contributions to filter output are equal to the weights
scores_nt = model.get_layer(index=conv_layer_idx).get_weights()[0][:, :, filter_id]
dilation = model.get_layer(index=conv_layer_idx).get_config()["dilation_rate"][0]
if dilation > 1:
scores_nt = np.insert(scores_nt, np.repeat(np.arange(1, scores_nt.shape[0]), dilation-1),
np.zeros(4), axis=0)
scores_nt = np.multiply(scores_nt, sample)
partials = np.asarray([phi_i * contribution_data[filter_id][seq_id][2][0] for phi_i in scores_nt]) / diff
# Sum along the channel (nt) axis and pad
partials = partials.reshape(partials.shape[1], partials.shape[2])
scores_pt_pad = np.sum(partials, axis=1)
pad_right_read = \
intermediate_diff.shape[1] + pad_right + pad_left - \
contribution_data[filter_id][seq_id][1][0] - motif_length
pad_left_read = contribution_data[filter_id][seq_id][1][0]
scores_pt_pad = np.pad(scores_pt_pad, (pad_left_read, pad_right_read), 'constant', constant_values=0.0)
scores_pt_all.append(scores_pt_pad)
return scores_pt_all, read_ids
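# A small, hedged sketch of why `get_easy_partials` can treat filter weights as contributions:
# with an all-zero reference, a linear first convolutional layer, and one-hot encoded
# nucleotides, the contribution of each input position to a filter's activation is exactly the
# weight selected by that position's nucleotide. All numbers below are illustrative.
def _weights_as_contributions_example():
    # toy kernel for one filter: motif length 3, alphabet ACGT
    kernel = np.array([[0.2, -0.1, 0.4, 0.0],
                       [0.1, 0.3, -0.2, 0.5],
                       [0.0, 0.6, 0.1, -0.3]])
    # one-hot encoded subread "A C T" aligned with the filter
    subread = np.array([[1, 0, 0, 0],
                        [0, 1, 0, 0],
                        [0, 0, 0, 1]])
    activation = np.sum(kernel * subread)             # 0.2 + 0.3 - 0.3 = 0.2
    contributions = np.sum(kernel * subread, axis=1)  # [0.2, 0.3, -0.3], one value per position
    # the per-position contributions sum to the activation difference from the zero reference
    assert np.isclose(contributions.sum(), activation)
    return contributions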
def write_partial_data(filter_id, read_ids, contribution_data, scores_input_pad, out_dir, data_set_name, motif_len):
if filter_id is None:
return
# save contribution scores for each nucleotide per filter motif
with open(out_dir + "/nuc_scores/" + data_set_name + "_rel_filter_%d_nucleotides.csv" % filter_id, 'a') \
as csv_file:
file_writer = csv.writer(csv_file)
for ind, seq_id in enumerate(read_ids[filter_id]):
# contribution_data[filter_id][ind][1][0] is the motif start
if len(contribution_data[filter_id][ind]) > 0 and len(scores_input_pad[filter_id][ind]) > 0:
scores = scores_input_pad[filter_id][ind]
scores = scores.tolist()
scores = scores[contribution_data[filter_id][ind][1][0]:(contribution_data[filter_id][ind][1][0] +
motif_len)]
row = ['%.4g' % s for s in scores]
file_writer.writerow([">" + contribution_data[filter_id][ind][0]])
file_writer.writerow(row)
def get_reference_seqs(args, len_reads):
"""
Load or create reference sequences for DeepLIFT.
"""
# generate reference sequence with N's
if args.ref_mode == "N":
print("Generating reference sequence with all Ns...")
num_ref_seqs = 1
ref_samples = np.zeros((num_ref_seqs, len_reads, 4))
# create reference sequences with same GC content as the training data set
elif args.ref_mode == "GC":
print("Generating reference sequences with same GC-content as training data set...")
train_samples = np.load(args.train_data, mmap_mode='r')
num_ref_seqs = 5
ref_seqs = [0]*num_ref_seqs
# calculate frequency of each nucleotide (A,C,G,T,N) in the training data set
probs = np.mean(np.mean(train_samples, axis=1), axis=0).tolist()
probs.append(1-sum(probs))
# generate reference seqs
for i in range(num_ref_seqs):
ref_seqs[i] = np.random.choice([0, 1, 2, 3, 4], p=probs, size=len_reads, replace=True)
ref_samples = to_categorical(ref_seqs, num_classes=5)
# remove channel of N-nucleotide
ref_samples = ref_samples[:, :, 0:4]
nc_dict = {0: 'A', 1: 'C', 2: 'G', 3: 'T', 4: 'N'}
train_data_set_name = os.path.splitext(os.path.basename(args.train_data))[0]
# save reference sequences
with open(args.out_dir + '/' + train_data_set_name + '_references.fasta', 'w') as csv_file:
file_writer = csv.writer(csv_file)
for seq_id in range(num_ref_seqs):
file_writer.writerow([">"+train_data_set_name+"_ref_"+str(seq_id)])
file_writer.writerow(["".join([nc_dict[base] for base in ref_seqs[seq_id]])])
del train_samples
# load own reference sequences (args.ref_mode == "own_ref_file")
else:
print("Loading reference sequences...")
tokenizer = Tokenizer(char_level=True)
tokenizer.fit_on_texts('ACGT')
ref_reads = list(SeqIO.parse(args.ref_seqs, "fasta"))
ref_samples = np.array([np.array([tokenizer.texts_to_matrix(read)]) for read in ref_reads])
# remove unused character
if not np.count_nonzero(ref_samples[:, :, :, 0]):
ref_samples = ref_samples[:, :, :, 1:5]
ref_samples = ref_samples.squeeze(1)
# num_ref_seqs = ref_samples.shape[0]
return ref_samples
def normalize_filter_weights(kernel, bias):
"""
    Performs output-preserving filter weight matrix normalization for inputs whose channels sum to
    one at each position (e.g. one-hot encoded sequences), as described
in "Learning Important Features Through Propagating Activation Differences" by Shrikumar et al., 2017
"""
for filter_index in range(kernel.shape[-1]):
bias[filter_index] += np.sum(np.mean(kernel[:, :, filter_index], axis=1))
for pos in range(kernel.shape[0]):
kernel[pos, :, filter_index] -= np.mean(kernel[pos, :, filter_index])
return kernel, bias
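# A hedged usage sketch for the normalization above: `model` and `conv_layer_idx` are
# assumptions standing in for a loaded Keras model and the index of its first convolutional
# layer; for one-hot encoded inputs the network output is unchanged by this rewrite.
#
#     conv_layer = model.get_layer(index=conv_layer_idx)
#     kernel, bias = conv_layer.get_weights()   # Conv1D: (width, channels, filters), (filters,)
#     kernel, bias = normalize_filter_weights(kernel, bias)
#     conv_layer.set_weights([kernel, bias])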
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import stats
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_case
tfd = tfp.distributions
tfe = tf.contrib.eager
@tfe.run_all_tests_in_graph_and_eager_modes
class ParetoTest(test_case.TestCase):
def _scipy_pareto(self, concentration, scale):
# In scipy pareto is defined with scale = 1, so we need to scale.
return stats.pareto(concentration, scale=scale)
def testParetoShape(self):
scale = tf.constant([2.] * 5)
concentration = tf.constant([2.] * 5)
pareto = tfd.Pareto(concentration, scale)
self.assertEqual(self.evaluate(pareto.batch_shape_tensor()), (5,))
self.assertEqual(pareto.batch_shape, tf.TensorShape([5]))
self.assertAllEqual(self.evaluate(pareto.event_shape_tensor()), [])
self.assertEqual(pareto.event_shape, tf.TensorShape([]))
def testParetoShapeBroadcast(self):
scale = tf.constant([[3., 2.]])
concentration = tf.constant([[4.], [5.], [6.]])
pareto = tfd.Pareto(concentration, scale)
self.assertAllEqual(self.evaluate(pareto.batch_shape_tensor()), (3, 2))
self.assertAllEqual(pareto.batch_shape, tf.TensorShape([3, 2]))
self.assertAllEqual(self.evaluate(pareto.event_shape_tensor()), [])
self.assertEqual(pareto.event_shape, tf.TensorShape([]))
def testInvalidScale(self):
invalid_scales = [-.01, 0., -2.]
concentration = 3.
for scale in invalid_scales:
with self.assertRaisesOpError("Condition x > 0"):
pareto = tfd.Pareto(concentration, scale, validate_args=True)
self.evaluate(pareto.scale)
def testInvalidConcentration(self):
scale = 1.
invalid_concentrations = [-.01, 0., -2.]
for concentration in invalid_concentrations:
with self.assertRaisesOpError("Condition x > 0"):
pareto = tfd.Pareto(concentration, scale, validate_args=True)
self.evaluate(pareto.concentration)
def testParetoLogPdf(self):
batch_size = 6
scale = tf.constant([3.] * batch_size)
scale_v = 3.
concentration = tf.constant([2.])
concentration_v = 2.
x = [3., 3.1, 4., 5., 6., 7.]
pareto = tfd.Pareto(concentration, scale)
log_prob = pareto.log_prob(x)
self.assertEqual(log_prob.shape, (6,))
self.assertAllClose(
self.evaluate(log_prob),
self._scipy_pareto(concentration_v, scale_v).logpdf(x))
pdf = pareto.prob(x)
self.assertEqual(pdf.shape, (6,))
self.assertAllClose(
self.evaluate(pdf),
self._scipy_pareto(concentration_v, scale_v).pdf(x))
def testParetoLogPdfValidateArgs(self):
batch_size = 3
scale = tf.constant([2., 3., 4.])
concentration = tf.constant([2.] * batch_size)
pareto = tfd.Pareto(concentration, scale, validate_args=True)
with self.assertRaisesOpError("not in the support"):
x = tf.placeholder_with_default(input=[2., 3., 3.], shape=[3])
log_prob = pareto.log_prob(x)
self.evaluate(log_prob)
with self.assertRaisesOpError("not in the support"):
x = tf.placeholder_with_default(input=[2., 2., 5.], shape=[3])
log_prob = pareto.log_prob(x)
self.evaluate(log_prob)
with self.assertRaisesOpError("not in the support"):
x = tf.placeholder_with_default(input=[1., 3., 5.], shape=[3])
log_prob = pareto.log_prob(x)
self.evaluate(log_prob)
def testParetoLogPdfMultidimensional(self):
batch_size = 6
scale = tf.constant([[2., 4., 5.]] * batch_size)
scale_v = [2., 4., 5.]
concentration = tf.constant([[1.]] * batch_size)
concentration_v = 1.
x = np.array([[6., 7., 9.2, 5., 6., 7.]], dtype=np.float32).T
pareto = tfd.Pareto(concentration, scale)
log_prob = pareto.log_prob(x)
self.assertEqual(log_prob.shape, (6, 3))
self.assertAllClose(
self.evaluate(log_prob),
self._scipy_pareto(concentration_v, scale_v).logpdf(x))
prob = pareto.prob(x)
self.assertEqual(prob.shape, (6, 3))
self.assertAllClose(
self.evaluate(prob),
self._scipy_pareto(concentration_v, scale_v).pdf(x))
def testParetoLogCdf(self):
batch_size = 6
scale = tf.constant([3.] * batch_size)
scale_v = 3.
concentration = tf.constant([2.])
concentration_v = 2.
x = [3., 3.1, 4., 5., 6., 7.]
pareto = tfd.Pareto(concentration, scale)
log_cdf = pareto.log_cdf(x)
self.assertEqual(log_cdf.shape, (6,))
self.assertAllClose(
self.evaluate(log_cdf),
self._scipy_pareto(concentration_v, scale_v).logcdf(x))
cdf = pareto.cdf(x)
self.assertEqual(cdf.shape, (6,))
self.assertAllClose(
self.evaluate(cdf),
self._scipy_pareto(concentration_v, scale_v).cdf(x))
def testParetoLogCdfMultidimensional(self):
batch_size = 6
scale = tf.constant([[2., 4., 5.]] * batch_size)
scale_v = [2., 4., 5.]
concentration = tf.constant([[1.]] * batch_size)
concentration_v = 1.
x = np.array([[6., 7., 9.2, 5., 6., 7.]], dtype=np.float32).T
pareto = tfd.Pareto(concentration, scale)
log_cdf = pareto.log_cdf(x)
self.assertEqual(log_cdf.shape, (6, 3))
self.assertAllClose(
self.evaluate(log_cdf),
self._scipy_pareto(concentration_v, scale_v).logcdf(x))
cdf = pareto.cdf(x)
self.assertEqual(cdf.shape, (6, 3))
self.assertAllClose(
self.evaluate(cdf),
self._scipy_pareto(concentration_v, scale_v).cdf(x))
def testParetoPDFGradientZeroOutsideSupport(self):
scale = tf.constant(1.)
concentration = tf.constant(3.)
# Check the gradient on the undefined portion.
x = scale - 1
pareto = tfd.Pareto(concentration, scale)
compute_pdf = lambda x: pareto.prob(x) # pylint:disable=unnecessary-lambda
self.assertAlmostEqual(self.compute_gradients(
compute_pdf, args=[x])[0], 0.)
def testParetoCDFGradientZeroOutsideSupport(self):
scale = tf.constant(1.)
concentration = tf.constant(3.)
# Check the gradient on the undefined portion.
x = scale - 1
pareto = tfd.Pareto(concentration, scale)
compute_cdf = lambda x: pareto.cdf(x) # pylint:disable=unnecessary-lambda
self.assertAlmostEqual(
self.compute_gradients(
compute_cdf, args=[x])[0], 0.)
def testParetoMean(self):
scale = [1.4, 2., 2.5]
concentration = [2., 3., 2.5]
pareto = tfd.Pareto(concentration, scale)
self.assertEqual(pareto.mean().shape, (3,))
self.assertAllClose(
self.evaluate(pareto.mean()),
self._scipy_pareto(concentration, scale).mean())
def testParetoMeanInf(self):
scale = [1.4, 2., 2.5]
concentration = [0.4, 0.9, 0.99]
pareto = tfd.Pareto(concentration, scale)
self.assertEqual(pareto.mean().shape, (3,))
self.assertTrue(
np.all(np.isinf(self.evaluate(pareto.mean()))))
def testParetoVariance(self):
scale = [1.4, 2., 2.5]
concentration = [2., 3., 2.5]
pareto = tfd.Pareto(concentration, scale)
self.assertEqual(pareto.variance().shape, (3,))
self.assertAllClose(
self.evaluate(pareto.variance()),
self._scipy_pareto(concentration, scale).var())
def testParetoVarianceInf(self):
scale = [1.4, 2., 2.5]
concentration = [0.4, 0.9, 0.99]
pareto = tfd.Pareto(concentration, scale)
self.assertEqual(pareto.variance().shape, (3,))
self.assertTrue(
np.all(np.isinf(self.evaluate(pareto.variance()))))
def testParetoStd(self):
scale = [1.4, 2., 2.5]
concentration = [2., 3., 2.5]
pareto = tfd.Pareto(concentration, scale)
self.assertEqual(pareto.stddev().shape, (3,))
self.assertAllClose(
self.evaluate(pareto.stddev()),
self._scipy_pareto(concentration, scale).std())
def testParetoMode(self):
scale = [0.4, 1.4, 2., 2.5]
concentration = [1., 2., 3., 2.5]
pareto = tfd.Pareto(concentration, scale)
self.assertEqual(pareto.mode().shape, (4,))
self.assertAllClose(self.evaluate(pareto.mode()), scale)
def testParetoSampleMean(self):
scale = 4.
concentration = 3.
n = int(100e3)
pareto = tfd.Pareto(concentration, scale)
samples = pareto.sample(n, seed=123456)
sample_values = self.evaluate(samples)
self.assertEqual(samples.shape, (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertAllClose(
sample_values.mean(),
self._scipy_pareto(concentration, scale).mean(),
rtol=.01,
atol=0)
def testParetoSampleVariance(self):
scale = 1.
concentration = 3.
n = int(400e3)
pareto = tfd.Pareto(concentration, scale)
samples = pareto.sample(n, seed=123456)
sample_values = self.evaluate(samples)
self.assertEqual(samples.shape, (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertAllClose(
sample_values.var(),
self._scipy_pareto(concentration, scale).var(),
rtol=.03,
atol=0)
def testParetoSampleMultidimensionalMean(self):
scale = np.array([np.arange(1, 21, dtype=np.float32)])
concentration = 3.
pareto = tfd.Pareto(concentration, scale)
n = int(100e3)
samples = pareto.sample(n, seed=123456)
sample_values = self.evaluate(samples)
self.assertEqual(samples.shape, (n, 1, 20))
self.assertEqual(sample_values.shape, (n, 1, 20))
self.assertAllClose(
sample_values.mean(axis=0),
self._scipy_pareto(concentration, scale).mean(),
rtol=.01,
atol=0)
def testParetoSampleMultidimensionalVariance(self):
scale = np.array([np.arange(1, 11, dtype=np.float32)])
concentration = 4.
pareto = tfd.Pareto(concentration, scale)
n = int(800e3)
samples = pareto.sample(n, seed=123456)
sample_values = self.evaluate(samples)
self.assertEqual(samples.shape, (n, 1, 10))
self.assertEqual(sample_values.shape, (n, 1, 10))
self.assertAllClose(
sample_values.var(axis=0),
self._scipy_pareto(concentration, scale).var(),
rtol=.05,
atol=0)
def testParetoParetoKLFinite(self):
a_scale = np.arange(1.0, 5.0)
a_concentration = 1.0
b_scale = 1.0
b_concentration = np.arange(2.0, 10.0, 2)
a = tfd.Pareto(concentration=a_concentration, scale=a_scale)
b = tfd.Pareto(concentration=b_concentration, scale=b_scale)
true_kl = (b_concentration * (np.log(a_scale) - np.log(b_scale)) +
np.log(a_concentration) - np.log(b_concentration) +
b_concentration / a_concentration - 1.0)
kl = tfd.kl_divergence(a, b)
x = a.sample(int(1e5), seed=0)
kl_sample = tf.reduce_mean(a.log_prob(x) - b.log_prob(x), 0)
kl_, kl_sample_ = self.evaluate([kl, kl_sample])
self.assertAllEqual(true_kl, kl_)
self.assertAllClose(true_kl, kl_sample_, atol=0., rtol=1e-2)
zero_kl = tfd.kl_divergence(a, a)
true_zero_kl_, zero_kl_ = self.evaluate([tf.zeros_like(true_kl), zero_kl])
self.assertAllEqual(true_zero_kl_, zero_kl_)
def testParetoParetoKLInfinite(self):
a = tfd.Pareto(concentration=1.0, scale=1.0)
b = tfd.Pareto(concentration=1.0, scale=2.0)
kl = tfd.kl_divergence(a, b)
kl_ = self.evaluate(kl)
self.assertAllEqual(np.inf, kl_)
if __name__ == "__main__":
tf.test.main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdeploy.codebase.mmdet import get_post_processing_params, multiclass_nms
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
func_name='mmdet.models.YOLOXHead.get_bboxes')
def yolox_head__get_bboxes(ctx,
self,
cls_scores,
bbox_preds,
objectnesses,
img_metas=None,
cfg=None,
rescale=False,
with_nms=True):
"""Rewrite `get_bboxes` of `YOLOXHead` for default backend.
Rewrite this function to deploy model, transform network output for a
batch into bbox predictions.
Args:
ctx: Context that contains original meta information.
self: Represent the instance of the original class.
cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * 4, H, W).
objectnesses (list[Tensor], Optional): Score factor for
all scale level, each is a 4D-tensor, has shape
(batch_size, 1, H, W).
img_metas (list[dict]): Image meta info. Default None.
cfg (mmcv.Config, Optional): Test / postprocessing configuration,
if None, test_cfg would be used. Default None.
rescale (bool): If True, return boxes in original image space.
Default False.
        with_nms (bool): If True, do nms before returning boxes.
Default True.
Returns:
tuple[Tensor, Tensor]: The first item is an (N, num_box, 5) tensor,
where 5 represent (tl_x, tl_y, br_x, br_y, score), N is batch
size and the score between 0 and 1. The shape of the second
tensor in the tuple is (N, num_box), and each element
represents the class label of the corresponding box.
"""
assert len(cls_scores) == len(bbox_preds) == len(objectnesses)
device = cls_scores[0].device
cfg = self.test_cfg if cfg is None else cfg
batch_size = bbox_preds[0].shape[0]
featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes, device=device, with_stride=True)
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(batch_size, -1,
self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, 4)
for bbox_pred in bbox_preds
]
flatten_objectness = [
objectness.permute(0, 2, 3, 1).reshape(batch_size, -1)
for objectness in objectnesses
]
cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()
score_factor = torch.cat(flatten_objectness, dim=1).sigmoid()
flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
flatten_priors = torch.cat(mlvl_priors)
bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds)
# directly multiply score factor and feed to nms
scores = cls_scores * (score_factor.unsqueeze(-1))
if not with_nms:
return bboxes, scores
deploy_cfg = ctx.cfg
post_params = get_post_processing_params(deploy_cfg)
max_output_boxes_per_class = post_params.max_output_boxes_per_class
iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
score_threshold = cfg.get('score_thr', post_params.score_threshold)
pre_top_k = post_params.pre_top_k
keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
return multiclass_nms(bboxes, scores, max_output_boxes_per_class,
iou_threshold, score_threshold, pre_top_k,
keep_top_k)
|
#!/usr/bin/python3
import os
import numpy as np
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--disparity', type=str,
help='Input x-disparity map', required=True)
parser.add_argument('--ground_truth', type=str,
help='Ground truth keypoints', required=True)
args = parser.parse_args()
# Load disparity map
pred_disparity = np.loadtxt(args.disparity, skiprows=1)
print("Input: {}".format(os.path.abspath(args.disparity)))
# Calculate disparity from keypoints:
keypoints = np.loadtxt(args.ground_truth, skiprows=1, delimiter=" ")
print("Ground truth: {}".format(os.path.abspath(args.ground_truth)))
# The ground-truth x-disparity is computed per keypoint inside the loop below
err = []
for keypoint in keypoints:
gt_xdisp = keypoint[2] - keypoint[0]
x, y = keypoint[:2].astype(int)
pred_xdisp = pred_disparity[y, x]
if pred_xdisp == 0:
continue
err.append(gt_xdisp - pred_xdisp)
err = np.array(err)
print("Note: statistics are computed for supplied keypoints only")
print("Mean disparity error: {:1.2f} px".format(err.mean()))
print("Median disparity error: {:1.2f} px".format(np.median(err)))
print("Stdev disparity error: {:1.2f} px".format(err.std()))
print("Pixels with err > 0.5px: {:1.2f}%".format(100*sum(np.abs(err) > 0.5)/len(err)))
print("Err > 1.0: {:1.2f}%".format(100*sum(np.abs(err) > 1.0)/len(err)))
print("Err > 2.0: {:1.2f}%".format(100*sum(np.abs(err) > 2.0)/len(err)))
print("Err > 4.0: {:1.2f}%".format(100*sum(np.abs(err) > 4.0)/len(err)))
|
# -*- coding: utf-8 -*-
translations = {
# Days
'days': {
0: 'Sondag',
1: 'Maandag',
2: 'Dinsdag',
3: 'Woensdag',
4: 'Donderdag',
5: 'Vrydag',
6: 'Saterdag'
},
'days_abbrev': {
0: 'Son',
1: 'Maa',
2: 'Din',
3: 'Woe',
4: 'Don',
5: 'Vry',
6: 'Sat'
},
# Months
'months': {
1: 'Januarie',
2: 'Februarie',
3: 'Maart',
4: 'April',
5: 'Mei',
6: 'Junie',
7: 'Julie',
8: 'Augustus',
9: 'September',
10: 'Oktober',
11: 'November',
12: 'Desember',
},
'months_abbrev': {
1: 'Jan',
2: 'Feb',
3: 'Mrt',
4: 'Apr',
5: 'Mei',
6: 'Jun',
7: 'Jul',
8: 'Aug',
9: 'Sep',
10: 'Okt',
11: 'Nov',
12: 'Des',
},
# Units of time
'year': ['{count} jaar', '{count} jare'],
'month': ['{count} maand', '{count} maande'],
'week': ['{count} week', '{count} weke'],
'day': ['{count} dag', '{count} dae'],
'hour': ['{count} uur', '{count} ure'],
'minute': ['{count} minuut', '{count} minute'],
    'second': ['{count} sekonde', '{count} sekondes'],
# Relative time
'ago': '{time} terug',
'from_now': '{time} van nou af',
'after': '{time} na',
'before': '{time} voor',
# Meridians
'meridian': lambda time: 'VM' if 0 <= time[0] < 12 else 'NM',
# Date formats
'date_formats': {
'LTS': 'HH:mm:ss',
'LT': 'HH:mm',
'LLLL': 'dddd, D MMMM YYYY HH:mm',
'LLL': 'D MMMM YYYY HH:mm',
'LL': 'D MMMM YYYY',
'L': 'DD/MM/YYYY',
},
}
|
import os
import time
import numpy as np
import pandas as pd
from importlib import reload
import pyscf
from pyscf import __config__
from pyscf.pbc import tools
ALLOWED_ENGINES = ["FFTW", "NUMPY", "NUMPY+BLAS", "BLAS"]
def bench_fft_engine(method: str, mesh_size: int):
# Check inputs (in case this is used as solo)
if method not in ALLOWED_ENGINES:
        msg = f"{method} is not an allowed FFT engine ({ALLOWED_ENGINES})"
raise ValueError(msg)
# Set the engine
__config__.pbc_tools_pbc_fft_engine = method
reload(pyscf.pbc.tools.pbc) # reload so we see updated config
a = np.random.random([2, mesh_size, mesh_size, mesh_size])
# Time FFT
_t0 = time.perf_counter()
tools.fft(a, [mesh_size, mesh_size, mesh_size])
total_time = time.perf_counter() - _t0
print(f"FFT TIME {total_time}")
return total_time
def bench_all_fft_engines(mesh_size: int):
# Setup data directory
os.makedirs("_data", exist_ok=True)
filename = "_data/bench_fft_data.csv"
# Read in old data so we can append if necessary
if os.path.exists(filename):
data = pd.read_csv(filename)
print(data)
data = data.to_dict(orient="list")
else:
data = {
"FFT Engine": [],
"Time (s)": [],
"OMP_NUM_THREADS": [],
"MESH_SIZE": [],
}
# Iterate through all engines
for eng in ALLOWED_ENGINES:
total_time = bench_fft_engine(eng, mesh_size=mesh_size)
data["FFT Engine"].append(eng)
data["Time (s)"].append(total_time)
        data["OMP_NUM_THREADS"].append(int(os.environ.get("OMP_NUM_THREADS", 0)))  # 0 means the variable was not set
data["MESH_SIZE"].append(mesh_size)
pd.DataFrame(data).to_csv(filename, index=False)
return data
if __name__ == "__main__":
# Read CLIs
import argparse
parser = argparse.ArgumentParser(
description="Benchmark the FFT engine options for PySCF"
)
parser.add_argument(
"--mesh_size",
type=int,
required=True,
help="The size of the mesh used in the problem. The larger the mesh, the more memory and operations FFT requires.",
)
args = parser.parse_args()
mesh_size = args.mesh_size
# For debugging
# bench_fft_engine("FFTW", mesh_size)
# bench_fft_engine("NUMPY", mesh_size)
# bench_fft_engine("NUMPY+BLAS", mesh_size)
# bench_fft_engine("BLAS", mesh_size)
data = bench_all_fft_engines(mesh_size)
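# A hedged usage note: the script filename and thread count below are illustrative
# assumptions; only the CSV path comes from the code above.
#
#     OMP_NUM_THREADS=8 python bench_fft.py --mesh_size 81
#
# The accumulated results can then be inspected with pandas, e.g.:
#
#     results = pd.read_csv("_data/bench_fft_data.csv")
#     print(results.sort_values("Time (s)"))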
|
#!/usr/bin/env python
# coding: utf-8
# In[4]:
# Uses the socket module (part of the Python standard library)
# Not tested on other Python versions
import socket
Receive_Buffersize = 4096
def ipcheck():
return socket.gethostbyname(socket.getfqdn())
class TcpNet:
    def __init__(self):  # constructor
        self.com_socket = socket.socket()  # create the socket object
        self.Connection = self.com_socket  # use the raw socket until a connection is accepted
    def Accept(self, IP, Port):  # bind to the given address and port, then wait for a client
        self.com_socket.bind((IP, Port))
        self.com_socket.listen(10)
        self.Connection, self.address = self.com_socket.accept()
    def Connect(self, IP, Port):  # connect to the given address and port
        self.com_socket.connect((IP, Port))
    def Send(self, bdta):  # send raw bytes (binary data expected)
        self.Connection.send(bdta)
    def SendStr(self, Str1):  # send a string (UTF-8 encoded)
        self.Connection.send(bytes(Str1, "UTF-8"))
    def Receive(self):  # receive raw bytes (binary data returned)
        return self.Connection.recv(Receive_Buffersize)
    def ReceiveStr(self):  # receive a string (UTF-8 decoded)
        return self.Connection.recv(Receive_Buffersize).decode("UTF-8")
    def Socket_close(self):  # close the connection and the underlying socket
        if self.Connection is not self.com_socket:
            self.Connection.close()
        self.com_socket.close()
# In[ ]:
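# A minimal, hedged usage sketch for TcpNet: a throwaway server thread echoes one message back
# to a client in the same process. The port and messages are placeholders, and the short sleep
# is only a crude way to let the server start listening before the client connects.
if __name__ == "__main__":
    import threading
    import time
    def _demo_server():
        srv = TcpNet()
        srv.Accept("127.0.0.1", 50007)  # placeholder port
        print("server received:", srv.ReceiveStr())
        srv.SendStr("world")
        srv.Socket_close()
    t = threading.Thread(target=_demo_server)
    t.start()
    time.sleep(0.5)  # crude wait until the server is listening
    cli = TcpNet()
    cli.Connect("127.0.0.1", 50007)
    cli.SendStr("hello")
    print("client received:", cli.ReceiveStr())
    cli.Socket_close()
    t.join()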
|
# -*- coding: utf-8 -*-
# File: varreplace.py
# Credit: Qinyao He
from contextlib import contextmanager
import tensorflow as tf
from .common import get_tf_version_tuple
__all__ = ['custom_getter_scope', 'freeze_variables', 'remap_variables']
@contextmanager
def custom_getter_scope(custom_getter):
"""
Args:
custom_getter: the same as in :func:`tf.get_variable`
Returns:
The current variable scope with a custom_getter.
"""
scope = tf.get_variable_scope()
if get_tf_version_tuple() >= (1, 5):
with tf.variable_scope(
scope, custom_getter=custom_getter,
auxiliary_name_scope=False):
yield
else:
ns = tf.get_default_graph().get_name_scope()
with tf.variable_scope(
scope, custom_getter=custom_getter):
with tf.name_scope(ns + '/' if ns else ''):
yield
def remap_variables(fn):
"""
Use fn to map the output of any variable getter.
Args:
fn (tf.Variable -> tf.Tensor)
Returns:
The current variable scope with a custom_getter that maps
all the variables by fn.
Example:
.. code-block:: python
with varreplace.remap_variables(lambda var: quantize(var)):
x = FullyConnected('fc', x, 1000) # fc/{W,b} will be quantized
"""
def custom_getter(getter, *args, **kwargs):
v = getter(*args, **kwargs)
return fn(v)
return custom_getter_scope(custom_getter)
def freeze_variables(stop_gradient=True, skip_collection=False):
"""
Return a context to freeze variables,
by wrapping ``tf.get_variable`` with a custom getter.
It works by either applying ``tf.stop_gradient`` on the variables,
or by keeping them out of the ``TRAINABLE_VARIABLES`` collection, or
both.
Example:
.. code-block:: python
        with varreplace.freeze_variables(stop_gradient=False, skip_collection=True):
x = FullyConnected('fc', x, 1000) # fc/* will not be trained
Args:
stop_gradient (bool): if True, variables returned from `get_variable`
            will be wrapped with `tf.stop_gradient` and therefore have no
gradient when used later.
Note that the created variables may still have gradient when accessed
by other approaches (e.g. by name, or by collection).
Also note that this makes `tf.get_variable` returns a Tensor instead of a Variable,
which may break existing code.
Therefore, it's recommended to use the `skip_collection` option instead.
skip_collection (bool): if True, do not add the variable to
``TRAINABLE_VARIABLES`` collection, but to ``MODEL_VARIABLES``
collection. As a result they will not be trained by default.
"""
def custom_getter(getter, *args, **kwargs):
trainable = kwargs.get('trainable', True)
name = args[0] if len(args) else kwargs.get('name')
if skip_collection:
kwargs['trainable'] = False
v = getter(*args, **kwargs)
if skip_collection:
tf.add_to_collection(tf.GraphKeys.MODEL_VARIABLES, v)
if trainable and stop_gradient:
v = tf.stop_gradient(v, name='freezed_' + name)
return v
return custom_getter_scope(custom_getter)
|
import torch
import numpy as np
import torch.nn as nn
import time
import logging
from torch.autograd import Variable
from utils import mask_softmax
def _concat(xs, idx = None):
    if idx is None:
return torch.cat([x.view(-1) for x in xs])
else:
return torch.cat([x.view(-1) for i, x in enumerate(xs) if i in idx])
class Architect(object):
def __init__(self, model, args):
self.network_momentum = args.momentum
self.network_weight_decay = args.weight_decay
self.model = model
self.lr = args.arch_learning_rate
self.optimizer = torch.optim.Adam(self.model.arch_parameters(),
lr=self.lr, betas=(0.5, 0.999), weight_decay=args.arch_weight_decay)
#self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size = 1, gamma=0.8)
def _compute_unrolled_model(self, input, target, eta, network_optimizer, darts, grow = False):
#if grow:
# new_model = self.model.new()
# loss = new_model._loss(input, target, grow)
#else:
loss = self.model._loss(input, target, grow)
if darts:
grads_all = torch.autograd.grad(loss, self.model.parameters())
idx_use = None
theta = _concat(self.model.parameters()).data
#elif grow:
# grads_all = torch.autograd.grad(loss, new_model.parameters(), allow_unused=True)
# idx_use = None
# theta = _concat(new_model.parameters()).data
else:
grads_all = torch.autograd.grad(loss, self.model.parameters(), allow_unused=True)
idx_use = tuple(i for i in range(len(grads_all)) if grads_all[i] is not None )
theta = _concat(self.model.parameters(),idx_use).data
try:
moment = _concat(network_optimizer.state[v]['momentum_buffer'] for i,v in enumerate(self.model.parameters()) if i in idx_use).mul_(self.network_momentum)
except:
moment = torch.zeros_like(theta)
dtheta = _concat(grads_all, idx_use).data + self.network_weight_decay*theta
unrolled_model = self._construct_model_from_theta(theta.sub(moment+dtheta, alpha = eta),idx_use)
return unrolled_model
def step(self, input_train, target_train, input_valid, target_valid, eta, network_optimizer, unrolled, darts = False):
self.optimizer.zero_grad()
if unrolled:
self._backward_step_unrolled(input_train, target_train, input_valid, target_valid, eta, network_optimizer, darts)
else:
self._backward_step(input_valid, target_valid)
#logging.info("normal_alphas grad: ")
#logging.info(self.model.alphas_reduce.grad)
#logging.info("reduce_alphas grad: ")
#logging.info(self.model.alphas_reduce.grad)
# eliminate unwanted grad noise
if not darts:
self.model.alphas_normal.grad *= self.model.normal_indicator
self.model.alphas_reduce.grad *= self.model.reduce_indicator
if not hasattr(self,"normal_grad") or self.normal_grad is None:
self.normal_grad = self.model.alphas_normal.grad.clone()
else:
self.normal_grad+=self.model.alphas_normal.grad
if not hasattr(self,"reduce_grad") or self.reduce_grad is None:
self.reduce_grad = self.model.alphas_reduce.grad.clone()
else:
self.reduce_grad+=self.model.alphas_reduce.grad
nn.utils.clip_grad_norm_(self.model.arch_parameters(), 5)
self.optimizer.step()
def print_arch_grad(self):
logging.info("normal_alphas grad: ")
logging.info(self.normal_grad)
logging.info("reduce_alphas grad: ")
logging.info(self.reduce_grad)
self.normal_grad = None
self.reduce_grad = None
def grow_step(self, input_train, target_train, input_valid, target_valid, eta, network_optimizer, unrolled, darts = False):
self.optimizer.zero_grad()
if unrolled:
self._backward_step_unrolled(input_train, target_train, input_valid, target_valid, eta, network_optimizer, darts, grow=True)
else:
self._backward_step(input_valid, target_valid, grow = True)
#if not grow:
# grow normal
if not hasattr(self,"normal_grad") or self.normal_grad is None:
self.normal_grad = self.model.alphas_normal.grad.clone()
else:
self.normal_grad+=self.model.alphas_normal.grad
if not hasattr(self,"reduce_grad") or self.reduce_grad is None:
self.reduce_grad = self.model.alphas_reduce.grad.clone()
else:
self.reduce_grad+=self.model.alphas_reduce.grad
def grow(self, num_grow):
n_row = self.model.normal_indicator.size(0)
n_col = self.model.normal_indicator.size(1)
max_grad = [0 for i in range(num_grow)]
normal_loc = None
normal_list = []
for i in range(n_row):
for j in range(n_col):
if self.model.normal_indicator[i,j]==0:
cur_grad = self.normal_grad[i,j]
normal_list.append((cur_grad, (i,j)))
normal_list.sort(key = lambda x:x[0], reverse = True)
normal_loc = [normal_list[i][1] for i in range(num_grow)]
n_row = self.model.reduce_indicator.size(0)
n_col = self.model.reduce_indicator.size(1)
max_grad = [0 for i in range(num_grow)]
reduce_loc = None
reduce_list = []
for i in range(n_row):
for j in range(n_col):
if self.model.reduce_indicator[i,j]==0:
cur_grad = self.reduce_grad[i,j]
reduce_list.append((cur_grad, (i,j)))
reduce_list.sort(key = lambda x:x[0], reverse = True)
reduce_loc = [reduce_list[i][1] for i in range(num_grow)]
logging.info("normal_alphas grad: ")
logging.info(self.normal_grad)
logging.info("reduce_alphas grad: ")
logging.info(self.reduce_grad)
logging.info("activated normal_idx: ")
logging.info(normal_loc)
logging.info("activated reduce_idx: ")
logging.info(reduce_loc)
self.model.activate(normal_loc, reduce_loc)
#logging.info(self.model.alphas_normal)
#logging.info(self.model.alphas_reduce)
self.normal_grad = None
self.reduce_grad = None
#for param_group in self.optimizer.param_groups:
# param_group["lr"] = self.lr
#self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size = 1, gamma=0.8)
def _backward_step(self, input_valid, target_valid, grow = False):
if grow:
model_new = self.model.new()
loss = model_new._loss(input_valid, target_valid, grow)
loss.backward()
self.model.alphas_normal.grad = model_new.alphas_normal.grad.clone()
self.model.alphas_reduce.grad = model_new.alphas_reduce.grad.clone()
else:
loss = self.model._loss(input_valid, target_valid, grow)
loss.backward()
def _backward_step_unrolled(self, input_train, target_train, input_valid, target_valid, eta, network_optimizer, darts, grow=False):
unrolled_model = self._compute_unrolled_model(input_train, target_train, eta, network_optimizer, darts, grow=grow)
unrolled_loss = unrolled_model._loss(input_valid, target_valid, grow)
unrolled_loss.backward()
dalpha = [v.grad for v in unrolled_model.arch_parameters()]
if darts:
vector = [v.grad.data for v in unrolled_model.parameters()]
else:
vector = []
for i,v in enumerate(unrolled_model.parameters()):
if v.grad is not None:
vector.append(v.grad.data)
else:
vector.append(None)
implicit_grads = self._hessian_vector_product(vector, input_train, target_train, darts=darts, grow=grow)
for g, ig in zip(dalpha, implicit_grads):
g.data.sub_(ig.data, alpha = eta)
for v, g in zip(self.model.arch_parameters(), dalpha):
if v.grad is None:
v.grad = g.data
else:
v.grad.data.copy_(g.data)
def _construct_model_from_theta(self, theta, idx):
model_new = self.model.new()
params, offset = {}, 0
if idx is None:
model_dict = self.model.state_dict()
for k, v in self.model.named_parameters():
v_length = np.prod(v.size())
params[k] = theta[offset: offset+v_length].view(v.size())
offset += v_length
assert offset == len(theta)
model_dict.update(params)
model_new.load_state_dict(model_dict)
else:
for i, (k, v) in enumerate(self.model.named_parameters()):
if i in idx:
v_length = np.prod(v.size())
params[k] = theta[offset: offset+v_length].view(v.size())
offset += v_length
assert offset == len(theta)
#use self defined function to accelerate
model_new.load_my_state_dict(params)
return model_new.cuda()
def _hessian_vector_product(self, vector, input, target, r=1e-2, darts=False, grow = False):
if darts:
vector_concat = vector
else:
vector_concat = list(filter(lambda x: x is not None, vector))
R = r / _concat(vector_concat).norm()
for p, v in zip(self.model.parameters(), vector):
if v is not None:
p.data.add_(v, alpha = R)
loss = self.model._loss(input, target, grow)
grads_p = torch.autograd.grad(loss, self.model.arch_parameters(), allow_unused = True)
for p, v in zip(self.model.parameters(), vector):
if v is not None:
p.data.sub_(v, alpha = 2*R)
loss = self.model._loss(input, target, grow)
grads_n = torch.autograd.grad(loss, self.model.arch_parameters(), allow_unused = True)
for p, v in zip(self.model.parameters(), vector):
if v is not None:
p.data.add_(v, alpha = R)
return [(x-y).div_(2*R) for x, y in zip(grads_p, grads_n)]
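# A hedged note on `_hessian_vector_product` above: it uses the DARTS finite-difference
# approximation, in which the mixed second derivative of the training loss (w.r.t. weights and
# architecture parameters) applied to a vector v is estimated by a central difference of the
# architecture gradients at w + R*v and w - R*v, i.e.
# (grad_alpha L(w + R*v) - grad_alpha L(w - R*v)) / (2R).
# The toy check below (scalar parameters, made-up quadratic loss, not part of the search code)
# reproduces the analytic mixed derivative.
def _finite_difference_hvp_demo():
    w = torch.tensor(3.0, requires_grad=True)   # stands in for a network weight
    a = torch.tensor(0.5, requires_grad=True)   # stands in for an architecture parameter
    v = torch.tensor(1.0)                       # direction for the Hessian-vector product
    R = 1e-2 / v.norm()                         # same scaling rule as in _hessian_vector_product
    def arch_grad():
        loss = w ** 2 * a                       # toy loss L(w, a) = w^2 * a, so d/da d/dw L = 2w
        return torch.autograd.grad(loss, a)[0]
    with torch.no_grad():
        w += R * v
    g_plus = arch_grad()
    with torch.no_grad():
        w -= 2 * R * v
    g_minus = arch_grad()
    with torch.no_grad():
        w += R * v                              # restore the original weight
    hvp = (g_plus - g_minus) / (2 * R)
    return hvp.item()                           # approximately 6.0 == 2 * w * v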
|
from picamera import PiCamera
from time import sleep
import warnings
warnings.filterwarnings('default', category=DeprecationWarning)
camera = PiCamera()
camera.rotation = 180
# Capturing image to stream: https://picamera.readthedocs.io/en/release-1.13/recipes1.html
# Photo
camera.resolution = (3280, 2464)
sleep(2)
camera.capture('./photo.jpg')
|
from poetry import packages as poetry_pkg
def python_dependency_from_pep_508(name):
dep = poetry_pkg.dependency_from_pep_508(name)
dep._name = f"pypkg-{dep.name}"
dep._pretty_name = f"pypkg-{dep.pretty_name}"
return dep
|
# pylint: disable=wrong-import-position
"""
The ``mlflow`` module provides a high-level "fluent" API for starting and managing MLflow runs.
For example:
.. code:: python
import mlflow
mlflow.start_run()
mlflow.log_param("my", "param")
mlflow.log_metric("score", 100)
mlflow.end_run()
You can also use the context manager syntax like this:
.. code:: python
with mlflow.start_run() as run:
mlflow.log_param("my", "param")
mlflow.log_metric("score", 100)
which automatically terminates the run at the end of the ``with`` block.
The fluent tracking API is not currently threadsafe. Any concurrent callers to the tracking API must
implement mutual exclusion manually.
For a lower level API, see the :py:mod:`mlflow.tracking` module.
"""
from mlflow.version import VERSION as __version__ # pylint: disable=unused-import
from mlflow.utils.logging_utils import _configure_mlflow_loggers
import mlflow.tracking._model_registry.fluent
import mlflow.tracking.fluent
# Filter annoying Cython warnings that serve no good purpose, and so before
# importing other modules.
# See: https://github.com/numpy/numpy/pull/432/commits/170ed4e33d6196d7
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed") # noqa: E402
warnings.filterwarnings("ignore", message="numpy.ufunc size changed") # noqa: E402
import mlflow.projects as projects # noqa: E402
import mlflow.tracking as tracking # noqa: E402
# model flavors
_model_flavors_supported = []
try:
# pylint: disable=unused-import
import mlflow.catboost as catboost # noqa: E402
import mlflow.fastai as fastai # noqa: E402
import mlflow.gluon as gluon # noqa: E402
import mlflow.h2o as h2o # noqa: E402
import mlflow.keras as keras # noqa: E402
import mlflow.lightgbm as lightgbm # noqa: E402
import mlflow.mleap as mleap # noqa: E402
import mlflow.onnx as onnx # noqa: E402
import mlflow.pyfunc as pyfunc # noqa: E402
import mlflow.pytorch as pytorch # noqa: E402
import mlflow.sklearn as sklearn # noqa: E402
import mlflow.spacy as spacy # noqa: E402
import mlflow.spark as spark # noqa: E402
import mlflow.statsmodels as statsmodels # noqa: E402
import mlflow.tensorflow as tensorflow # noqa: E402
import mlflow.xgboost as xgboost # noqa: E402
import mlflow.shap as shap # noqa: E402
_model_flavors_supported = [
"catboost",
"fastai",
"gluon",
"h2o",
"keras",
"lightgbm",
"mleap",
"onnx",
"pyfunc",
"pytorch",
"sklearn",
"spacy",
"spark",
"statsmodels",
"tensorflow",
"xgboost",
"shap",
]
except ImportError as e:
# We are conditional loading these commands since the skinny client does
# not support them due to the pandas and numpy dependencies of MLflow Models
pass
_configure_mlflow_loggers(root_module_name=__name__)
# TODO: Uncomment this block when deprecating Python 3.6 support
# _major = 3
# _minor = 6
# _deprecated_version = (_major, _minor)
# _min_supported_version = (_major, _minor + 1)
# if sys.version_info[:2] == _deprecated_version:
# warnings.warn(
# "MLflow support for Python {dep_ver} is deprecated and will be dropped in "
# "an upcoming release. At that point, existing Python {dep_ver} workflows "
# "that use MLflow will continue to work without modification, but Python {dep_ver} "
# "users will no longer get access to the latest MLflow features and bugfixes. "
# "We recommend that you upgrade to Python {min_ver} or newer.".format(
# dep_ver=".".join(map(str, _deprecated_version)),
# min_ver=".".join(map(str, _min_supported_version)),
# ),
# FutureWarning,
# stacklevel=2,
# )
ActiveRun = mlflow.tracking.fluent.ActiveRun
log_param = mlflow.tracking.fluent.log_param
log_metric = mlflow.tracking.fluent.log_metric
set_tag = mlflow.tracking.fluent.set_tag
delete_tag = mlflow.tracking.fluent.delete_tag
log_artifacts = mlflow.tracking.fluent.log_artifacts
log_artifact = mlflow.tracking.fluent.log_artifact
log_text = mlflow.tracking.fluent.log_text
log_dict = mlflow.tracking.fluent.log_dict
log_image = mlflow.tracking.fluent.log_image
log_figure = mlflow.tracking.fluent.log_figure
active_run = mlflow.tracking.fluent.active_run
get_run = mlflow.tracking.fluent.get_run
start_run = mlflow.tracking.fluent.start_run
end_run = mlflow.tracking.fluent.end_run
search_runs = mlflow.tracking.fluent.search_runs
list_run_infos = mlflow.tracking.fluent.list_run_infos
get_artifact_uri = mlflow.tracking.fluent.get_artifact_uri
set_tracking_uri = tracking.set_tracking_uri
set_registry_uri = tracking.set_registry_uri
get_experiment = mlflow.tracking.fluent.get_experiment
get_experiment_by_name = mlflow.tracking.fluent.get_experiment_by_name
get_tracking_uri = tracking.get_tracking_uri
get_registry_uri = tracking.get_registry_uri
create_experiment = mlflow.tracking.fluent.create_experiment
set_experiment = mlflow.tracking.fluent.set_experiment
log_params = mlflow.tracking.fluent.log_params
log_metrics = mlflow.tracking.fluent.log_metrics
set_tags = mlflow.tracking.fluent.set_tags
delete_experiment = mlflow.tracking.fluent.delete_experiment
delete_run = mlflow.tracking.fluent.delete_run
register_model = mlflow.tracking._model_registry.fluent.register_model
autolog = mlflow.tracking.fluent.autolog
run = projects.run
__all__ = [
"ActiveRun",
"log_param",
"log_params",
"log_metric",
"log_metrics",
"set_tag",
"set_tags",
"delete_tag",
"log_artifacts",
"log_artifact",
"log_text",
"log_dict",
"log_figure",
"log_image",
"active_run",
"start_run",
"end_run",
"search_runs",
"get_artifact_uri",
"get_tracking_uri",
"set_tracking_uri",
"get_experiment",
"get_experiment_by_name",
"create_experiment",
"set_experiment",
"delete_experiment",
"get_run",
"delete_run",
"run",
"register_model",
"get_registry_uri",
"set_registry_uri",
"list_run_infos",
"autolog",
] + _model_flavors_supported
|
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/5/4 15:12
Desc: East Money - Data Center - Research Reports - Profit Forecast
http://data.eastmoney.com/report/profitforecast.jshtml
"""
from datetime import datetime
import pandas as pd
import requests
from tqdm import tqdm
def stock_profit_forecast():
"""
    East Money - Data Center - Research Reports - Profit Forecast
    http://data.eastmoney.com/report/profitforecast.jshtml
    :return: profit forecast data
:rtype: pandas.DataFrame
"""
url = "http://reportapi.eastmoney.com/report/predic"
date_now = datetime.now().date().isoformat()
date_previous = date_now.replace(date_now[:4], str(int(date_now[:4])-2))
params = {
'dyCode': '*',
'pageNo': '1',
'pageSize': '100',
'fields': '',
'beginTime': date_previous,
'endTime': date_now,
'hyCode': '*',
'gnCode': '*',
'marketCode': '*',
'sort': 'count,desc',
'p': '1',
'pageNum': '1',
'_': '1615374649216',
}
r = requests.get(url, params=params)
data_json = r.json()
page_num = data_json['TotalPage']
big_df = pd.DataFrame()
for page in tqdm(range(1, page_num+1)):
params = {
'dyCode': '*',
'pageNo': page,
'pageSize': '100',
'fields': '',
'beginTime': date_previous,
'endTime': date_now,
'hyCode': '*',
'gnCode': '*',
'marketCode': '*',
'sort': 'count,desc',
'p': page,
'pageNum': page,
'_': '1615374649216',
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data'])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = range(1, len(big_df)+1)
big_df.columns = [
'序号',
'名称',
'代码',
'研报数',
'机构投资评级(近六个月)-买入',
'机构投资评级(近六个月)-增持',
'机构投资评级(近六个月)-中性',
'机构投资评级(近六个月)-减持',
'机构投资评级(近六个月)-卖出',
'_',
'_',
'_',
'_',
f'{int(date_previous[:4])+2}预测每股收益',
'_',
'_',
f'{int(date_previous[:4])+3}预测每股收益',
f'{int(date_previous[:4])+4}预测每股收益',
'_',
'_',
'_',
'_',
'_',
f'{int(date_previous[:4])+1}预测每股收益',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
]
big_df = big_df[[
'序号',
'代码',
'名称',
'研报数',
'机构投资评级(近六个月)-买入',
'机构投资评级(近六个月)-增持',
'机构投资评级(近六个月)-中性',
'机构投资评级(近六个月)-减持',
'机构投资评级(近六个月)-卖出',
f'{int(date_previous[:4])+1}预测每股收益',
f'{int(date_previous[:4])+2}预测每股收益',
f'{int(date_previous[:4])+3}预测每股收益',
f'{int(date_previous[:4])+4}预测每股收益',
]]
return big_df
if __name__ == '__main__':
stock_profit_forecast_df = stock_profit_forecast()
print(stock_profit_forecast_df)
|
from twisted.trial import unittest
from axiom import store
from xmantissa import ixmantissa, endpoint
class MantissaQ2Q(unittest.TestCase):
def testInstallation(self):
d = self.mktemp()
s = store.Store(unicode(d))
q = endpoint.UniversalEndpointService(store=s)
q.installOn(s)
self.assertIdentical(ixmantissa.IQ2QService(s), q)
|
"""SMTP/ESMTP client class.
This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP
Authentication) and RFC 2487 (Secure SMTP over TLS).
Notes:
Please remember, when doing ESMTP, that the names of the SMTP service
extensions are NOT the same thing as the option keywords for the RCPT
and MAIL commands!
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> print(s.help())
This is Sendmail version 8.8.4
Topics:
HELO EHLO MAIL RCPT DATA
RSET NOOP QUIT HELP VRFY
EXPN VERB ETRN DSN
For more info use "HELP <topic>".
To report bugs in the implementation send email to
sendmail-bugs@sendmail.org.
For local information send email to Postmaster at your site.
End of HELP info
>>> s.putcmd("vrfy","someone@here")
>>> s.getreply()
(250, "Somebody OverHere <somebody@here.my.org>")
>>> s.quit()
"""
import socket
import io
import re
import email.utils
import email.message
import email.generator
import base64
import hmac
import copy
import datetime
import sys
from email.base64mime import body_encode as encode_base64
__all__ = ['SMTPException', 'SMTPServerDisconnected',
'SMTPResponseException', 'SMTPSenderRefused', 'SMTPRecipientsRefused',
'SMTPDataError', 'SMTPConnectError', 'SMTPHeloError',
'SMTPAuthenticationError', 'quoteaddr', 'quotedata', 'SMTP']
SMTP_PORT = 25
SMTP_SSL_PORT = 465
CRLF = '\r\n'
bCRLF = b'\r\n'
_MAXLINE = 8192
OLDSTYLE_AUTH = re.compile('auth=(.*)', re.I)
class SMTPException(OSError):
"""Base class for all exceptions raised by this module."""
class SMTPNotSupportedError(SMTPException):
"""The command or option is not supported by the SMTP server.
This exception is raised when an attempt is made to run a command or a
command with an option which is not supported by the server.
"""
class SMTPServerDisconnected(SMTPException):
"""Not connected to any SMTP server.
This exception is raised when the server unexpectedly disconnects,
or when an attempt is made to use the SMTP instance before
connecting it to a server.
"""
class SMTPResponseException(SMTPException):
"""Base class for all exceptions that include an SMTP error code.
These exceptions are generated in some instances when the SMTP
server returns an error code. The error code is stored in the
`smtp_code' attribute of the error, and the `smtp_error' attribute
is set to the error message.
"""
def __init__(self, code, msg):
self.smtp_code = code
self.smtp_error = msg
self.args = code, msg
class SMTPSenderRefused(SMTPResponseException):
"""Sender address refused.
    In addition to the attributes set on all SMTPResponseException
exceptions, this sets `sender' to the string that the SMTP refused.
"""
def __init__(self, code, msg, sender):
self.smtp_code = code
self.smtp_error = msg
self.sender = sender
self.args = code, msg, sender
class SMTPRecipientsRefused(SMTPException):
"""All recipient addresses refused.
The errors for each recipient are accessible through the attribute
'recipients', which is a dictionary of exactly the same sort as
SMTP.sendmail() returns.
"""
def __init__(self, recipients):
self.recipients = recipients
self.args = recipients,
class SMTPDataError(SMTPResponseException):
"""The SMTP server didn't accept the data."""
class SMTPConnectError(SMTPResponseException):
"""Error during connection establishment."""
class SMTPHeloError(SMTPResponseException):
"""The server refused our HELO reply."""
class SMTPAuthenticationError(SMTPResponseException):
"""Authentication error.
Most probably the server didn't accept the username/password
combination provided.
"""
def quoteaddr(addrstring):
"""Quote a subset of the email addresses defined by RFC 821.
Should be able to handle anything email.utils.parseaddr can handle.
"""
displayname, addr = email.utils.parseaddr(addrstring)
if (displayname, addr) == ('', ''):
if addrstring.strip().startswith('<'):
return addrstring
return '<%s>' % addrstring
return '<%s>' % addr
def _addr_only(addrstring):
displayname, addr = email.utils.parseaddr(addrstring)
if (displayname, addr) == ('', ''):
return addrstring
return addr
def quotedata(data):
"""Quote data for email.
Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
Internet CRLF end-of-line.
"""
return re.sub('(?m)^\\.', '..', re.sub('(?:\\r\\n|\\n|\\r(?!\\n))',
CRLF, data))
def _quote_periods(bindata):
return re.sub(b'(?m)^\\.', b'..', bindata)
def _fix_eols(data):
return re.sub('(?:\\r\\n|\\n|\\r(?!\\n))', CRLF, data)
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
class SMTP:
"""This class manages a connection to an SMTP or ESMTP server.
SMTP Objects:
SMTP objects have the following attributes:
helo_resp
This is the message given by the server in response to the
most recent HELO command.
ehlo_resp
This is the message given by the server in response to the
most recent EHLO command. This is usually multiline.
does_esmtp
This is a True value _after you do an EHLO command_, if the
server supports ESMTP.
esmtp_features
This is a dictionary, which, if the server supports ESMTP,
will _after you do an EHLO command_, contain the names of the
SMTP service extensions this server supports, and their
parameters (if any).
Note, all extension names are mapped to lower case in the
dictionary.
See each method's docstrings for details. In general, there is a
method of the same name to perform each SMTP command. There is also a
method called 'sendmail' that will do an entire mail transaction.
"""
debuglevel = 0
file = None
helo_resp = None
ehlo_msg = 'ehlo'
ehlo_resp = None
does_esmtp = 0
default_port = SMTP_PORT
    def __init__(self, host='', port=0, local_hostname=None,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
"""Initialize a new instance.
If specified, `host' is the name of the remote host to which to
connect. If specified, `port' specifies the port to which to connect.
By default, smtplib.SMTP_PORT is used. If a host is specified the
connect method is called, and if it returns anything other than a
success code an SMTPConnectError is raised. If specified,
`local_hostname` is used as the FQDN of the local host in the HELO/EHLO
command. Otherwise, the local hostname is found using
socket.getfqdn(). The `source_address` parameter takes a 2-tuple (host,
port) for the socket to bind to as its source address before
connecting. If the host is '' and port is 0, the OS default behavior
will be used.
"""
self._host = host
self.timeout = timeout
self.esmtp_features = {}
self.command_encoding = 'ascii'
self.source_address = source_address
if host:
code, msg = self.connect(host, port)
if code != 220:
self.close()
raise SMTPConnectError(code, msg)
if local_hostname is not None:
self.local_hostname = local_hostname
else:
fqdn = socket.getfqdn()
if '.' in fqdn:
self.local_hostname = fqdn
else:
addr = '127.0.0.1'
try:
addr = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
pass
self.local_hostname = '[%s]' % addr
def __enter__(self):
return self
def __exit__(self, *args):
try:
code, message = self.docmd('QUIT')
if code != 221:
raise SMTPResponseException(code, message)
except SMTPServerDisconnected:
pass
finally:
self.close()
def set_debuglevel(self, debuglevel):
"""Set the debug output level.
A non-false value results in debug messages for connection and for all
messages sent to and received from the server.
"""
self.debuglevel = debuglevel
def _print_debug(self, *args):
if self.debuglevel > 1:
print(datetime.datetime.now().time(), *args, file=sys.stderr)
else:
print(*args, file=sys.stderr)
def _get_socket(self, host, port, timeout):
if self.debuglevel > 0:
self._print_debug('connect: to', (host, port), self.source_address)
        return socket.create_connection((host, port), timeout,
                                        self.source_address)
def connect(self, host='localhost', port=0, source_address=None):
"""Connect to a host on a given port.
If the hostname ends with a colon (`:') followed by a number, and
there is no port specified, that suffix will be stripped off and the
number interpreted as the port number to use.
Note: This method is automatically invoked by __init__, if a host is
specified during instantiation.
"""
if source_address:
self.source_address = source_address
if not port and host.find(':') == host.rfind(':'):
i = host.rfind(':')
if i >= 0:
host, port = host[:i], host[i + 1:]
try:
port = int(port)
except ValueError:
raise OSError('nonnumeric port')
if not port:
port = self.default_port
if self.debuglevel > 0:
self._print_debug('connect:', (host, port))
self.sock = self._get_socket(host, port, self.timeout)
self.file = None
code, msg = self.getreply()
if self.debuglevel > 0:
self._print_debug('connect:', repr(msg))
return code, msg
def send(self, s):
"""Send `s' to the server."""
if self.debuglevel > 0:
self._print_debug('send:', repr(s))
if hasattr(self, 'sock') and self.sock:
if isinstance(s, str):
s = s.encode(self.command_encoding)
try:
self.sock.sendall(s)
except OSError:
self.close()
raise SMTPServerDisconnected('Server not connected')
else:
raise SMTPServerDisconnected('please run connect() first')
def putcmd(self, cmd, args=''):
"""Send a command to the server."""
if args == '':
str = '%s%s' % (cmd, CRLF)
else:
str = '%s %s%s' % (cmd, args, CRLF)
self.send(str)
def getreply(self):
"""Get a reply from the server.
Returns a tuple consisting of:
- server response code (e.g. '250', or such, if all goes well)
Note: returns -1 if it can't read response code.
- server response string corresponding to response code (multiline
responses are converted to a single, multiline string).
Raises SMTPServerDisconnected if end-of-file is reached.
"""
resp = []
if self.file is None:
self.file = self.sock.makefile('rb')
while 1:
try:
line = self.file.readline(_MAXLINE + 1)
except OSError as e:
self.close()
raise SMTPServerDisconnected(
'Connection unexpectedly closed: ' + str(e))
if not line:
self.close()
raise SMTPServerDisconnected('Connection unexpectedly closed')
if self.debuglevel > 0:
self._print_debug('reply:', repr(line))
if len(line) > _MAXLINE:
self.close()
raise SMTPResponseException(500, 'Line too long.')
resp.append(line[4:].strip(b' \t\r\n'))
code = line[:3]
try:
errcode = int(code)
except ValueError:
errcode = -1
break
if line[3:4] != b'-':
break
errmsg = b'\n'.join(resp)
if self.debuglevel > 0:
self._print_debug('reply: retcode (%s); Msg: %a' % (errcode,
errmsg))
return errcode, errmsg
def docmd(self, cmd, args=''):
"""Send a command, and return its response code."""
self.putcmd(cmd, args)
return self.getreply()
def helo(self, name=''):
"""SMTP 'helo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
self.putcmd('helo', name or self.local_hostname)
code, msg = self.getreply()
self.helo_resp = msg
return code, msg
def ehlo(self, name=''):
""" SMTP 'ehlo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
self.esmtp_features = {}
self.putcmd(self.ehlo_msg, name or self.local_hostname)
code, msg = self.getreply()
if code == -1 and len(msg) == 0:
self.close()
raise SMTPServerDisconnected('Server not connected')
self.ehlo_resp = msg
if code != 250:
return code, msg
self.does_esmtp = 1
assert isinstance(self.ehlo_resp, bytes), repr(self.ehlo_resp)
resp = self.ehlo_resp.decode('latin-1').split('\n')
del resp[0]
for each in resp:
auth_match = OLDSTYLE_AUTH.match(each)
if auth_match:
self.esmtp_features['auth'] = self.esmtp_features.get('auth',
'') + ' ' + auth_match.groups(0)[0]
continue
m = re.match('(?P<feature>[A-Za-z0-9][A-Za-z0-9\\-]*) ?', each)
if m:
feature = m.group('feature').lower()
params = m.string[m.end('feature'):].strip()
if feature == 'auth':
self.esmtp_features[feature] = self.esmtp_features.get(
feature, '') + ' ' + params
else:
self.esmtp_features[feature] = params
return code, msg
def has_extn(self, opt):
"""Does the server support a given SMTP service extension?"""
return opt.lower() in self.esmtp_features
def help(self, args=''):
"""SMTP 'help' command.
Returns help text from server."""
self.putcmd('help', args)
return self.getreply()[1]
def rset(self):
"""SMTP 'rset' command -- resets session."""
self.command_encoding = 'ascii'
return self.docmd('rset')
def _rset(self):
"""Internal 'rset' command which ignores any SMTPServerDisconnected error.
Used internally in the library, since the server disconnected error
should appear to the application when the *next* command is issued, if
we are doing an internal "safety" reset.
"""
try:
self.rset()
except SMTPServerDisconnected:
pass
def noop(self):
"""SMTP 'noop' command -- doesn't do anything :>"""
return self.docmd('noop')
def mail(self, sender, options=[]):
"""SMTP 'mail' command -- begins mail xfer session.
This method may raise the following exceptions:
SMTPNotSupportedError The options parameter includes 'SMTPUTF8'
but the SMTPUTF8 extension is not supported by
the server.
"""
optionlist = ''
if options and self.does_esmtp:
if any(x.lower() == 'smtputf8' for x in options):
if self.has_extn('smtputf8'):
self.command_encoding = 'utf-8'
else:
raise SMTPNotSupportedError(
'SMTPUTF8 not supported by server')
optionlist = ' ' + ' '.join(options)
self.putcmd('mail', 'FROM:%s%s' % (quoteaddr(sender), optionlist))
return self.getreply()
def rcpt(self, recip, options=[]):
"""SMTP 'rcpt' command -- indicates 1 recipient for this mail."""
optionlist = ''
if options and self.does_esmtp:
optionlist = ' ' + ' '.join(options)
self.putcmd('rcpt', 'TO:%s%s' % (quoteaddr(recip), optionlist))
return self.getreply()
def data(self, msg):
"""SMTP 'DATA' command -- sends message data to server.
Automatically quotes lines beginning with a period per rfc821.
Raises SMTPDataError if there is an unexpected reply to the
DATA command; the return value from this method is the final
        response code received when all the data is sent. If msg
is a string, lone '\\r' and '\\n' characters are converted to
'\\r\\n' characters. If msg is bytes, it is transmitted as is.
"""
self.putcmd('data')
code, repl = self.getreply()
if self.debuglevel > 0:
self._print_debug('data:', (code, repl))
if code != 354:
raise SMTPDataError(code, repl)
else:
if isinstance(msg, str):
msg = _fix_eols(msg).encode('ascii')
q = _quote_periods(msg)
if q[-2:] != bCRLF:
q = q + bCRLF
q = q + b'.' + bCRLF
self.send(q)
code, msg = self.getreply()
if self.debuglevel > 0:
self._print_debug('data:', (code, msg))
return code, msg
def verify(self, address):
"""SMTP 'verify' command -- checks for address validity."""
self.putcmd('vrfy', _addr_only(address))
return self.getreply()
vrfy = verify
def expn(self, address):
"""SMTP 'expn' command -- expands a mailing list."""
self.putcmd('expn', _addr_only(address))
return self.getreply()
def ehlo_or_helo_if_needed(self):
"""Call self.ehlo() and/or self.helo() if needed.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
"""
if self.helo_resp is None and self.ehlo_resp is None:
if not 200 <= self.ehlo()[0] <= 299:
code, resp = self.helo()
if not 200 <= code <= 299:
raise SMTPHeloError(code, resp)
def auth(self, mechanism, authobject, *, initial_response_ok=True):
"""Authentication command - requires response processing.
'mechanism' specifies which authentication mechanism is to
be used - the valid values are those listed in the 'auth'
element of 'esmtp_features'.
'authobject' must be a callable object taking a single argument:
data = authobject(challenge)
It will be called to process the server's challenge response; the
challenge argument it is passed will be a bytes. It should return
bytes data that will be base64 encoded and sent to the server.
Keyword arguments:
- initial_response_ok: Allow sending the RFC 4954 initial-response
to the AUTH command, if the authentication methods supports it.
"""
mechanism = mechanism.upper()
initial_response = authobject() if initial_response_ok else None
if initial_response is not None:
response = encode_base64(initial_response.encode('ascii'), eol='')
code, resp = self.docmd('AUTH', mechanism + ' ' + response)
else:
code, resp = self.docmd('AUTH', mechanism)
if code == 334:
challenge = base64.decodebytes(resp)
response = encode_base64(authobject(challenge).encode('ascii'),
eol='')
code, resp = self.docmd(response)
if code in (235, 503):
return code, resp
raise SMTPAuthenticationError(code, resp)
def auth_cram_md5(self, challenge=None):
""" Authobject to use with CRAM-MD5 authentication. Requires self.user
and self.password to be set."""
if challenge is None:
return None
return self.user + ' ' + hmac.HMAC(self.password.encode('ascii'),
challenge, 'md5').hexdigest()
def auth_plain(self, challenge=None):
""" Authobject to use with PLAIN authentication. Requires self.user and
self.password to be set."""
return '\x00%s\x00%s' % (self.user, self.password)
def auth_login(self, challenge=None):
""" Authobject to use with LOGIN authentication. Requires self.user and
self.password to be set."""
if challenge is None:
return self.user
else:
return self.password
def login(self, user, password, *, initial_response_ok=True):
"""Log in on an SMTP server that requires authentication.
The arguments are:
- user: The user name to authenticate with.
- password: The password for the authentication.
Keyword arguments:
- initial_response_ok: Allow sending the RFC 4954 initial-response
to the AUTH command, if the authentication methods supports it.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method will return normally if the authentication was successful.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPAuthenticationError The server didn't accept the username/
password combination.
SMTPNotSupportedError The AUTH command is not supported by the
server.
SMTPException No suitable authentication method was
found.
"""
self.ehlo_or_helo_if_needed()
if not self.has_extn('auth'):
raise SMTPNotSupportedError(
'SMTP AUTH extension not supported by server.')
advertised_authlist = self.esmtp_features['auth'].split()
preferred_auths = ['CRAM-MD5', 'PLAIN', 'LOGIN']
authlist = [auth for auth in preferred_auths if auth in
advertised_authlist]
if not authlist:
raise SMTPException('No suitable authentication method found.')
self.user, self.password = user, password
for authmethod in authlist:
method_name = 'auth_' + authmethod.lower().replace('-', '_')
try:
code, resp = self.auth(authmethod, getattr(self,
method_name), initial_response_ok=initial_response_ok)
if code in (235, 503):
return code, resp
except SMTPAuthenticationError as e:
last_exception = e
raise last_exception
def starttls(self, keyfile=None, certfile=None, context=None):
"""Puts the connection to the SMTP server into TLS mode.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
If the server supports TLS, this will encrypt the rest of the SMTP
session. If you provide the keyfile and certfile parameters,
the identity of the SMTP server and client can be checked. This,
however, depends on whether the socket module really checks the
certificates.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
"""
self.ehlo_or_helo_if_needed()
if not self.has_extn('starttls'):
raise SMTPNotSupportedError(
'STARTTLS extension not supported by server.')
resp, reply = self.docmd('STARTTLS')
if resp == 220:
if not _have_ssl:
raise RuntimeError('No SSL support included in this Python')
if context is not None and keyfile is not None:
raise ValueError(
'context and keyfile arguments are mutually exclusive')
if context is not None and certfile is not None:
raise ValueError(
'context and certfile arguments are mutually exclusive')
if keyfile is not None or certfile is not None:
import warnings
warnings.warn(
                    'keyfile and certfile are deprecated, use a custom context instead'
, DeprecationWarning, 2)
if context is None:
context = ssl._create_stdlib_context(certfile=certfile,
keyfile=keyfile)
            self.sock = context.wrap_socket(self.sock,
                                            server_hostname=self._host)
self.file = None
self.helo_resp = None
self.ehlo_resp = None
self.esmtp_features = {}
self.does_esmtp = 0
else:
raise SMTPResponseException(resp, reply)
return resp, reply
def sendmail(self, from_addr, to_addrs, msg, mail_options=[],
rcpt_options=[]):
"""This command performs an entire mail transaction.
The arguments are:
- from_addr : The address sending this mail.
- to_addrs : A list of addresses to send this mail to. A bare
string will be treated as a list with 1 address.
- msg : The message to send.
- mail_options : List of ESMTP options (such as 8bitmime) for the
mail command.
- rcpt_options : List of ESMTP options (such as DSN commands) for
all the rcpt commands.
msg may be a string containing characters in the ASCII range, or a byte
string. A string is encoded to bytes using the ascii codec, and lone
\\r and \\n characters are converted to \\r\\n characters.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first. If the server does ESMTP, message size
and each of the specified options will be passed to it. If EHLO
fails, HELO will be tried and ESMTP options suppressed.
This method will return normally if the mail is accepted for at least
one recipient. It returns a dictionary, with one entry for each
recipient that was refused. Each entry contains a tuple of the SMTP
error code and the accompanying error message sent by the server.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPRecipientsRefused The server rejected ALL recipients
(no mail was sent).
SMTPSenderRefused The server didn't accept the from_addr.
SMTPDataError The server replied with an unexpected
error code (other than a refusal of
a recipient).
SMTPNotSupportedError The mail_options parameter includes 'SMTPUTF8'
but the SMTPUTF8 extension is not supported by
the server.
Note: the connection will be open even after an exception is raised.
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"]
>>> msg = '''\\
... From: Me@my.org
... Subject: testin'...
...
... This is a test '''
>>> s.sendmail("me@my.org",tolist,msg)
{ "three@three.org" : ( 550 ,"User unknown" ) }
>>> s.quit()
In the above example, the message was accepted for delivery to three
of the four addresses, and one was rejected, with the error code
550. If all addresses are accepted, then the method will return an
empty dictionary.
"""
self.ehlo_or_helo_if_needed()
esmtp_opts = []
if isinstance(msg, str):
msg = _fix_eols(msg).encode('ascii')
if self.does_esmtp:
if self.has_extn('size'):
esmtp_opts.append('size=%d' % len(msg))
for option in mail_options:
esmtp_opts.append(option)
code, resp = self.mail(from_addr, esmtp_opts)
if code != 250:
if code == 421:
self.close()
else:
self._rset()
raise SMTPSenderRefused(code, resp, from_addr)
senderrs = {}
if isinstance(to_addrs, str):
to_addrs = [to_addrs]
for each in to_addrs:
code, resp = self.rcpt(each, rcpt_options)
if code != 250 and code != 251:
senderrs[each] = code, resp
if code == 421:
self.close()
raise SMTPRecipientsRefused(senderrs)
if len(senderrs) == len(to_addrs):
self._rset()
raise SMTPRecipientsRefused(senderrs)
code, resp = self.data(msg)
if code != 250:
if code == 421:
self.close()
else:
self._rset()
raise SMTPDataError(code, resp)
return senderrs
    def send_message(self, msg, from_addr=None, to_addrs=None,
                     mail_options=[], rcpt_options=[]):
"""Converts message to a bytestring and passes it to sendmail.
The arguments are as for sendmail, except that msg is an
email.message.Message object. If from_addr is None or to_addrs is
None, these arguments are taken from the headers of the Message as
described in RFC 2822 (a ValueError is raised if there is more than
one set of 'Resent-' headers). Regardless of the values of from_addr and
        to_addrs, any Bcc field (or Resent-Bcc field, when the Message is a
resent) of the Message object won't be transmitted. The Message
object is then serialized using email.generator.BytesGenerator and
sendmail is called to transmit the message. If the sender or any of
the recipient addresses contain non-ASCII and the server advertises the
SMTPUTF8 capability, the policy is cloned with utf8 set to True for the
serialization, and SMTPUTF8 and BODY=8BITMIME are asserted on the send.
        If the server does not support SMTPUTF8, an SMTPNotSupportedError is
raised. Otherwise the generator is called without modifying the
policy.
"""
self.ehlo_or_helo_if_needed()
resent = msg.get_all('Resent-Date')
if resent is None:
header_prefix = ''
elif len(resent) == 1:
header_prefix = 'Resent-'
else:
raise ValueError("message has more than one 'Resent-' header block"
)
if from_addr is None:
            from_addr = (msg[header_prefix + 'Sender']
                         if header_prefix + 'Sender' in msg
                         else msg[header_prefix + 'From'])
if to_addrs is None:
            addr_fields = [f for f in (msg[header_prefix + 'To'],
                                       msg[header_prefix + 'Bcc'],
                                       msg[header_prefix + 'Cc'])
                           if f is not None]
to_addrs = [a[1] for a in email.utils.getaddresses(addr_fields)]
msg_copy = copy.copy(msg)
del msg_copy['Bcc']
del msg_copy['Resent-Bcc']
international = False
try:
"""""".join([from_addr, *to_addrs]).encode('ascii')
except UnicodeEncodeError:
if not self.has_extn('smtputf8'):
raise SMTPNotSupportedError(
'One or more source or delivery addresses require internationalized email support, but the server does not advertise the required SMTPUTF8 capability'
)
international = True
with io.BytesIO() as bytesmsg:
if international:
                g = email.generator.BytesGenerator(
                    bytesmsg, policy=msg.policy.clone(utf8=True))
                # Build a new list so the mutable default mail_options
                # argument is not modified across calls.
                mail_options = list(mail_options) + ['SMTPUTF8', 'BODY=8BITMIME']
else:
g = email.generator.BytesGenerator(bytesmsg)
g.flatten(msg_copy, linesep='\r\n')
flatmsg = bytesmsg.getvalue()
return self.sendmail(from_addr, to_addrs, flatmsg, mail_options,
rcpt_options)
def close(self):
"""Close the connection to the SMTP server."""
try:
file = self.file
self.file = None
if file:
file.close()
finally:
sock = self.sock
self.sock = None
if sock:
sock.close()
def quit(self):
"""Terminate the SMTP session."""
res = self.docmd('quit')
self.ehlo_resp = self.helo_resp = None
self.esmtp_features = {}
self.does_esmtp = False
self.close()
return res
if _have_ssl:
class SMTP_SSL(SMTP):
""" This is a subclass derived from SMTP that connects over an SSL
encrypted socket (to use this class you need a socket module that was
compiled with SSL support). If host is not specified, '' (the local
host) is used. If port is omitted, the standard SMTP-over-SSL port
(465) is used. local_hostname and source_address have the same meaning
as they do in the SMTP class. keyfile and certfile are also optional -
they can contain a PEM formatted private key and certificate chain file
        for the SSL connection. context is also optional; it can contain an
        SSLContext and is an alternative to keyfile and certfile. If it is
        specified, both keyfile and certfile must be None.
"""
default_port = SMTP_SSL_PORT
        def __init__(self, host='', port=0, local_hostname=None,
                     keyfile=None, certfile=None,
                     timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                     source_address=None, context=None):
if context is not None and keyfile is not None:
raise ValueError(
'context and keyfile arguments are mutually exclusive')
if context is not None and certfile is not None:
raise ValueError(
'context and certfile arguments are mutually exclusive')
if keyfile is not None or certfile is not None:
import warnings
warnings.warn(
                    'keyfile and certfile are deprecated, use a custom context instead'
, DeprecationWarning, 2)
self.keyfile = keyfile
self.certfile = certfile
if context is None:
context = ssl._create_stdlib_context(certfile=certfile,
keyfile=keyfile)
self.context = context
SMTP.__init__(self, host, port, local_hostname, timeout,
source_address)
def _get_socket(self, host, port, timeout):
if self.debuglevel > 0:
self._print_debug('connect:', (host, port))
new_socket = socket.create_connection((host, port), timeout,
self.source_address)
new_socket = self.context.wrap_socket(new_socket,
server_hostname=self._host)
return new_socket
__all__.append('SMTP_SSL')
LMTP_PORT = 2003
class LMTP(SMTP):
"""LMTP - Local Mail Transfer Protocol
The LMTP protocol, which is very similar to ESMTP, is heavily based
on the standard SMTP client. It's common to use Unix sockets for
LMTP, so our connect() method must support that as well as a regular
host:port server. local_hostname and source_address have the same
meaning as they do in the SMTP class. To specify a Unix socket,
you must use an absolute path as the host, starting with a '/'.
Authentication is supported, using the regular SMTP mechanism. When
    using a Unix socket, LMTP servers generally don't support or require any
authentication, but your mileage might vary."""
ehlo_msg = 'lhlo'
def __init__(self, host='', port=LMTP_PORT, local_hostname=None,
source_address=None):
"""Initialize a new instance."""
SMTP.__init__(self, host, port, local_hostname=local_hostname,
source_address=source_address)
def connect(self, host='localhost', port=0, source_address=None):
"""Connect to the LMTP daemon, on either a Unix or a TCP socket."""
if host[0] != '/':
            return SMTP.connect(self, host, port,
                                source_address=source_address)
try:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.file = None
self.sock.connect(host)
except OSError:
if self.debuglevel > 0:
self._print_debug('connect fail:', host)
if self.sock:
self.sock.close()
self.sock = None
raise
code, msg = self.getreply()
if self.debuglevel > 0:
self._print_debug('connect:', msg)
return code, msg
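# --- Hedged usage sketch (illustration only; not part of the original module) ---
# Delivering a message over a Unix-domain LMTP socket with the LMTP class
# defined above.  The socket path and addresses are hypothetical placeholders
# for a local delivery agent such as Dovecot; adjust them for your setup.
def _example_lmtp_delivery(message_bytes):
    client = LMTP('/var/run/dovecot/lmtp')   # an absolute path selects an AF_UNIX socket
    try:
        # Returns a dict of refused recipients (empty on full success).
        return client.sendmail('sender@example.com',
                               ['rcpt@example.com'],
                               message_bytes)
    finally:
        client.quit()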
if __name__ == '__main__':
def prompt(prompt):
sys.stdout.write(prompt + ': ')
sys.stdout.flush()
return sys.stdin.readline().strip()
fromaddr = prompt('From')
toaddrs = prompt('To').split(',')
print('Enter message, end with ^D:')
msg = ''
while 1:
line = sys.stdin.readline()
if not line:
break
msg = msg + line
print('Message length is %d' % len(msg))
server = SMTP('localhost')
server.set_debuglevel(1)
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
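# --- Hedged usage sketch (illustration only; not part of the original module) ---
# A typical authenticated submission over STARTTLS using the classes above.
# The host name, port, credentials, and addresses are hypothetical
# placeholders for a server that advertises STARTTLS and AUTH.
def _example_submit_with_starttls():
    from email.message import EmailMessage
    msg = EmailMessage()
    msg['From'] = 'me@example.com'
    msg['To'] = 'you@example.com'
    msg['Subject'] = 'smtplib sketch'
    msg.set_content('A short test message.')
    with SMTP('smtp.example.com', 587) as client:    # __exit__ sends QUIT for us
        client.starttls()                            # upgrade the connection before authenticating
        client.login('me@example.com', 'app-password')
        client.send_message(msg)                     # addresses are taken from the headers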
|
""" Q_BRIDGE_MIB
The VLAN Bridge MIB module for managing Virtual Bridged
Local Area Networks, as defined by IEEE 802.1Q\-2003,
including Restricted Vlan Registration defined by
IEEE 802.1u\-2001 and Vlan Classification defined by
IEEE 802.1v\-2001.
Copyright (C) The Internet Society (2006). This version of
this MIB module is part of RFC 4363; See the RFC itself for
full legal notices.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
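# --- Hedged usage sketch (illustration only; not part of the generated bindings) ---
# A minimal example of reading this model from a device with YDK's CRUD
# service.  The device address, credentials, and port are hypothetical
# placeholders, and QBRIDGEMIB refers to the class generated further down in
# this module (the name is resolved when the function is called).
def _example_read_qbridge_mib():
    from ydk.services import CRUDService
    from ydk.providers import NetconfServiceProvider

    provider = NetconfServiceProvider(address='198.51.100.1',   # placeholder device
                                      username='admin',
                                      password='admin',
                                      port=830)
    crud = CRUDService()
    qbridge_filter = QBRIDGEMIB()        # empty top-level filter: read the whole subtree
    qbridge = crud.read(provider, qbridge_filter)
    if qbridge is not None:
        print('dot1qNumVlans:', qbridge.dot1qbase.dot1qnumvlans)
    return qbridge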
class QBRIDGEMIB(Entity):
"""
.. attribute:: dot1qbase
**type**\: :py:class:`Dot1qBase <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qBase>`
**config**\: False
.. attribute:: dot1qvlan
**type**\: :py:class:`Dot1qVlan <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qVlan>`
**config**\: False
.. attribute:: dot1qfdbtable
A table that contains configuration and control information for each Filtering Database currently operating on this device. Entries in this table appear automatically when VLANs are assigned FDB IDs in the dot1qVlanCurrentTable
**type**\: :py:class:`Dot1qFdbTable <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qFdbTable>`
**config**\: False
.. attribute:: dot1qtpfdbtable
A table that contains information about unicast entries for which the device has forwarding and/or filtering information. This information is used by the transparent bridging function in determining how to propagate a received frame
**type**\: :py:class:`Dot1qTpFdbTable <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qTpFdbTable>`
**config**\: False
.. attribute:: dot1qtpgrouptable
A table containing filtering information for VLANs configured into the bridge by (local or network) management, or learned dynamically, specifying the set of ports to which frames received on a VLAN for this FDB and containing a specific Group destination address are allowed to be forwarded
**type**\: :py:class:`Dot1qTpGroupTable <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qTpGroupTable>`
**config**\: False
.. attribute:: dot1qforwardalltable
A table containing forwarding information for each VLAN, specifying the set of ports to which forwarding of all multicasts applies, configured statically by management or dynamically by GMRP. An entry appears in this table for all VLANs that are currently instantiated
**type**\: :py:class:`Dot1qForwardAllTable <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qForwardAllTable>`
**config**\: False
.. attribute:: dot1qforwardunregisteredtable
A table containing forwarding information for each VLAN, specifying the set of ports to which forwarding of multicast group\-addressed frames for which no more specific forwarding information applies. This is configured statically by management and determined dynamically by GMRP. An entry appears in this table for all VLANs that are currently instantiated
**type**\: :py:class:`Dot1qForwardUnregisteredTable <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qForwardUnregisteredTable>`
**config**\: False
.. attribute:: dot1qstaticunicasttable
A table containing filtering information for Unicast MAC addresses for each Filtering Database, configured into the device by (local or network) management specifying the set of ports to which frames received from specific ports and containing specific unicast destination addresses are allowed to be forwarded. A value of zero in this table (as the port number from which frames with a specific destination address are received) is used to specify all ports for which there is no specific entry in this table for that particular destination address. Entries are valid for unicast addresses only
**type**\: :py:class:`Dot1qStaticUnicastTable <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qStaticUnicastTable>`
**config**\: False
.. attribute:: dot1qstaticmulticasttable
A table containing filtering information for Multicast and Broadcast MAC addresses for each VLAN, configured into the device by (local or network) management specifying the set of ports to which frames received from specific ports and containing specific Multicast and Broadcast destination addresses are allowed to be forwarded. A value of zero in this table (as the port number from which frames with a specific destination address are received) is used to specify all ports for which there is no specific entry in this table for that particular destination address. Entries are valid for Multicast and Broadcast addresses only
**type**\: :py:class:`Dot1qStaticMulticastTable <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qStaticMulticastTable>`
**config**\: False
.. attribute:: dot1qvlancurrenttable
A table containing current configuration information for each VLAN currently configured into the device by (local or network) management, or dynamically created as a result of GVRP requests received
**type**\: :py:class:`Dot1qVlanCurrentTable <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qVlanCurrentTable>`
**config**\: False
.. attribute:: dot1qvlanstatictable
A table containing static configuration information for each VLAN configured into the device by (local or network) management. All entries are permanent and will be restored after the device is reset
**type**\: :py:class:`Dot1qVlanStaticTable <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qVlanStaticTable>`
**config**\: False
.. attribute:: dot1qportvlanstatisticstable
A table containing per\-port, per\-VLAN statistics for traffic received. Separate objects are provided for both the most\-significant and least\-significant bits of statistics counters for ports that are associated with this transparent bridge. The most\-significant bit objects are only required on high\-capacity interfaces, as defined in the conformance clauses for these objects. This mechanism is provided as a way to read 64\-bit counters for agents that support only SNMPv1. Note that the reporting of most\-significant and least\- significant counter bits separately runs the risk of missing an overflow of the lower bits in the interval between sampling. The manager must be aware of this possibility, even within the same varbindlist, when interpreting the results of a request or asynchronous notification
**type**\: :py:class:`Dot1qPortVlanStatisticsTable <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qPortVlanStatisticsTable>`
**config**\: False
.. attribute:: dot1qportvlanhcstatisticstable
A table containing per\-port, per\-VLAN statistics for traffic on high\-capacity interfaces
**type**\: :py:class:`Dot1qPortVlanHCStatisticsTable <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qPortVlanHCStatisticsTable>`
**config**\: False
.. attribute:: dot1qlearningconstraintstable
A table containing learning constraints for sets of Shared and Independent VLANs
**type**\: :py:class:`Dot1qLearningConstraintsTable <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qLearningConstraintsTable>`
**config**\: False
.. attribute:: dot1vprotocolgrouptable
A table that contains mappings from Protocol Templates to Protocol Group Identifiers used for Port\-and\-Protocol\-based VLAN Classification
**type**\: :py:class:`Dot1vProtocolGroupTable <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1vProtocolGroupTable>`
**config**\: False
.. attribute:: dot1vprotocolporttable
A table that contains VID sets used for Port\-and\-Protocol\-based VLAN Classification
**type**\: :py:class:`Dot1vProtocolPortTable <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1vProtocolPortTable>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB, self).__init__()
self._top_entity = None
self.yang_name = "Q-BRIDGE-MIB"
self.yang_parent_name = "Q-BRIDGE-MIB"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("dot1qBase", ("dot1qbase", QBRIDGEMIB.Dot1qBase)), ("dot1qVlan", ("dot1qvlan", QBRIDGEMIB.Dot1qVlan)), ("dot1qFdbTable", ("dot1qfdbtable", QBRIDGEMIB.Dot1qFdbTable)), ("dot1qTpFdbTable", ("dot1qtpfdbtable", QBRIDGEMIB.Dot1qTpFdbTable)), ("dot1qTpGroupTable", ("dot1qtpgrouptable", QBRIDGEMIB.Dot1qTpGroupTable)), ("dot1qForwardAllTable", ("dot1qforwardalltable", QBRIDGEMIB.Dot1qForwardAllTable)), ("dot1qForwardUnregisteredTable", ("dot1qforwardunregisteredtable", QBRIDGEMIB.Dot1qForwardUnregisteredTable)), ("dot1qStaticUnicastTable", ("dot1qstaticunicasttable", QBRIDGEMIB.Dot1qStaticUnicastTable)), ("dot1qStaticMulticastTable", ("dot1qstaticmulticasttable", QBRIDGEMIB.Dot1qStaticMulticastTable)), ("dot1qVlanCurrentTable", ("dot1qvlancurrenttable", QBRIDGEMIB.Dot1qVlanCurrentTable)), ("dot1qVlanStaticTable", ("dot1qvlanstatictable", QBRIDGEMIB.Dot1qVlanStaticTable)), ("dot1qPortVlanStatisticsTable", ("dot1qportvlanstatisticstable", QBRIDGEMIB.Dot1qPortVlanStatisticsTable)), ("dot1qPortVlanHCStatisticsTable", ("dot1qportvlanhcstatisticstable", QBRIDGEMIB.Dot1qPortVlanHCStatisticsTable)), ("dot1qLearningConstraintsTable", ("dot1qlearningconstraintstable", QBRIDGEMIB.Dot1qLearningConstraintsTable)), ("dot1vProtocolGroupTable", ("dot1vprotocolgrouptable", QBRIDGEMIB.Dot1vProtocolGroupTable)), ("dot1vProtocolPortTable", ("dot1vprotocolporttable", QBRIDGEMIB.Dot1vProtocolPortTable))])
self._leafs = OrderedDict()
self.dot1qbase = QBRIDGEMIB.Dot1qBase()
self.dot1qbase.parent = self
self._children_name_map["dot1qbase"] = "dot1qBase"
self.dot1qvlan = QBRIDGEMIB.Dot1qVlan()
self.dot1qvlan.parent = self
self._children_name_map["dot1qvlan"] = "dot1qVlan"
self.dot1qfdbtable = QBRIDGEMIB.Dot1qFdbTable()
self.dot1qfdbtable.parent = self
self._children_name_map["dot1qfdbtable"] = "dot1qFdbTable"
self.dot1qtpfdbtable = QBRIDGEMIB.Dot1qTpFdbTable()
self.dot1qtpfdbtable.parent = self
self._children_name_map["dot1qtpfdbtable"] = "dot1qTpFdbTable"
self.dot1qtpgrouptable = QBRIDGEMIB.Dot1qTpGroupTable()
self.dot1qtpgrouptable.parent = self
self._children_name_map["dot1qtpgrouptable"] = "dot1qTpGroupTable"
self.dot1qforwardalltable = QBRIDGEMIB.Dot1qForwardAllTable()
self.dot1qforwardalltable.parent = self
self._children_name_map["dot1qforwardalltable"] = "dot1qForwardAllTable"
self.dot1qforwardunregisteredtable = QBRIDGEMIB.Dot1qForwardUnregisteredTable()
self.dot1qforwardunregisteredtable.parent = self
self._children_name_map["dot1qforwardunregisteredtable"] = "dot1qForwardUnregisteredTable"
self.dot1qstaticunicasttable = QBRIDGEMIB.Dot1qStaticUnicastTable()
self.dot1qstaticunicasttable.parent = self
self._children_name_map["dot1qstaticunicasttable"] = "dot1qStaticUnicastTable"
self.dot1qstaticmulticasttable = QBRIDGEMIB.Dot1qStaticMulticastTable()
self.dot1qstaticmulticasttable.parent = self
self._children_name_map["dot1qstaticmulticasttable"] = "dot1qStaticMulticastTable"
self.dot1qvlancurrenttable = QBRIDGEMIB.Dot1qVlanCurrentTable()
self.dot1qvlancurrenttable.parent = self
self._children_name_map["dot1qvlancurrenttable"] = "dot1qVlanCurrentTable"
self.dot1qvlanstatictable = QBRIDGEMIB.Dot1qVlanStaticTable()
self.dot1qvlanstatictable.parent = self
self._children_name_map["dot1qvlanstatictable"] = "dot1qVlanStaticTable"
self.dot1qportvlanstatisticstable = QBRIDGEMIB.Dot1qPortVlanStatisticsTable()
self.dot1qportvlanstatisticstable.parent = self
self._children_name_map["dot1qportvlanstatisticstable"] = "dot1qPortVlanStatisticsTable"
self.dot1qportvlanhcstatisticstable = QBRIDGEMIB.Dot1qPortVlanHCStatisticsTable()
self.dot1qportvlanhcstatisticstable.parent = self
self._children_name_map["dot1qportvlanhcstatisticstable"] = "dot1qPortVlanHCStatisticsTable"
self.dot1qlearningconstraintstable = QBRIDGEMIB.Dot1qLearningConstraintsTable()
self.dot1qlearningconstraintstable.parent = self
self._children_name_map["dot1qlearningconstraintstable"] = "dot1qLearningConstraintsTable"
self.dot1vprotocolgrouptable = QBRIDGEMIB.Dot1vProtocolGroupTable()
self.dot1vprotocolgrouptable.parent = self
self._children_name_map["dot1vprotocolgrouptable"] = "dot1vProtocolGroupTable"
self.dot1vprotocolporttable = QBRIDGEMIB.Dot1vProtocolPortTable()
self.dot1vprotocolporttable.parent = self
self._children_name_map["dot1vprotocolporttable"] = "dot1vProtocolPortTable"
self._segment_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB, [], name, value)
class Dot1qBase(Entity):
"""
.. attribute:: dot1qvlanversionnumber
The version number of IEEE 802.1Q that this device supports
**type**\: :py:class:`Dot1qVlanVersionNumber <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qBase.Dot1qVlanVersionNumber>`
**config**\: False
.. attribute:: dot1qmaxvlanid
The maximum IEEE 802.1Q VLAN\-ID that this device supports
**type**\: int
**range:** 1..4094
**config**\: False
.. attribute:: dot1qmaxsupportedvlans
The maximum number of IEEE 802.1Q VLANs that this device supports
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: dot1qnumvlans
The current number of IEEE 802.1Q VLANs that are configured in this device
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: dot1qgvrpstatus
The administrative status requested by management for GVRP. The value enabled(1) indicates that GVRP should be enabled on this device, on all ports for which it has not been specifically disabled. When disabled(2), GVRP is disabled on all ports, and all GVRP packets will be forwarded transparently. This object affects all GVRP Applicant and Registrar state machines. A transition from disabled(2) to enabled(1) will cause a reset of all GVRP state machines on all ports. The value of this object MUST be retained across reinitializations of the management system
**type**\: :py:class:`EnabledStatus <ydk.models.cisco_ios_xe.P_BRIDGE_MIB.EnabledStatus>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qBase, self).__init__()
self.yang_name = "dot1qBase"
self.yang_parent_name = "Q-BRIDGE-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('dot1qvlanversionnumber', (YLeaf(YType.enumeration, 'dot1qVlanVersionNumber'), [('ydk.models.cisco_ios_xe.Q_BRIDGE_MIB', 'QBRIDGEMIB', 'Dot1qBase.Dot1qVlanVersionNumber')])),
('dot1qmaxvlanid', (YLeaf(YType.int32, 'dot1qMaxVlanId'), ['int'])),
('dot1qmaxsupportedvlans', (YLeaf(YType.uint32, 'dot1qMaxSupportedVlans'), ['int'])),
('dot1qnumvlans', (YLeaf(YType.uint32, 'dot1qNumVlans'), ['int'])),
('dot1qgvrpstatus', (YLeaf(YType.enumeration, 'dot1qGvrpStatus'), [('ydk.models.cisco_ios_xe.P_BRIDGE_MIB', 'EnabledStatus', '')])),
])
self.dot1qvlanversionnumber = None
self.dot1qmaxvlanid = None
self.dot1qmaxsupportedvlans = None
self.dot1qnumvlans = None
self.dot1qgvrpstatus = None
self._segment_path = lambda: "dot1qBase"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qBase, ['dot1qvlanversionnumber', 'dot1qmaxvlanid', 'dot1qmaxsupportedvlans', 'dot1qnumvlans', 'dot1qgvrpstatus'], name, value)
class Dot1qVlanVersionNumber(Enum):
"""
Dot1qVlanVersionNumber (Enum Class)
The version number of IEEE 802.1Q that this device
supports.
.. data:: version1 = 1
"""
version1 = Enum.YLeaf(1, "version1")
class Dot1qVlan(Entity):
"""
.. attribute:: dot1qvlannumdeletes
The number of times a VLAN entry has been deleted from the dot1qVlanCurrentTable (for any reason). If an entry is deleted, then inserted, and then deleted, this counter will be incremented by 2
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: dot1qnextfreelocalvlanindex
The next available value for dot1qVlanIndex of a local VLAN entry in dot1qVlanStaticTable. This will report values >=4096 if a new Local VLAN may be created or else the value 0 if this is not possible. A row creation operation in this table for an entry with a local VlanIndex value may fail if the current value of this object is not used as the index. Even if the value read is used, there is no guarantee that it will still be the valid index when the create operation is attempted; another manager may have already got in during the intervening time interval. In this case, dot1qNextFreeLocalVlanIndex should be re\-read and the creation re\-tried with the new value. This value will automatically change when the current value is used to create a new row
**type**\: int
**range:** 0..0 \| 4096..2147483647
**config**\: False
.. attribute:: dot1qconstraintsetdefault
The identity of the constraint set to which a VLAN belongs, if there is not an explicit entry for that VLAN in dot1qLearningConstraintsTable. The value of this object MUST be retained across reinitializations of the management system
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: dot1qconstrainttypedefault
The type of constraint set to which a VLAN belongs, if there is not an explicit entry for that VLAN in dot1qLearningConstraintsTable. The types are as defined for dot1qConstraintType. The value of this object MUST be retained across reinitializations of the management system
**type**\: :py:class:`Dot1qConstraintTypeDefault <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qVlan.Dot1qConstraintTypeDefault>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qVlan, self).__init__()
self.yang_name = "dot1qVlan"
self.yang_parent_name = "Q-BRIDGE-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('dot1qvlannumdeletes', (YLeaf(YType.uint32, 'dot1qVlanNumDeletes'), ['int'])),
('dot1qnextfreelocalvlanindex', (YLeaf(YType.int32, 'dot1qNextFreeLocalVlanIndex'), ['int'])),
('dot1qconstraintsetdefault', (YLeaf(YType.int32, 'dot1qConstraintSetDefault'), ['int'])),
('dot1qconstrainttypedefault', (YLeaf(YType.enumeration, 'dot1qConstraintTypeDefault'), [('ydk.models.cisco_ios_xe.Q_BRIDGE_MIB', 'QBRIDGEMIB', 'Dot1qVlan.Dot1qConstraintTypeDefault')])),
])
self.dot1qvlannumdeletes = None
self.dot1qnextfreelocalvlanindex = None
self.dot1qconstraintsetdefault = None
self.dot1qconstrainttypedefault = None
self._segment_path = lambda: "dot1qVlan"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qVlan, ['dot1qvlannumdeletes', 'dot1qnextfreelocalvlanindex', 'dot1qconstraintsetdefault', 'dot1qconstrainttypedefault'], name, value)
class Dot1qConstraintTypeDefault(Enum):
"""
Dot1qConstraintTypeDefault (Enum Class)
The type of constraint set to which a VLAN belongs, if
there is not an explicit entry for that VLAN in
dot1qLearningConstraintsTable. The types are as defined
for dot1qConstraintType.
The value of this object MUST be retained across
reinitializations of the management system.
.. data:: independent = 1
.. data:: shared = 2
"""
independent = Enum.YLeaf(1, "independent")
shared = Enum.YLeaf(2, "shared")
class Dot1qFdbTable(Entity):
"""
A table that contains configuration and control
information for each Filtering Database currently
operating on this device. Entries in this table appear
automatically when VLANs are assigned FDB IDs in the
dot1qVlanCurrentTable.
.. attribute:: dot1qfdbentry
Information about a specific Filtering Database
**type**\: list of :py:class:`Dot1qFdbEntry <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qFdbTable.Dot1qFdbEntry>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qFdbTable, self).__init__()
self.yang_name = "dot1qFdbTable"
self.yang_parent_name = "Q-BRIDGE-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("dot1qFdbEntry", ("dot1qfdbentry", QBRIDGEMIB.Dot1qFdbTable.Dot1qFdbEntry))])
self._leafs = OrderedDict()
self.dot1qfdbentry = YList(self)
self._segment_path = lambda: "dot1qFdbTable"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qFdbTable, [], name, value)
class Dot1qFdbEntry(Entity):
"""
Information about a specific Filtering Database.
.. attribute:: dot1qfdbid (key)
The identity of this Filtering Database
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: dot1qfdbdynamiccount
The current number of dynamic entries in this Filtering Database
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qFdbTable.Dot1qFdbEntry, self).__init__()
self.yang_name = "dot1qFdbEntry"
self.yang_parent_name = "dot1qFdbTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['dot1qfdbid']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('dot1qfdbid', (YLeaf(YType.uint32, 'dot1qFdbId'), ['int'])),
('dot1qfdbdynamiccount', (YLeaf(YType.uint32, 'dot1qFdbDynamicCount'), ['int'])),
])
self.dot1qfdbid = None
self.dot1qfdbdynamiccount = None
self._segment_path = lambda: "dot1qFdbEntry" + "[dot1qFdbId='" + str(self.dot1qfdbid) + "']"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/dot1qFdbTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qFdbTable.Dot1qFdbEntry, ['dot1qfdbid', 'dot1qfdbdynamiccount'], name, value)
class Dot1qTpFdbTable(Entity):
"""
A table that contains information about unicast entries
for which the device has forwarding and/or filtering
information. This information is used by the
transparent bridging function in determining how to
propagate a received frame.
.. attribute:: dot1qtpfdbentry
Information about a specific unicast MAC address for which the device has some forwarding and/or filtering information
**type**\: list of :py:class:`Dot1qTpFdbEntry <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qTpFdbTable.Dot1qTpFdbEntry>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qTpFdbTable, self).__init__()
self.yang_name = "dot1qTpFdbTable"
self.yang_parent_name = "Q-BRIDGE-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("dot1qTpFdbEntry", ("dot1qtpfdbentry", QBRIDGEMIB.Dot1qTpFdbTable.Dot1qTpFdbEntry))])
self._leafs = OrderedDict()
self.dot1qtpfdbentry = YList(self)
self._segment_path = lambda: "dot1qTpFdbTable"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qTpFdbTable, [], name, value)
class Dot1qTpFdbEntry(Entity):
"""
Information about a specific unicast MAC address for
which the device has some forwarding and/or filtering
information.
.. attribute:: dot1qfdbid (key)
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`dot1qfdbid <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qFdbTable.Dot1qFdbEntry>`
**config**\: False
.. attribute:: dot1qtpfdbaddress (key)
A unicast MAC address for which the device has forwarding and/or filtering information
**type**\: str
**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
**config**\: False
.. attribute:: dot1qtpfdbport
Either the value '0', or the port number of the port on which a frame having a source address equal to the value of the corresponding instance of dot1qTpFdbAddress has been seen. A value of '0' indicates that the port number has not been learned but that the device does have some forwarding/filtering information about this address (e.g., in the dot1qStaticUnicastTable). Implementors are encouraged to assign the port value to this object whenever it is learned, even for addresses for which the corresponding value of dot1qTpFdbStatus is not learned(3)
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: dot1qtpfdbstatus
The status of this entry. The meanings of the values are\: other(1) \- none of the following. This may include the case where some other MIB object (not the corresponding instance of dot1qTpFdbPort, nor an entry in the dot1qStaticUnicastTable) is being used to determine if and how frames addressed to the value of the corresponding instance of dot1qTpFdbAddress are being forwarded. invalid(2) \- this entry is no longer valid (e.g., it was learned but has since aged out), but has not yet been flushed from the table. learned(3) \- the value of the corresponding instance of dot1qTpFdbPort was learned and is being used. self(4) \- the value of the corresponding instance of dot1qTpFdbAddress represents one of the device's addresses. The corresponding instance of dot1qTpFdbPort indicates which of the device's ports has this address. mgmt(5) \- the value of the corresponding instance of dot1qTpFdbAddress is also the value of an existing instance of dot1qStaticAddress
**type**\: :py:class:`Dot1qTpFdbStatus <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qTpFdbTable.Dot1qTpFdbEntry.Dot1qTpFdbStatus>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qTpFdbTable.Dot1qTpFdbEntry, self).__init__()
self.yang_name = "dot1qTpFdbEntry"
self.yang_parent_name = "dot1qTpFdbTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['dot1qfdbid','dot1qtpfdbaddress']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('dot1qfdbid', (YLeaf(YType.str, 'dot1qFdbId'), ['int'])),
('dot1qtpfdbaddress', (YLeaf(YType.str, 'dot1qTpFdbAddress'), ['str'])),
('dot1qtpfdbport', (YLeaf(YType.int32, 'dot1qTpFdbPort'), ['int'])),
('dot1qtpfdbstatus', (YLeaf(YType.enumeration, 'dot1qTpFdbStatus'), [('ydk.models.cisco_ios_xe.Q_BRIDGE_MIB', 'QBRIDGEMIB', 'Dot1qTpFdbTable.Dot1qTpFdbEntry.Dot1qTpFdbStatus')])),
])
self.dot1qfdbid = None
self.dot1qtpfdbaddress = None
self.dot1qtpfdbport = None
self.dot1qtpfdbstatus = None
self._segment_path = lambda: "dot1qTpFdbEntry" + "[dot1qFdbId='" + str(self.dot1qfdbid) + "']" + "[dot1qTpFdbAddress='" + str(self.dot1qtpfdbaddress) + "']"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/dot1qTpFdbTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qTpFdbTable.Dot1qTpFdbEntry, ['dot1qfdbid', 'dot1qtpfdbaddress', 'dot1qtpfdbport', 'dot1qtpfdbstatus'], name, value)
class Dot1qTpFdbStatus(Enum):
"""
Dot1qTpFdbStatus (Enum Class)
The status of this entry. The meanings of the values
are\:
other(1) \- none of the following. This may include
the case where some other MIB object (not the
corresponding instance of dot1qTpFdbPort, nor an
entry in the dot1qStaticUnicastTable) is being
used to determine if and how frames addressed to
the value of the corresponding instance of
dot1qTpFdbAddress are being forwarded.
invalid(2) \- this entry is no longer valid (e.g., it
was learned but has since aged out), but has not
yet been flushed from the table.
learned(3) \- the value of the corresponding instance
of dot1qTpFdbPort was learned and is being used.
self(4) \- the value of the corresponding instance of
dot1qTpFdbAddress represents one of the device's
addresses. The corresponding instance of
dot1qTpFdbPort indicates which of the device's
ports has this address.
mgmt(5) \- the value of the corresponding instance of
dot1qTpFdbAddress is also the value of an
existing instance of dot1qStaticAddress.
.. data:: other = 1
.. data:: invalid = 2
.. data:: learned = 3
.. data:: self = 4
.. data:: mgmt = 5
"""
other = Enum.YLeaf(1, "other")
invalid = Enum.YLeaf(2, "invalid")
learned = Enum.YLeaf(3, "learned")
self = Enum.YLeaf(4, "self")
mgmt = Enum.YLeaf(5, "mgmt")
class Dot1qTpGroupTable(Entity):
"""
A table containing filtering information for VLANs
configured into the bridge by (local or network)
management, or learned dynamically, specifying the set of
ports to which frames received on a VLAN for this FDB
and containing a specific Group destination address are
allowed to be forwarded.
.. attribute:: dot1qtpgroupentry
Filtering information configured into the bridge by management, or learned dynamically, specifying the set of ports to which frames received on a VLAN and containing a specific Group destination address are allowed to be forwarded. The subset of these ports learned dynamically is also provided
**type**\: list of :py:class:`Dot1qTpGroupEntry <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qTpGroupTable.Dot1qTpGroupEntry>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qTpGroupTable, self).__init__()
self.yang_name = "dot1qTpGroupTable"
self.yang_parent_name = "Q-BRIDGE-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("dot1qTpGroupEntry", ("dot1qtpgroupentry", QBRIDGEMIB.Dot1qTpGroupTable.Dot1qTpGroupEntry))])
self._leafs = OrderedDict()
self.dot1qtpgroupentry = YList(self)
self._segment_path = lambda: "dot1qTpGroupTable"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qTpGroupTable, [], name, value)
class Dot1qTpGroupEntry(Entity):
"""
Filtering information configured into the bridge by
management, or learned dynamically, specifying the set of
ports to which frames received on a VLAN and containing
a specific Group destination address are allowed to be
forwarded. The subset of these ports learned dynamically
is also provided.
.. attribute:: dot1qvlanindex (key)
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`dot1qvlanindex <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qVlanCurrentTable.Dot1qVlanCurrentEntry>`
**config**\: False
.. attribute:: dot1qtpgroupaddress (key)
The destination Group MAC address in a frame to which this entry's filtering information applies
**type**\: str
**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
**config**\: False
.. attribute:: dot1qtpgroupegressports
The complete set of ports, in this VLAN, to which frames destined for this Group MAC address are currently being explicitly forwarded. This does not include ports for which this address is only implicitly forwarded, in the dot1qForwardAllPorts list
**type**\: str
**config**\: False
.. attribute:: dot1qtpgrouplearnt
The subset of ports in dot1qTpGroupEgressPorts that were learned by GMRP or some other dynamic mechanism, in this Filtering database
**type**\: str
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qTpGroupTable.Dot1qTpGroupEntry, self).__init__()
self.yang_name = "dot1qTpGroupEntry"
self.yang_parent_name = "dot1qTpGroupTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['dot1qvlanindex','dot1qtpgroupaddress']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('dot1qvlanindex', (YLeaf(YType.str, 'dot1qVlanIndex'), ['int'])),
('dot1qtpgroupaddress', (YLeaf(YType.str, 'dot1qTpGroupAddress'), ['str'])),
('dot1qtpgroupegressports', (YLeaf(YType.str, 'dot1qTpGroupEgressPorts'), ['str'])),
('dot1qtpgrouplearnt', (YLeaf(YType.str, 'dot1qTpGroupLearnt'), ['str'])),
])
self.dot1qvlanindex = None
self.dot1qtpgroupaddress = None
self.dot1qtpgroupegressports = None
self.dot1qtpgrouplearnt = None
self._segment_path = lambda: "dot1qTpGroupEntry" + "[dot1qVlanIndex='" + str(self.dot1qvlanindex) + "']" + "[dot1qTpGroupAddress='" + str(self.dot1qtpgroupaddress) + "']"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/dot1qTpGroupTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qTpGroupTable.Dot1qTpGroupEntry, ['dot1qvlanindex', 'dot1qtpgroupaddress', 'dot1qtpgroupegressports', 'dot1qtpgrouplearnt'], name, value)
class Dot1qForwardAllTable(Entity):
"""
A table containing forwarding information for each
VLAN, specifying the set of ports to which forwarding of
all multicasts applies, configured statically by
management or dynamically by GMRP. An entry appears in
this table for all VLANs that are currently
instantiated.
.. attribute:: dot1qforwardallentry
Forwarding information for a VLAN, specifying the set of ports to which all multicasts should be forwarded, configured statically by management or dynamically by GMRP
**type**\: list of :py:class:`Dot1qForwardAllEntry <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qForwardAllTable.Dot1qForwardAllEntry>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qForwardAllTable, self).__init__()
self.yang_name = "dot1qForwardAllTable"
self.yang_parent_name = "Q-BRIDGE-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("dot1qForwardAllEntry", ("dot1qforwardallentry", QBRIDGEMIB.Dot1qForwardAllTable.Dot1qForwardAllEntry))])
self._leafs = OrderedDict()
self.dot1qforwardallentry = YList(self)
self._segment_path = lambda: "dot1qForwardAllTable"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qForwardAllTable, [], name, value)
class Dot1qForwardAllEntry(Entity):
"""
Forwarding information for a VLAN, specifying the set
of ports to which all multicasts should be forwarded,
configured statically by management or dynamically by
GMRP.
.. attribute:: dot1qvlanindex (key)
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`dot1qvlanindex <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qVlanCurrentTable.Dot1qVlanCurrentEntry>`
**config**\: False
.. attribute:: dot1qforwardallports
The complete set of ports in this VLAN to which all multicast group\-addressed frames are to be forwarded. This includes ports for which this need has been determined dynamically by GMRP, or configured statically by management
**type**\: str
**config**\: False
.. attribute:: dot1qforwardallstaticports
The set of ports configured by management in this VLAN to which all multicast group\-addressed frames are to be forwarded. Ports entered in this list will also appear in the complete set shown by dot1qForwardAllPorts. This value will be restored after the device is reset. This only applies to ports that are members of the VLAN, defined by dot1qVlanCurrentEgressPorts. A port may not be added in this set if it is already a member of the set of ports in dot1qForwardAllForbiddenPorts. The default value is a string of ones of appropriate length, to indicate the standard behaviour of using basic filtering services, i.e., forward all multicasts to all ports. The value of this object MUST be retained across reinitializations of the management system
**type**\: str
**config**\: False
.. attribute:: dot1qforwardallforbiddenports
The set of ports configured by management in this VLAN for which the Service Requirement attribute Forward All Multicast Groups may not be dynamically registered by GMRP. This value will be restored after the device is reset. A port may not be added in this set if it is already a member of the set of ports in dot1qForwardAllStaticPorts. The default value is a string of zeros of appropriate length. The value of this object MUST be retained across reinitializations of the management system
**type**\: str
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qForwardAllTable.Dot1qForwardAllEntry, self).__init__()
self.yang_name = "dot1qForwardAllEntry"
self.yang_parent_name = "dot1qForwardAllTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['dot1qvlanindex']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('dot1qvlanindex', (YLeaf(YType.str, 'dot1qVlanIndex'), ['int'])),
('dot1qforwardallports', (YLeaf(YType.str, 'dot1qForwardAllPorts'), ['str'])),
('dot1qforwardallstaticports', (YLeaf(YType.str, 'dot1qForwardAllStaticPorts'), ['str'])),
('dot1qforwardallforbiddenports', (YLeaf(YType.str, 'dot1qForwardAllForbiddenPorts'), ['str'])),
])
self.dot1qvlanindex = None
self.dot1qforwardallports = None
self.dot1qforwardallstaticports = None
self.dot1qforwardallforbiddenports = None
self._segment_path = lambda: "dot1qForwardAllEntry" + "[dot1qVlanIndex='" + str(self.dot1qvlanindex) + "']"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/dot1qForwardAllTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qForwardAllTable.Dot1qForwardAllEntry, ['dot1qvlanindex', 'dot1qforwardallports', 'dot1qforwardallstaticports', 'dot1qforwardallforbiddenports'], name, value)
class Dot1qForwardUnregisteredTable(Entity):
"""
A table containing forwarding information for each
VLAN, specifying the set of ports to which forwarding of
multicast group\-addressed frames for which no
more specific forwarding information applies. This is
configured statically by management and determined
dynamically by GMRP. An entry appears in this table for
all VLANs that are currently instantiated.
.. attribute:: dot1qforwardunregisteredentry
Forwarding information for a VLAN, specifying the set of ports to which all multicasts for which there is no more specific forwarding information shall be forwarded. This is configured statically by management or dynamically by GMRP
**type**\: list of :py:class:`Dot1qForwardUnregisteredEntry <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qForwardUnregisteredTable.Dot1qForwardUnregisteredEntry>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qForwardUnregisteredTable, self).__init__()
self.yang_name = "dot1qForwardUnregisteredTable"
self.yang_parent_name = "Q-BRIDGE-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("dot1qForwardUnregisteredEntry", ("dot1qforwardunregisteredentry", QBRIDGEMIB.Dot1qForwardUnregisteredTable.Dot1qForwardUnregisteredEntry))])
self._leafs = OrderedDict()
self.dot1qforwardunregisteredentry = YList(self)
self._segment_path = lambda: "dot1qForwardUnregisteredTable"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qForwardUnregisteredTable, [], name, value)
class Dot1qForwardUnregisteredEntry(Entity):
"""
Forwarding information for a VLAN, specifying the set
of ports to which all multicasts for which there is no
more specific forwarding information shall be forwarded.
This is configured statically by management or
dynamically by GMRP.
.. attribute:: dot1qvlanindex (key)
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`dot1qvlanindex <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qVlanCurrentTable.Dot1qVlanCurrentEntry>`
**config**\: False
.. attribute:: dot1qforwardunregisteredports
The complete set of ports in this VLAN to which multicast group\-addressed frames for which there is no more specific forwarding information will be forwarded. This includes ports for which this need has been determined dynamically by GMRP, or configured statically by management
**type**\: str
**config**\: False
.. attribute:: dot1qforwardunregisteredstaticports
The set of ports configured by management, in this VLAN, to which multicast group\-addressed frames for which there is no more specific forwarding information are to be forwarded. Ports entered in this list will also appear in the complete set shown by dot1qForwardUnregisteredPorts. This value will be restored after the device is reset. A port may not be added in this set if it is already a member of the set of ports in dot1qForwardUnregisteredForbiddenPorts. The default value is a string of zeros of appropriate length, although this has no effect with the default value of dot1qForwardAllStaticPorts. The value of this object MUST be retained across reinitializations of the management system
**type**\: str
**config**\: False
.. attribute:: dot1qforwardunregisteredforbiddenports
The set of ports configured by management in this VLAN for which the Service Requirement attribute Forward Unregistered Multicast Groups may not be dynamically registered by GMRP. This value will be restored after the device is reset. A port may not be added in this set if it is already a member of the set of ports in dot1qForwardUnregisteredStaticPorts. The default value is a string of zeros of appropriate length. The value of this object MUST be retained across reinitializations of the management system
**type**\: str
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qForwardUnregisteredTable.Dot1qForwardUnregisteredEntry, self).__init__()
self.yang_name = "dot1qForwardUnregisteredEntry"
self.yang_parent_name = "dot1qForwardUnregisteredTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['dot1qvlanindex']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('dot1qvlanindex', (YLeaf(YType.str, 'dot1qVlanIndex'), ['int'])),
('dot1qforwardunregisteredports', (YLeaf(YType.str, 'dot1qForwardUnregisteredPorts'), ['str'])),
('dot1qforwardunregisteredstaticports', (YLeaf(YType.str, 'dot1qForwardUnregisteredStaticPorts'), ['str'])),
('dot1qforwardunregisteredforbiddenports', (YLeaf(YType.str, 'dot1qForwardUnregisteredForbiddenPorts'), ['str'])),
])
self.dot1qvlanindex = None
self.dot1qforwardunregisteredports = None
self.dot1qforwardunregisteredstaticports = None
self.dot1qforwardunregisteredforbiddenports = None
self._segment_path = lambda: "dot1qForwardUnregisteredEntry" + "[dot1qVlanIndex='" + str(self.dot1qvlanindex) + "']"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/dot1qForwardUnregisteredTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qForwardUnregisteredTable.Dot1qForwardUnregisteredEntry, ['dot1qvlanindex', 'dot1qforwardunregisteredports', 'dot1qforwardunregisteredstaticports', 'dot1qforwardunregisteredforbiddenports'], name, value)
class Dot1qStaticUnicastTable(Entity):
"""
A table containing filtering information for Unicast
MAC addresses for each Filtering Database, configured
into the device by (local or network) management
specifying the set of ports to which frames received
from specific ports and containing specific unicast
destination addresses are allowed to be forwarded. A
value of zero in this table (as the port number from
which frames with a specific destination address are
received) is used to specify all ports for which there
is no specific entry in this table for that particular
destination address. Entries are valid for unicast
addresses only.
.. attribute:: dot1qstaticunicastentry
Filtering information configured into the device by (local or network) management specifying the set of ports to which frames received from a specific port and containing a specific unicast destination address are allowed to be forwarded
**type**\: list of :py:class:`Dot1qStaticUnicastEntry <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qStaticUnicastTable.Dot1qStaticUnicastEntry>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qStaticUnicastTable, self).__init__()
self.yang_name = "dot1qStaticUnicastTable"
self.yang_parent_name = "Q-BRIDGE-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("dot1qStaticUnicastEntry", ("dot1qstaticunicastentry", QBRIDGEMIB.Dot1qStaticUnicastTable.Dot1qStaticUnicastEntry))])
self._leafs = OrderedDict()
self.dot1qstaticunicastentry = YList(self)
self._segment_path = lambda: "dot1qStaticUnicastTable"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qStaticUnicastTable, [], name, value)
class Dot1qStaticUnicastEntry(Entity):
"""
Filtering information configured into the device by
(local or network) management specifying the set of
ports to which frames received from a specific port and
containing a specific unicast destination address are
allowed to be forwarded.
.. attribute:: dot1qfdbid (key)
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`dot1qfdbid <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qFdbTable.Dot1qFdbEntry>`
**config**\: False
.. attribute:: dot1qstaticunicastaddress (key)
The destination MAC address in a frame to which this entry's filtering information applies. This object must take the value of a unicast address
**type**\: str
**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
**config**\: False
.. attribute:: dot1qstaticunicastreceiveport (key)
Either the value '0' or the port number of the port from which a frame must be received in order for this entry's filtering information to apply. A value of zero indicates that this entry applies on all ports of the device for which there is no other applicable entry
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: dot1qstaticunicastallowedtogoto
The set of ports for which a frame with a specific unicast address will be flooded in the event that it has not been learned. It also specifies the set of ports on which a specific unicast address may be dynamically learned. The dot1qTpFdbTable will have an equivalent entry with a dot1qTpFdbPort value of '0' until this address has been learned, at which point it will be updated with the port the address has been seen on. This only applies to ports that are members of the VLAN, defined by dot1qVlanCurrentEgressPorts. The default value of this object is a string of ones of appropriate length. The value of this object MUST be retained across reinitializations of the management system
**type**\: str
**config**\: False
.. attribute:: dot1qstaticunicaststatus
This object indicates the status of this entry. other(1) \- this entry is currently in use, but the conditions under which it will remain so differ from the following values. invalid(2) \- writing this value to the object removes the corresponding entry. permanent(3) \- this entry is currently in use and will remain so after the next reset of the bridge. deleteOnReset(4) \- this entry is currently in use and will remain so until the next reset of the bridge. deleteOnTimeout(5) \- this entry is currently in use and will remain so until it is aged out. The value of this object MUST be retained across reinitializations of the management system
**type**\: :py:class:`Dot1qStaticUnicastStatus <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qStaticUnicastTable.Dot1qStaticUnicastEntry.Dot1qStaticUnicastStatus>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qStaticUnicastTable.Dot1qStaticUnicastEntry, self).__init__()
self.yang_name = "dot1qStaticUnicastEntry"
self.yang_parent_name = "dot1qStaticUnicastTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['dot1qfdbid','dot1qstaticunicastaddress','dot1qstaticunicastreceiveport']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('dot1qfdbid', (YLeaf(YType.str, 'dot1qFdbId'), ['int'])),
('dot1qstaticunicastaddress', (YLeaf(YType.str, 'dot1qStaticUnicastAddress'), ['str'])),
('dot1qstaticunicastreceiveport', (YLeaf(YType.int32, 'dot1qStaticUnicastReceivePort'), ['int'])),
('dot1qstaticunicastallowedtogoto', (YLeaf(YType.str, 'dot1qStaticUnicastAllowedToGoTo'), ['str'])),
('dot1qstaticunicaststatus', (YLeaf(YType.enumeration, 'dot1qStaticUnicastStatus'), [('ydk.models.cisco_ios_xe.Q_BRIDGE_MIB', 'QBRIDGEMIB', 'Dot1qStaticUnicastTable.Dot1qStaticUnicastEntry.Dot1qStaticUnicastStatus')])),
])
self.dot1qfdbid = None
self.dot1qstaticunicastaddress = None
self.dot1qstaticunicastreceiveport = None
self.dot1qstaticunicastallowedtogoto = None
self.dot1qstaticunicaststatus = None
self._segment_path = lambda: "dot1qStaticUnicastEntry" + "[dot1qFdbId='" + str(self.dot1qfdbid) + "']" + "[dot1qStaticUnicastAddress='" + str(self.dot1qstaticunicastaddress) + "']" + "[dot1qStaticUnicastReceivePort='" + str(self.dot1qstaticunicastreceiveport) + "']"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/dot1qStaticUnicastTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qStaticUnicastTable.Dot1qStaticUnicastEntry, ['dot1qfdbid', 'dot1qstaticunicastaddress', 'dot1qstaticunicastreceiveport', 'dot1qstaticunicastallowedtogoto', 'dot1qstaticunicaststatus'], name, value)
class Dot1qStaticUnicastStatus(Enum):
"""
Dot1qStaticUnicastStatus (Enum Class)
This object indicates the status of this entry.
other(1) \- this entry is currently in use, but
the conditions under which it will remain
so differ from the following values.
invalid(2) \- writing this value to the object
removes the corresponding entry.
permanent(3) \- this entry is currently in use
and will remain so after the next reset of
the bridge.
deleteOnReset(4) \- this entry is currently in
use and will remain so until the next
reset of the bridge.
deleteOnTimeout(5) \- this entry is currently in
use and will remain so until it is aged out.
The value of this object MUST be retained across
reinitializations of the management system.
.. data:: other = 1
.. data:: invalid = 2
.. data:: permanent = 3
.. data:: deleteOnReset = 4
.. data:: deleteOnTimeout = 5
"""
other = Enum.YLeaf(1, "other")
invalid = Enum.YLeaf(2, "invalid")
permanent = Enum.YLeaf(3, "permanent")
deleteOnReset = Enum.YLeaf(4, "deleteOnReset")
deleteOnTimeout = Enum.YLeaf(5, "deleteOnTimeout")
class Dot1qStaticMulticastTable(Entity):
"""
A table containing filtering information for Multicast
and Broadcast MAC addresses for each VLAN, configured
into the device by (local or network) management
specifying the set of ports to which frames received
from specific ports and containing specific Multicast
and Broadcast destination addresses are allowed to be
forwarded. A value of zero in this table (as the port
number from which frames with a specific destination
address are received) is used to specify all ports for
which there is no specific entry in this table for that
particular destination address. Entries are valid for
Multicast and Broadcast addresses only.
.. attribute:: dot1qstaticmulticastentry
Filtering information configured into the device by (local or network) management specifying the set of ports to which frames received from this specific port for this VLAN and containing this Multicast or Broadcast destination address are allowed to be forwarded
**type**\: list of :py:class:`Dot1qStaticMulticastEntry <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qStaticMulticastTable.Dot1qStaticMulticastEntry>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qStaticMulticastTable, self).__init__()
self.yang_name = "dot1qStaticMulticastTable"
self.yang_parent_name = "Q-BRIDGE-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("dot1qStaticMulticastEntry", ("dot1qstaticmulticastentry", QBRIDGEMIB.Dot1qStaticMulticastTable.Dot1qStaticMulticastEntry))])
self._leafs = OrderedDict()
self.dot1qstaticmulticastentry = YList(self)
self._segment_path = lambda: "dot1qStaticMulticastTable"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qStaticMulticastTable, [], name, value)
class Dot1qStaticMulticastEntry(Entity):
"""
Filtering information configured into the device by
(local or network) management specifying the set of
ports to which frames received from this specific port
for this VLAN and containing this Multicast or Broadcast
destination address are allowed to be forwarded.
.. attribute:: dot1qvlanindex (key)
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`dot1qvlanindex <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qVlanCurrentTable.Dot1qVlanCurrentEntry>`
**config**\: False
.. attribute:: dot1qstaticmulticastaddress (key)
The destination MAC address in a frame to which this entry's filtering information applies. This object must take the value of a Multicast or Broadcast address
**type**\: str
**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
**config**\: False
.. attribute:: dot1qstaticmulticastreceiveport (key)
Either the value '0' or the port number of the port from which a frame must be received in order for this entry's filtering information to apply. A value of zero indicates that this entry applies on all ports of the device for which there is no other applicable entry
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: dot1qstaticmulticaststaticegressports
The set of ports to which frames received from a specific port and destined for a specific Multicast or Broadcast MAC address must be forwarded, regardless of any dynamic information, e.g., from GMRP. A port may not be added in this set if it is already a member of the set of ports in dot1qStaticMulticastForbiddenEgressPorts. The default value of this object is a string of ones of appropriate length. The value of this object MUST be retained across reinitializations of the management system
**type**\: str
**config**\: False
.. attribute:: dot1qstaticmulticastforbiddenegressports
The set of ports to which frames received from a specific port and destined for a specific Multicast or Broadcast MAC address must not be forwarded, regardless of any dynamic information, e.g., from GMRP. A port may not be added in this set if it is already a member of the set of ports in dot1qStaticMulticastStaticEgressPorts. The default value of this object is a string of zeros of appropriate length. The value of this object MUST be retained across reinitializations of the management system
**type**\: str
**config**\: False
.. attribute:: dot1qstaticmulticaststatus
This object indicates the status of this entry. other(1) \- this entry is currently in use, but the conditions under which it will remain so differ from the following values. invalid(2) \- writing this value to the object removes the corresponding entry. permanent(3) \- this entry is currently in use and will remain so after the next reset of the bridge. deleteOnReset(4) \- this entry is currently in use and will remain so until the next reset of the bridge. deleteOnTimeout(5) \- this entry is currently in use and will remain so until it is aged out. The value of this object MUST be retained across reinitializations of the management system
**type**\: :py:class:`Dot1qStaticMulticastStatus <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qStaticMulticastTable.Dot1qStaticMulticastEntry.Dot1qStaticMulticastStatus>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qStaticMulticastTable.Dot1qStaticMulticastEntry, self).__init__()
self.yang_name = "dot1qStaticMulticastEntry"
self.yang_parent_name = "dot1qStaticMulticastTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['dot1qvlanindex','dot1qstaticmulticastaddress','dot1qstaticmulticastreceiveport']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('dot1qvlanindex', (YLeaf(YType.str, 'dot1qVlanIndex'), ['int'])),
('dot1qstaticmulticastaddress', (YLeaf(YType.str, 'dot1qStaticMulticastAddress'), ['str'])),
('dot1qstaticmulticastreceiveport', (YLeaf(YType.int32, 'dot1qStaticMulticastReceivePort'), ['int'])),
('dot1qstaticmulticaststaticegressports', (YLeaf(YType.str, 'dot1qStaticMulticastStaticEgressPorts'), ['str'])),
('dot1qstaticmulticastforbiddenegressports', (YLeaf(YType.str, 'dot1qStaticMulticastForbiddenEgressPorts'), ['str'])),
('dot1qstaticmulticaststatus', (YLeaf(YType.enumeration, 'dot1qStaticMulticastStatus'), [('ydk.models.cisco_ios_xe.Q_BRIDGE_MIB', 'QBRIDGEMIB', 'Dot1qStaticMulticastTable.Dot1qStaticMulticastEntry.Dot1qStaticMulticastStatus')])),
])
self.dot1qvlanindex = None
self.dot1qstaticmulticastaddress = None
self.dot1qstaticmulticastreceiveport = None
self.dot1qstaticmulticaststaticegressports = None
self.dot1qstaticmulticastforbiddenegressports = None
self.dot1qstaticmulticaststatus = None
self._segment_path = lambda: "dot1qStaticMulticastEntry" + "[dot1qVlanIndex='" + str(self.dot1qvlanindex) + "']" + "[dot1qStaticMulticastAddress='" + str(self.dot1qstaticmulticastaddress) + "']" + "[dot1qStaticMulticastReceivePort='" + str(self.dot1qstaticmulticastreceiveport) + "']"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/dot1qStaticMulticastTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qStaticMulticastTable.Dot1qStaticMulticastEntry, ['dot1qvlanindex', 'dot1qstaticmulticastaddress', 'dot1qstaticmulticastreceiveport', 'dot1qstaticmulticaststaticegressports', 'dot1qstaticmulticastforbiddenegressports', 'dot1qstaticmulticaststatus'], name, value)
class Dot1qStaticMulticastStatus(Enum):
"""
Dot1qStaticMulticastStatus (Enum Class)
This object indicates the status of this entry.
other(1) \- this entry is currently in use, but
the conditions under which it will remain
so differ from the following values.
invalid(2) \- writing this value to the object
removes the corresponding entry.
permanent(3) \- this entry is currently in use
and will remain so after the next reset of
the bridge.
deleteOnReset(4) \- this entry is currently in
use and will remain so until the next
reset of the bridge.
deleteOnTimeout(5) \- this entry is currently in
use and will remain so until it is aged out.
The value of this object MUST be retained across
reinitializations of the management system.
.. data:: other = 1
.. data:: invalid = 2
.. data:: permanent = 3
.. data:: deleteOnReset = 4
.. data:: deleteOnTimeout = 5
"""
other = Enum.YLeaf(1, "other")
invalid = Enum.YLeaf(2, "invalid")
permanent = Enum.YLeaf(3, "permanent")
deleteOnReset = Enum.YLeaf(4, "deleteOnReset")
deleteOnTimeout = Enum.YLeaf(5, "deleteOnTimeout")
class Dot1qVlanCurrentTable(Entity):
"""
A table containing current configuration information
for each VLAN currently configured into the device by
(local or network) management, or dynamically created
as a result of GVRP requests received.
.. attribute:: dot1qvlancurrententry
Information for a VLAN configured into the device by (local or network) management, or dynamically created as a result of GVRP requests received
**type**\: list of :py:class:`Dot1qVlanCurrentEntry <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qVlanCurrentTable.Dot1qVlanCurrentEntry>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qVlanCurrentTable, self).__init__()
self.yang_name = "dot1qVlanCurrentTable"
self.yang_parent_name = "Q-BRIDGE-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("dot1qVlanCurrentEntry", ("dot1qvlancurrententry", QBRIDGEMIB.Dot1qVlanCurrentTable.Dot1qVlanCurrentEntry))])
self._leafs = OrderedDict()
self.dot1qvlancurrententry = YList(self)
self._segment_path = lambda: "dot1qVlanCurrentTable"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qVlanCurrentTable, [], name, value)
class Dot1qVlanCurrentEntry(Entity):
"""
Information for a VLAN configured into the device by
(local or network) management, or dynamically created
as a result of GVRP requests received.
.. attribute:: dot1qvlantimemark (key)
A TimeFilter for this entry. See the TimeFilter textual convention to see how this works
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: dot1qvlanindex (key)
The VLAN\-ID or other identifier referring to this VLAN
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: dot1qvlanfdbid
The Filtering Database used by this VLAN. This is one of the dot1qFdbId values in the dot1qFdbTable. This value is allocated automatically by the device whenever the VLAN is created\: either dynamically by GVRP, or by management, in dot1qVlanStaticTable. Allocation of this value follows the learning constraints defined for this VLAN in dot1qLearningConstraintsTable
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: dot1qvlancurrentegressports
The set of ports that are transmitting traffic for this VLAN as either tagged or untagged frames
**type**\: str
**config**\: False
.. attribute:: dot1qvlancurrentuntaggedports
The set of ports that are transmitting traffic for this VLAN as untagged frames
**type**\: str
**config**\: False
.. attribute:: dot1qvlanstatus
This object indicates the status of this entry. other(1) \- this entry is currently in use, but the conditions under which it will remain so differ from the following values. permanent(2) \- this entry, corresponding to an entry in dot1qVlanStaticTable, is currently in use and will remain so after the next reset of the device. The port lists for this entry include ports from the equivalent dot1qVlanStaticTable entry and ports learned dynamically. dynamicGvrp(3) \- this entry is currently in use and will remain so until removed by GVRP. There is no static entry for this VLAN, and it will be removed when the last port leaves the VLAN
**type**\: :py:class:`Dot1qVlanStatus <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qVlanCurrentTable.Dot1qVlanCurrentEntry.Dot1qVlanStatus>`
**config**\: False
.. attribute:: dot1qvlancreationtime
The value of sysUpTime when this VLAN was created
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qVlanCurrentTable.Dot1qVlanCurrentEntry, self).__init__()
self.yang_name = "dot1qVlanCurrentEntry"
self.yang_parent_name = "dot1qVlanCurrentTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['dot1qvlantimemark','dot1qvlanindex']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('dot1qvlantimemark', (YLeaf(YType.uint32, 'dot1qVlanTimeMark'), ['int'])),
('dot1qvlanindex', (YLeaf(YType.uint32, 'dot1qVlanIndex'), ['int'])),
('dot1qvlanfdbid', (YLeaf(YType.uint32, 'dot1qVlanFdbId'), ['int'])),
('dot1qvlancurrentegressports', (YLeaf(YType.str, 'dot1qVlanCurrentEgressPorts'), ['str'])),
('dot1qvlancurrentuntaggedports', (YLeaf(YType.str, 'dot1qVlanCurrentUntaggedPorts'), ['str'])),
('dot1qvlanstatus', (YLeaf(YType.enumeration, 'dot1qVlanStatus'), [('ydk.models.cisco_ios_xe.Q_BRIDGE_MIB', 'QBRIDGEMIB', 'Dot1qVlanCurrentTable.Dot1qVlanCurrentEntry.Dot1qVlanStatus')])),
('dot1qvlancreationtime', (YLeaf(YType.uint32, 'dot1qVlanCreationTime'), ['int'])),
])
self.dot1qvlantimemark = None
self.dot1qvlanindex = None
self.dot1qvlanfdbid = None
self.dot1qvlancurrentegressports = None
self.dot1qvlancurrentuntaggedports = None
self.dot1qvlanstatus = None
self.dot1qvlancreationtime = None
self._segment_path = lambda: "dot1qVlanCurrentEntry" + "[dot1qVlanTimeMark='" + str(self.dot1qvlantimemark) + "']" + "[dot1qVlanIndex='" + str(self.dot1qvlanindex) + "']"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/dot1qVlanCurrentTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qVlanCurrentTable.Dot1qVlanCurrentEntry, ['dot1qvlantimemark', 'dot1qvlanindex', 'dot1qvlanfdbid', 'dot1qvlancurrentegressports', 'dot1qvlancurrentuntaggedports', 'dot1qvlanstatus', 'dot1qvlancreationtime'], name, value)
class Dot1qVlanStatus(Enum):
"""
Dot1qVlanStatus (Enum Class)
This object indicates the status of this entry.
other(1) \- this entry is currently in use, but the
conditions under which it will remain so differ
from the following values.
permanent(2) \- this entry, corresponding to an entry
in dot1qVlanStaticTable, is currently in use and
will remain so after the next reset of the
device. The port lists for this entry include
ports from the equivalent dot1qVlanStaticTable
entry and ports learned dynamically.
dynamicGvrp(3) \- this entry is currently in use
and will remain so until removed by GVRP. There
is no static entry for this VLAN, and it will be
removed when the last port leaves the VLAN.
.. data:: other = 1
.. data:: permanent = 2
.. data:: dynamicGvrp = 3
"""
other = Enum.YLeaf(1, "other")
permanent = Enum.YLeaf(2, "permanent")
dynamicGvrp = Enum.YLeaf(3, "dynamicGvrp")
class Dot1qVlanStaticTable(Entity):
"""
A table containing static configuration information for
each VLAN configured into the device by (local or
network) management. All entries are permanent and will
be restored after the device is reset.
.. attribute:: dot1qvlanstaticentry
Static information for a VLAN configured into the device by (local or network) management
**type**\: list of :py:class:`Dot1qVlanStaticEntry <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qVlanStaticTable.Dot1qVlanStaticEntry>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qVlanStaticTable, self).__init__()
self.yang_name = "dot1qVlanStaticTable"
self.yang_parent_name = "Q-BRIDGE-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("dot1qVlanStaticEntry", ("dot1qvlanstaticentry", QBRIDGEMIB.Dot1qVlanStaticTable.Dot1qVlanStaticEntry))])
self._leafs = OrderedDict()
self.dot1qvlanstaticentry = YList(self)
self._segment_path = lambda: "dot1qVlanStaticTable"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qVlanStaticTable, [], name, value)
class Dot1qVlanStaticEntry(Entity):
"""
Static information for a VLAN configured into the
device by (local or network) management.
.. attribute:: dot1qvlanindex (key)
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`dot1qvlanindex <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qVlanCurrentTable.Dot1qVlanCurrentEntry>`
**config**\: False
.. attribute:: dot1qvlanstaticname
An administratively assigned string, which may be used to identify the VLAN
**type**\: str
**length:** 0..32
**config**\: False
.. attribute:: dot1qvlanstaticegressports
The set of ports that are permanently assigned to the egress list for this VLAN by management. Changes to a bit in this object affect the per\-port, per\-VLAN Registrar control for Registration Fixed for the relevant GVRP state machine on each port. A port may not be added in this set if it is already a member of the set of ports in dot1qVlanForbiddenEgressPorts. The default value of this object is a string of zeros of appropriate length, indicating not fixed
**type**\: str
**config**\: False
.. attribute:: dot1qvlanforbiddenegressports
The set of ports that are prohibited by management from being included in the egress list for this VLAN. Changes to this object that cause a port to be included or excluded affect the per\-port, per\-VLAN Registrar control for Registration Forbidden for the relevant GVRP state machine on each port. A port may not be added in this set if it is already a member of the set of ports in dot1qVlanStaticEgressPorts. The default value of this object is a string of zeros of appropriate length, excluding all ports from the forbidden set
**type**\: str
**config**\: False
.. attribute:: dot1qvlanstaticuntaggedports
The set of ports that should transmit egress packets for this VLAN as untagged. The default value of this object for the default VLAN (dot1qVlanIndex = 1) is a string of appropriate length including all ports. There is no specified default for other VLANs. If a device agent cannot support the set of ports being set, then it will reject the set operation with an error. For example, a manager might attempt to set more than one VLAN to be untagged on egress where the device does not support this IEEE 802.1Q option
**type**\: str
**config**\: False
.. attribute:: dot1qvlanstaticrowstatus
This object indicates the status of this entry
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qVlanStaticTable.Dot1qVlanStaticEntry, self).__init__()
self.yang_name = "dot1qVlanStaticEntry"
self.yang_parent_name = "dot1qVlanStaticTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['dot1qvlanindex']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('dot1qvlanindex', (YLeaf(YType.str, 'dot1qVlanIndex'), ['int'])),
('dot1qvlanstaticname', (YLeaf(YType.str, 'dot1qVlanStaticName'), ['str'])),
('dot1qvlanstaticegressports', (YLeaf(YType.str, 'dot1qVlanStaticEgressPorts'), ['str'])),
('dot1qvlanforbiddenegressports', (YLeaf(YType.str, 'dot1qVlanForbiddenEgressPorts'), ['str'])),
('dot1qvlanstaticuntaggedports', (YLeaf(YType.str, 'dot1qVlanStaticUntaggedPorts'), ['str'])),
('dot1qvlanstaticrowstatus', (YLeaf(YType.enumeration, 'dot1qVlanStaticRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
])
self.dot1qvlanindex = None
self.dot1qvlanstaticname = None
self.dot1qvlanstaticegressports = None
self.dot1qvlanforbiddenegressports = None
self.dot1qvlanstaticuntaggedports = None
self.dot1qvlanstaticrowstatus = None
self._segment_path = lambda: "dot1qVlanStaticEntry" + "[dot1qVlanIndex='" + str(self.dot1qvlanindex) + "']"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/dot1qVlanStaticTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qVlanStaticTable.Dot1qVlanStaticEntry, ['dot1qvlanindex', 'dot1qvlanstaticname', 'dot1qvlanstaticegressports', 'dot1qvlanforbiddenegressports', 'dot1qvlanstaticuntaggedports', 'dot1qvlanstaticrowstatus'], name, value)
class Dot1qPortVlanStatisticsTable(Entity):
"""
A table containing per\-port, per\-VLAN statistics for
traffic received. Separate objects are provided for both the
most\-significant and least\-significant bits of statistics
counters for ports that are associated with this transparent
bridge. The most\-significant bit objects are only required on
high\-capacity interfaces, as defined in the conformance clauses
for these objects. This mechanism is provided as a way to read
64\-bit counters for agents that support only SNMPv1.
Note that the reporting of most\-significant and least\-
significant counter bits separately runs the risk of missing
an overflow of the lower bits in the interval between sampling.
The manager must be aware of this possibility, even within the
same varbindlist, when interpreting the results of a request or
asynchronous notification.
.. attribute:: dot1qportvlanstatisticsentry
Traffic statistics for a VLAN on an interface
**type**\: list of :py:class:`Dot1qPortVlanStatisticsEntry <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qPortVlanStatisticsTable.Dot1qPortVlanStatisticsEntry>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qPortVlanStatisticsTable, self).__init__()
self.yang_name = "dot1qPortVlanStatisticsTable"
self.yang_parent_name = "Q-BRIDGE-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("dot1qPortVlanStatisticsEntry", ("dot1qportvlanstatisticsentry", QBRIDGEMIB.Dot1qPortVlanStatisticsTable.Dot1qPortVlanStatisticsEntry))])
self._leafs = OrderedDict()
self.dot1qportvlanstatisticsentry = YList(self)
self._segment_path = lambda: "dot1qPortVlanStatisticsTable"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qPortVlanStatisticsTable, [], name, value)
class Dot1qPortVlanStatisticsEntry(Entity):
"""
Traffic statistics for a VLAN on an interface.
.. attribute:: dot1dbaseport (key)
**type**\: int
**range:** 1..65535
**refers to**\: :py:class:`dot1dbaseport <ydk.models.cisco_ios_xe.BRIDGE_MIB.BRIDGEMIB.Dot1dBasePortTable.Dot1dBasePortEntry>`
**config**\: False
.. attribute:: dot1qvlanindex (key)
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`dot1qvlanindex <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qVlanCurrentTable.Dot1qVlanCurrentEntry>`
**config**\: False
.. attribute:: dot1qtpvlanportinframes
The number of valid frames received by this port from its segment that were classified as belonging to this VLAN. Note that a frame received on this port is counted by this object if and only if it is for a protocol being processed by the local forwarding process for this VLAN. This object includes received bridge management frames classified as belonging to this VLAN (e.g., GMRP, but not GVRP or STP)
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: dot1qtpvlanportoutframes
The number of valid frames transmitted by this port to its segment from the local forwarding process for this VLAN. This includes bridge management frames originated by this device that are classified as belonging to this VLAN (e.g., GMRP, but not GVRP or STP)
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: dot1qtpvlanportindiscards
The number of valid frames received by this port from its segment that were classified as belonging to this VLAN and that were discarded due to VLAN\-related reasons. Specifically, the IEEE 802.1Q counters for Discard Inbound and Discard on Ingress Filtering
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: dot1qtpvlanportinoverflowframes
The number of times the associated dot1qTpVlanPortInFrames counter has overflowed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: dot1qtpvlanportoutoverflowframes
The number of times the associated dot1qTpVlanPortOutFrames counter has overflowed
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: dot1qtpvlanportinoverflowdiscards
The number of times the associated dot1qTpVlanPortInDiscards counter has overflowed
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qPortVlanStatisticsTable.Dot1qPortVlanStatisticsEntry, self).__init__()
self.yang_name = "dot1qPortVlanStatisticsEntry"
self.yang_parent_name = "dot1qPortVlanStatisticsTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['dot1dbaseport','dot1qvlanindex']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('dot1dbaseport', (YLeaf(YType.str, 'dot1dBasePort'), ['int'])),
('dot1qvlanindex', (YLeaf(YType.str, 'dot1qVlanIndex'), ['int'])),
('dot1qtpvlanportinframes', (YLeaf(YType.uint32, 'dot1qTpVlanPortInFrames'), ['int'])),
('dot1qtpvlanportoutframes', (YLeaf(YType.uint32, 'dot1qTpVlanPortOutFrames'), ['int'])),
('dot1qtpvlanportindiscards', (YLeaf(YType.uint32, 'dot1qTpVlanPortInDiscards'), ['int'])),
('dot1qtpvlanportinoverflowframes', (YLeaf(YType.uint32, 'dot1qTpVlanPortInOverflowFrames'), ['int'])),
('dot1qtpvlanportoutoverflowframes', (YLeaf(YType.uint32, 'dot1qTpVlanPortOutOverflowFrames'), ['int'])),
('dot1qtpvlanportinoverflowdiscards', (YLeaf(YType.uint32, 'dot1qTpVlanPortInOverflowDiscards'), ['int'])),
])
self.dot1dbaseport = None
self.dot1qvlanindex = None
self.dot1qtpvlanportinframes = None
self.dot1qtpvlanportoutframes = None
self.dot1qtpvlanportindiscards = None
self.dot1qtpvlanportinoverflowframes = None
self.dot1qtpvlanportoutoverflowframes = None
self.dot1qtpvlanportinoverflowdiscards = None
self._segment_path = lambda: "dot1qPortVlanStatisticsEntry" + "[dot1dBasePort='" + str(self.dot1dbaseport) + "']" + "[dot1qVlanIndex='" + str(self.dot1qvlanindex) + "']"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/dot1qPortVlanStatisticsTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qPortVlanStatisticsTable.Dot1qPortVlanStatisticsEntry, ['dot1dbaseport', 'dot1qvlanindex', 'dot1qtpvlanportinframes', 'dot1qtpvlanportoutframes', 'dot1qtpvlanportindiscards', 'dot1qtpvlanportinoverflowframes', 'dot1qtpvlanportoutoverflowframes', 'dot1qtpvlanportinoverflowdiscards'], name, value)
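# Illustrative note (not part of the generated bindings): the 32-bit counters in
# dot1qPortVlanStatisticsTable pair with the *OverflowFrames/*OverflowDiscards
# objects above, so a manager limited to 32-bit reads can reconstruct a 64-bit
# value roughly as follows (attribute names taken from the entry class above):
#
#     in_frames_64 = (entry.dot1qtpvlanportinoverflowframes << 32) \
#                    + entry.dot1qtpvlanportinframes
#
# Because the two halves are read separately, the low half may wrap between the
# two reads; re-reading until the overflow count is stable is one common guard.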
class Dot1qPortVlanHCStatisticsTable(Entity):
"""
A table containing per\-port, per\-VLAN statistics for
traffic on high\-capacity interfaces.
.. attribute:: dot1qportvlanhcstatisticsentry
Traffic statistics for a VLAN on a high\-capacity interface
**type**\: list of :py:class:`Dot1qPortVlanHCStatisticsEntry <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qPortVlanHCStatisticsTable.Dot1qPortVlanHCStatisticsEntry>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qPortVlanHCStatisticsTable, self).__init__()
self.yang_name = "dot1qPortVlanHCStatisticsTable"
self.yang_parent_name = "Q-BRIDGE-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("dot1qPortVlanHCStatisticsEntry", ("dot1qportvlanhcstatisticsentry", QBRIDGEMIB.Dot1qPortVlanHCStatisticsTable.Dot1qPortVlanHCStatisticsEntry))])
self._leafs = OrderedDict()
self.dot1qportvlanhcstatisticsentry = YList(self)
self._segment_path = lambda: "dot1qPortVlanHCStatisticsTable"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qPortVlanHCStatisticsTable, [], name, value)
class Dot1qPortVlanHCStatisticsEntry(Entity):
"""
Traffic statistics for a VLAN on a high\-capacity
interface.
.. attribute:: dot1dbaseport (key)
**type**\: int
**range:** 1..65535
**refers to**\: :py:class:`dot1dbaseport <ydk.models.cisco_ios_xe.BRIDGE_MIB.BRIDGEMIB.Dot1dBasePortTable.Dot1dBasePortEntry>`
**config**\: False
.. attribute:: dot1qvlanindex (key)
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`dot1qvlanindex <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qVlanCurrentTable.Dot1qVlanCurrentEntry>`
**config**\: False
.. attribute:: dot1qtpvlanporthcinframes
The number of valid frames received by this port from its segment that were classified as belonging to this VLAN. Note that a frame received on this port is counted by this object if and only if it is for a protocol being processed by the local forwarding process for this VLAN. This object includes received bridge management frames classified as belonging to this VLAN (e.g., GMRP, but not GVRP or STP)
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: dot1qtpvlanporthcoutframes
The number of valid frames transmitted by this port to its segment from the local forwarding process for this VLAN. This includes bridge management frames originated by this device that are classified as belonging to this VLAN (e.g., GMRP, but not GVRP or STP)
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: dot1qtpvlanporthcindiscards
The number of valid frames received by this port from its segment that were classified as belonging to this VLAN and that were discarded due to VLAN\-related reasons. Specifically, the IEEE 802.1Q counters for Discard Inbound and Discard on Ingress Filtering
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qPortVlanHCStatisticsTable.Dot1qPortVlanHCStatisticsEntry, self).__init__()
self.yang_name = "dot1qPortVlanHCStatisticsEntry"
self.yang_parent_name = "dot1qPortVlanHCStatisticsTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['dot1dbaseport','dot1qvlanindex']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('dot1dbaseport', (YLeaf(YType.str, 'dot1dBasePort'), ['int'])),
('dot1qvlanindex', (YLeaf(YType.str, 'dot1qVlanIndex'), ['int'])),
('dot1qtpvlanporthcinframes', (YLeaf(YType.uint64, 'dot1qTpVlanPortHCInFrames'), ['int'])),
('dot1qtpvlanporthcoutframes', (YLeaf(YType.uint64, 'dot1qTpVlanPortHCOutFrames'), ['int'])),
('dot1qtpvlanporthcindiscards', (YLeaf(YType.uint64, 'dot1qTpVlanPortHCInDiscards'), ['int'])),
])
self.dot1dbaseport = None
self.dot1qvlanindex = None
self.dot1qtpvlanporthcinframes = None
self.dot1qtpvlanporthcoutframes = None
self.dot1qtpvlanporthcindiscards = None
self._segment_path = lambda: "dot1qPortVlanHCStatisticsEntry" + "[dot1dBasePort='" + str(self.dot1dbaseport) + "']" + "[dot1qVlanIndex='" + str(self.dot1qvlanindex) + "']"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/dot1qPortVlanHCStatisticsTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qPortVlanHCStatisticsTable.Dot1qPortVlanHCStatisticsEntry, ['dot1dbaseport', 'dot1qvlanindex', 'dot1qtpvlanporthcinframes', 'dot1qtpvlanporthcoutframes', 'dot1qtpvlanporthcindiscards'], name, value)
class Dot1qLearningConstraintsTable(Entity):
"""
A table containing learning constraints for sets of
Shared and Independent VLANs.
.. attribute:: dot1qlearningconstraintsentry
A learning constraint defined for a VLAN
**type**\: list of :py:class:`Dot1qLearningConstraintsEntry <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qLearningConstraintsTable.Dot1qLearningConstraintsEntry>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qLearningConstraintsTable, self).__init__()
self.yang_name = "dot1qLearningConstraintsTable"
self.yang_parent_name = "Q-BRIDGE-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("dot1qLearningConstraintsEntry", ("dot1qlearningconstraintsentry", QBRIDGEMIB.Dot1qLearningConstraintsTable.Dot1qLearningConstraintsEntry))])
self._leafs = OrderedDict()
self.dot1qlearningconstraintsentry = YList(self)
self._segment_path = lambda: "dot1qLearningConstraintsTable"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qLearningConstraintsTable, [], name, value)
class Dot1qLearningConstraintsEntry(Entity):
"""
A learning constraint defined for a VLAN.
.. attribute:: dot1qconstraintvlan (key)
The index of the row in dot1qVlanCurrentTable for the VLAN constrained by this entry
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: dot1qconstraintset (key)
The identity of the constraint set to which dot1qConstraintVlan belongs. These values may be chosen by the management station
**type**\: int
**range:** 0..65535
**config**\: False
.. attribute:: dot1qconstrainttype
The type of constraint this entry defines. independent(1) \- the VLAN, dot1qConstraintVlan, uses a filtering database independent from all other VLANs in the same set, defined by dot1qConstraintSet. shared(2) \- the VLAN, dot1qConstraintVlan, shares the same filtering database as all other VLANs in the same set, defined by dot1qConstraintSet
**type**\: :py:class:`Dot1qConstraintType <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1qLearningConstraintsTable.Dot1qLearningConstraintsEntry.Dot1qConstraintType>`
**config**\: False
.. attribute:: dot1qconstraintstatus
The status of this entry
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1qLearningConstraintsTable.Dot1qLearningConstraintsEntry, self).__init__()
self.yang_name = "dot1qLearningConstraintsEntry"
self.yang_parent_name = "dot1qLearningConstraintsTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['dot1qconstraintvlan','dot1qconstraintset']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('dot1qconstraintvlan', (YLeaf(YType.uint32, 'dot1qConstraintVlan'), ['int'])),
('dot1qconstraintset', (YLeaf(YType.int32, 'dot1qConstraintSet'), ['int'])),
('dot1qconstrainttype', (YLeaf(YType.enumeration, 'dot1qConstraintType'), [('ydk.models.cisco_ios_xe.Q_BRIDGE_MIB', 'QBRIDGEMIB', 'Dot1qLearningConstraintsTable.Dot1qLearningConstraintsEntry.Dot1qConstraintType')])),
('dot1qconstraintstatus', (YLeaf(YType.enumeration, 'dot1qConstraintStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
])
self.dot1qconstraintvlan = None
self.dot1qconstraintset = None
self.dot1qconstrainttype = None
self.dot1qconstraintstatus = None
self._segment_path = lambda: "dot1qLearningConstraintsEntry" + "[dot1qConstraintVlan='" + str(self.dot1qconstraintvlan) + "']" + "[dot1qConstraintSet='" + str(self.dot1qconstraintset) + "']"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/dot1qLearningConstraintsTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1qLearningConstraintsTable.Dot1qLearningConstraintsEntry, ['dot1qconstraintvlan', 'dot1qconstraintset', 'dot1qconstrainttype', 'dot1qconstraintstatus'], name, value)
class Dot1qConstraintType(Enum):
"""
Dot1qConstraintType (Enum Class)
The type of constraint this entry defines.
independent(1) \- the VLAN, dot1qConstraintVlan,
uses a filtering database independent from all
other VLANs in the same set, defined by
dot1qConstraintSet.
shared(2) \- the VLAN, dot1qConstraintVlan, shares
the same filtering database as all other VLANs
in the same set, defined by dot1qConstraintSet.
.. data:: independent = 1
.. data:: shared = 2
"""
independent = Enum.YLeaf(1, "independent")
shared = Enum.YLeaf(2, "shared")
class Dot1vProtocolGroupTable(Entity):
"""
A table that contains mappings from Protocol
Templates to Protocol Group Identifiers used for
Port\-and\-Protocol\-based VLAN Classification.
.. attribute:: dot1vprotocolgroupentry
A mapping from a Protocol Template to a Protocol Group Identifier
**type**\: list of :py:class:`Dot1vProtocolGroupEntry <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1vProtocolGroupTable.Dot1vProtocolGroupEntry>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1vProtocolGroupTable, self).__init__()
self.yang_name = "dot1vProtocolGroupTable"
self.yang_parent_name = "Q-BRIDGE-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("dot1vProtocolGroupEntry", ("dot1vprotocolgroupentry", QBRIDGEMIB.Dot1vProtocolGroupTable.Dot1vProtocolGroupEntry))])
self._leafs = OrderedDict()
self.dot1vprotocolgroupentry = YList(self)
self._segment_path = lambda: "dot1vProtocolGroupTable"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1vProtocolGroupTable, [], name, value)
class Dot1vProtocolGroupEntry(Entity):
"""
A mapping from a Protocol Template to a Protocol
Group Identifier.
.. attribute:: dot1vprotocoltemplateframetype (key)
The data\-link encapsulation format or the 'detagged\_frame\_type' in a Protocol Template
**type**\: :py:class:`Dot1vProtocolTemplateFrameType <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1vProtocolGroupTable.Dot1vProtocolGroupEntry.Dot1vProtocolTemplateFrameType>`
**config**\: False
.. attribute:: dot1vprotocoltemplateprotocolvalue (key)
The identification of the protocol above the data\-link layer in a Protocol Template. Depending on the frame type, the octet string will have one of the following values\: For 'ethernet', 'rfc1042' and 'snap8021H', this is the 16\-bit (2\-octet) IEEE 802.3 Type Field. For 'snapOther', this is the 40\-bit (5\-octet) PID. For 'llcOther', this is the 2\-octet IEEE 802.2 Link Service Access Point (LSAP) pair\: first octet for Destination Service Access Point (DSAP) and second octet for Source Service Access Point (SSAP)
**type**\: str
**length:** 2..2 \| 5..5
**config**\: False
.. attribute:: dot1vprotocolgroupid
Represents a group of protocols that are associated together when assigning a VID to a frame
**type**\: int
**range:** 0..2147483647
**config**\: False
.. attribute:: dot1vprotocolgrouprowstatus
This object indicates the status of this entry
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1vProtocolGroupTable.Dot1vProtocolGroupEntry, self).__init__()
self.yang_name = "dot1vProtocolGroupEntry"
self.yang_parent_name = "dot1vProtocolGroupTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['dot1vprotocoltemplateframetype','dot1vprotocoltemplateprotocolvalue']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('dot1vprotocoltemplateframetype', (YLeaf(YType.enumeration, 'dot1vProtocolTemplateFrameType'), [('ydk.models.cisco_ios_xe.Q_BRIDGE_MIB', 'QBRIDGEMIB', 'Dot1vProtocolGroupTable.Dot1vProtocolGroupEntry.Dot1vProtocolTemplateFrameType')])),
('dot1vprotocoltemplateprotocolvalue', (YLeaf(YType.str, 'dot1vProtocolTemplateProtocolValue'), ['str'])),
('dot1vprotocolgroupid', (YLeaf(YType.int32, 'dot1vProtocolGroupId'), ['int'])),
('dot1vprotocolgrouprowstatus', (YLeaf(YType.enumeration, 'dot1vProtocolGroupRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
])
self.dot1vprotocoltemplateframetype = None
self.dot1vprotocoltemplateprotocolvalue = None
self.dot1vprotocolgroupid = None
self.dot1vprotocolgrouprowstatus = None
self._segment_path = lambda: "dot1vProtocolGroupEntry" + "[dot1vProtocolTemplateFrameType='" + str(self.dot1vprotocoltemplateframetype) + "']" + "[dot1vProtocolTemplateProtocolValue='" + str(self.dot1vprotocoltemplateprotocolvalue) + "']"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/dot1vProtocolGroupTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1vProtocolGroupTable.Dot1vProtocolGroupEntry, ['dot1vprotocoltemplateframetype', 'dot1vprotocoltemplateprotocolvalue', 'dot1vprotocolgroupid', 'dot1vprotocolgrouprowstatus'], name, value)
class Dot1vProtocolTemplateFrameType(Enum):
"""
Dot1vProtocolTemplateFrameType (Enum Class)
The data\-link encapsulation format or the
'detagged\_frame\_type' in a Protocol Template.
.. data:: ethernet = 1
.. data:: rfc1042 = 2
.. data:: snap8021H = 3
.. data:: snapOther = 4
.. data:: llcOther = 5
"""
ethernet = Enum.YLeaf(1, "ethernet")
rfc1042 = Enum.YLeaf(2, "rfc1042")
snap8021H = Enum.YLeaf(3, "snap8021H")
snapOther = Enum.YLeaf(4, "snapOther")
llcOther = Enum.YLeaf(5, "llcOther")
class Dot1vProtocolPortTable(Entity):
"""
A table that contains VID sets used for
Port\-and\-Protocol\-based VLAN Classification.
.. attribute:: dot1vprotocolportentry
A VID set for a port
**type**\: list of :py:class:`Dot1vProtocolPortEntry <ydk.models.cisco_ios_xe.Q_BRIDGE_MIB.QBRIDGEMIB.Dot1vProtocolPortTable.Dot1vProtocolPortEntry>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1vProtocolPortTable, self).__init__()
self.yang_name = "dot1vProtocolPortTable"
self.yang_parent_name = "Q-BRIDGE-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("dot1vProtocolPortEntry", ("dot1vprotocolportentry", QBRIDGEMIB.Dot1vProtocolPortTable.Dot1vProtocolPortEntry))])
self._leafs = OrderedDict()
self.dot1vprotocolportentry = YList(self)
self._segment_path = lambda: "dot1vProtocolPortTable"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1vProtocolPortTable, [], name, value)
class Dot1vProtocolPortEntry(Entity):
"""
A VID set for a port.
.. attribute:: dot1dbaseport (key)
**type**\: int
**range:** 1..65535
**refers to**\: :py:class:`dot1dbaseport <ydk.models.cisco_ios_xe.BRIDGE_MIB.BRIDGEMIB.Dot1dBasePortTable.Dot1dBasePortEntry>`
**config**\: False
.. attribute:: dot1vprotocolportgroupid (key)
Designates a group of protocols in the Protocol Group Database
**type**\: int
**range:** 1..2147483647
**config**\: False
.. attribute:: dot1vprotocolportgroupvid
The VID associated with a group of protocols for each port
**type**\: int
**range:** 1..4094
**config**\: False
.. attribute:: dot1vprotocolportrowstatus
This object indicates the status of this entry
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
**config**\: False
"""
_prefix = 'Q-BRIDGE-MIB'
_revision = '2006-01-09'
def __init__(self):
super(QBRIDGEMIB.Dot1vProtocolPortTable.Dot1vProtocolPortEntry, self).__init__()
self.yang_name = "dot1vProtocolPortEntry"
self.yang_parent_name = "dot1vProtocolPortTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['dot1dbaseport','dot1vprotocolportgroupid']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('dot1dbaseport', (YLeaf(YType.str, 'dot1dBasePort'), ['int'])),
('dot1vprotocolportgroupid', (YLeaf(YType.int32, 'dot1vProtocolPortGroupId'), ['int'])),
('dot1vprotocolportgroupvid', (YLeaf(YType.int32, 'dot1vProtocolPortGroupVid'), ['int'])),
('dot1vprotocolportrowstatus', (YLeaf(YType.enumeration, 'dot1vProtocolPortRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
])
self.dot1dbaseport = None
self.dot1vprotocolportgroupid = None
self.dot1vprotocolportgroupvid = None
self.dot1vprotocolportrowstatus = None
self._segment_path = lambda: "dot1vProtocolPortEntry" + "[dot1dBasePort='" + str(self.dot1dbaseport) + "']" + "[dot1vProtocolPortGroupId='" + str(self.dot1vprotocolportgroupid) + "']"
self._absolute_path = lambda: "Q-BRIDGE-MIB:Q-BRIDGE-MIB/dot1vProtocolPortTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(QBRIDGEMIB.Dot1vProtocolPortTable.Dot1vProtocolPortEntry, ['dot1dbaseport', 'dot1vprotocolportgroupid', 'dot1vprotocolportgroupvid', 'dot1vprotocolportrowstatus'], name, value)
def clone_ptr(self):
self._top_entity = QBRIDGEMIB()
return self._top_entity
|
from argparse import ArgumentParser
from sftp import SpanPredictor
parser = ArgumentParser('predict spans')
parser.add_argument(
'-m', help='model path', type=str, default='https://gqin.top/sftp-fn'
)
args = parser.parse_args()
# Specify the path to the model and the device on which the model resides.
# Here we use device -1, which indicates CPU.
predictor = SpanPredictor.from_path(
args.m,
cuda_device=-1,
)
# The input sentence can be a plain string. It will be tokenized by SpacyTokenizer, and the tokens will be
# returned along with the predictions.
input1 = "Bob saw Alice eating an apple."
print("Example 1 with input:", input1)
output1 = predictor.predict_sentence(input1)
output1.span.tree(output1.sentence)
# The input sentence might already be tokenized. In that case, the given tokenization is respected,
# and the output will be based on the given tokens.
input2 = ["Bob", "saw", "Alice", "eating", "an", "apple", "."]
print('-'*20+"\nExample 2 with input:", input2)
output2 = predictor.predict_sentence(input2)
output2.span.tree(output2.sentence)
# For efficiency, you can pass all the sentences at once.
# Note: The predictor does the batching itself.
# Instead of specifying a batch size, you should specify `max_tokens`, which
# is the maximum number of tokens that may be put into one batch.
# The predictor will batch the input sentences dynamically and efficiently,
# and the outputs will be in the same order as the inputs.
output3 = predictor.predict_batch_sentences([input1, input2], max_tokens=512, progress=True)
print('-'*20+"\nExample 3 with both inputs:")
for i in range(2):
output3[i].span.tree(output3[i].sentence)
# For SRL, we can limit the decoding depth if we only need the event predictions (saves ~13% time),
# and we can also cap the number of decoded spans to speed things up further.
predictor.economize(max_decoding_spans=20, max_recursion_depth=1)
output4 = predictor.predict_batch_sentences([input2], max_tokens=512)
print('-'*20+"\nExample 4 with input:", input2)
output4[0].span.tree(output4[0].sentence)
|
from rest_framework import viewsets
from . import models, serializers
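# A minimal DRF ModelViewSet: registering this class with a router exposes list, retrieve,
# create, update and destroy endpoints for the User model.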
class UserViewSet(viewsets.ModelViewSet):
queryset = models.User.objects.all()
serializer_class = serializers.UserSerializer
|
#!/usr/bin/python
import ospray
from ospray import *
def main() :
imgSize = [1024,768]
## camera
cam_pos = [0, 0, 0]
cam_up = [0, 1, 0]
cam_view = [0.1, 0, 1]
## triangle mesh data
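    ## four vertices, each padded to four floats to match the 'float3a' (aligned) layout used below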
vertex = [
-1.0, -1.0, 3.0, 0,
-1.0, 1.0, 3.0, 0,
1.0, -1.0, 3.0, 0,
0.1, 0.1, 0.3, 0
]
color = [
0.9, 0.5, 0.5, 1.0,
0.8, 0.8, 0.8, 1.0,
0.8, 0.8, 0.8, 1.0,
0.5, 0.9, 0.5, 1.0
]
index = [
0, 1, 2,
1, 2, 3
]
## initialize OSPRay; OSPRay parses (and removes) its commandline
## parameters, e.g. "--osp:debug"
ospray.ospInit()
## create and setup camera
camera = ospNewCamera("perspective")
    ospSetf(camera, "aspect", imgSize[0]/float(imgSize[1]))  ## float division so the aspect ratio is not truncated
    ospSet3fv(camera, "pos", cam_pos)
    ospSet3fv(camera, "dir", cam_view)
    ospSet3fv(camera, "up", cam_up)
## commit each object to indicate modifications are done
ospCommit(camera)
## create and setup model and mesh
mesh = ospNewGeometry("triangles")
#TODO: data = ospNewData(4, OSP_FLOAT3A, vertex, 0)
data = ospNewData(4, 'float3a', vertex)
## OSP_FLOAT3 format is also supported for vertex positions
ospCommit(data)
ospSetData(mesh, "vertex", data)
ospRelease(data) ##we are done using this handle
data = ospNewData(4, 'float4', color)
ospCommit(data)
ospSetData(mesh, "vertex.color", data)
ospRelease(data) ## we are done using this handle
data = ospNewData(2, 'int3', index)
## OSP_INT4 format is also supported for triangle indices
ospCommit(data)
ospSetData(mesh, "index", data)
ospRelease(data) ## we are done using this handle
ospCommit(mesh)
world = ospNewModel()
ospAddGeometry(world, mesh)
ospRelease(mesh) ## we are done using this handle
ospCommit(world)
## create renderer
renderer = ospNewRenderer("scivis")
## choose Scientific Visualization renderer
## create and setup light for Ambient Occlusion
light = ospNewLight("ambient")
ospCommit(light)
lights = ospNewData(1, 'OSP_LIGHT', [ light ])
ospCommit(lights)
## complete setup of renderer
ospSet1i(renderer, "aoSamples", 1)
ospSet1f(renderer, "bgColor", 1.0) ## white, transparent
ospSetObject(renderer, "model", world)
ospSetObject(renderer, "camera", camera)
ospSetObject(renderer, "lights", lights)
ospCommit(renderer)
## create and setup framebuffer
framebuffer = ospNewFrameBuffer(imgSize, "srgba", [ "color", "accum" ])
ospFrameBufferClear(framebuffer, [ "color", "accum" ]);
## render one frame
print("rendering first frame")
ospRenderFrame(framebuffer, renderer, ["color","accum"])
## access framebuffer and write its content as PPM file
##const uint32_t * fb = (uint32_t*)ospMapFrameBuffer(framebuffer, OSP_FB_COLOR)
print("saving frame buffer to 'firstFrame.png'")
ospFrameBufferSave("firstFrame.png", framebuffer, imgSize, "srgba")
##ospUnmapFrameBuffer(fb, framebuffer)
## render 10 more frames, which are accumulated to result in a better converged image
for frame in range(0,10) :
print("accumulating frame #"+str(frame))
ospRenderFrame(framebuffer, renderer, ["color","accum"])
##fb = (uint32_t*)ospMapFrameBuffer(framebuffer, OSP_FB_COLOR)
##writePPM("accumulatedFrame.ppm", &imgSize, fb)
##ospUnmapFrameBuffer(fb, framebuffer)
print("saving accumulated frame buffer to 'accumulatedFrame.png'")
ospFrameBufferSave("accumulatedFrame.png", framebuffer, imgSize, "srgba")
## final cleanups
ospRelease(renderer)
ospRelease(camera)
ospRelease(lights)
ospRelease(light)
ospRelease(framebuffer)
ospRelease(world)
ospShutdown()
main()
|
from app.models import Comment,User
from app import db
import unittest
class CommentTest(unittest.TestCase):
'''
    Test Class to test the behaviour of the Comment class
'''
def setUp(self):
self.user_Dunco = User(username = 'Dunco',password = 'dunco', email = 'aruncodunco@gmail.com')
self.new_comment = Comment(pitch_id=12345,pitch_title='Review for movies',image_path="https://image.tmdb.org/t/p/w500/jdjdjdjn",movie_review='best series ever watched',user = self.user_Dunco )
def tearDown(self):
Comment.query.delete()
User.query.delete()
    def test_check_instance_variables(self):
        self.assertEqual(self.new_comment.pitch_id,12345)
        self.assertEqual(self.new_comment.pitch_title,'Review for movies')
        self.assertEqual(self.new_comment.image_path,"https://image.tmdb.org/t/p/w500/jdjdjdjn")
        self.assertEqual(self.new_comment.movie_review,'best series ever watched')
        self.assertEqual(self.new_comment.user,self.user_Dunco)
def test_save_comment(self):
self.new_comment.save_comment()
self.assertTrue(len(Comment.query.all())>0)
def test_get_comment_by_id(self):
self.new_comment.save_comment()
got_comments = Comment.get_comments(12345)
self.assertTrue(len(got_comments) == 1)
|
# pylint: disable=W0401,W0611,W0231
"""
More info: http://docs.jasminsms.com/en/latest/interception/index.html
"""
from jasmin.routing.Filters import Filter
from jasmin.routing.Routables import Routable
from jasmin.routing.jasminApi import *
class InvalidInterceptorParameterError(Exception):
"""Raised when a parameter is not an instance of a desired class.
Used for validating inputs
"""
class InvalidInterceptorFilterError(Exception):
"""Raised when an interceptor is instanciated with a non-compatible type"""
class Interceptor(object):
"""Generic Interceptor:
    An Interceptor pairs a list of Filter(s) with an InterceptorScript.
    When more than one Filter is given, the filters are combined with the AND operator.
"""
type = 'generic'
_str = 'generic'
filters = []
script = None
def __init__(self, filters, script):
if not isinstance(script, InterceptorScript):
raise InvalidInterceptorParameterError("script is not an instance of InterceptorScript")
if not isinstance(filters, list):
raise InvalidInterceptorParameterError("filters must be a list")
for _filter in filters:
if not isinstance(_filter, Filter):
raise InvalidInterceptorParameterError(
"filter must be an instance of Filter, %s found" % type(_filter)
)
if not self.type in _filter.usedFor:
raise InvalidInterceptorFilterError(
"filter types (%s) is not compatible with this interceptor type (%s)" % (
_filter.usedFor, self.type
))
self.filters = filters
self.script = script
self._str = '%s/%s' % (self.__class__.__name__, repr(script))
def __str__(self):
return self._str
def getScript(self):
return self.script
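    # AND semantics: the script is returned only if every configured filter matches the routable.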
def matchFilters(self, routable):
"""If filters match routable, the script will be returned, if not, None will be returned
"""
if not isinstance(routable, Routable):
raise InvalidInterceptorParameterError("routable is not an instance of Routable")
for _filter in self.filters:
if not _filter.match(routable):
return None
return self.getScript()
class DefaultInterceptor(Interceptor):
"""This is a default interceptor which can contain one script
"""
type = 'default'
def __init__(self, script):
"""DefaultInterceptor can be for MO or MT messages"""
if not isinstance(script, InterceptorScript):
raise InvalidInterceptorParameterError("script is not an instance of InterceptorScript")
self.script = script
self._str = '%s/%s' % (self.__class__.__name__, repr(script))
def matchFilters(self, routable):
return self.getScript()
class MTInterceptor(Interceptor):
"""Generic MT Interceptor
"""
type = 'mt'
class MOInterceptor(Interceptor):
"""Generic MO Interceptor
"""
type = 'mo'
class StaticMOInterceptor(MOInterceptor):
"""Return one unique interceptor
"""
class StaticMTInterceptor(MTInterceptor):
"""Return one unique interceptor
"""
|
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Core Exponential Moving Average (EMA) classes and functions."""
from __future__ import annotations
import copy
import itertools
import logging
from typing import Any, Dict, List, Optional, Union
import torch
from composer.core import Algorithm, Event, State, Time, TimeUnit
from composer.loggers import Logger
log = logging.getLogger(__name__)
__all__ = ['EMA', 'compute_ema']
def compute_ema(model: T_Model, ema_model: T_Model, smoothing: float = 0.99):
r"""Updates the weights of ``ema_model`` to be closer to the weights of ``model`` according to an exponential
weighted average. Weights are updated according to
.. math::
W_{ema_model}^{(t+1)} = smoothing\times W_{ema_model}^{(t)}+(1-smoothing)\times W_{model}^{(t)}
The update to ``ema_model`` happens in place.
The half life of the weights for terms in the average is given by
.. math::
t_{1/2} = -\frac{\log(2)}{\log(smoothing)}
Therefore to set smoothing to obtain a target half life, set smoothing according to
.. math::
smoothing = \exp\left[-\frac{\log(2)}{t_{1/2}}\right]
Args:
model (torch.nn.Module): the model containing the latest weights to use to update the moving average weights.
ema_model (torch.nn.Module): the model containing the moving average weights to be updated.
smoothing (float, optional): the coefficient representing the degree to which older observations are kept.
Must be in the interval :math:`(0, 1)`. Default: ``0.99``.
Example:
.. testcode::
import composer.functional as cf
from torchvision import models
model = models.resnet50()
ema_model = models.resnet50()
cf.compute_ema(model, ema_model, smoothing=0.9)
"""
with torch.no_grad():
model_params = itertools.chain(model.parameters(), model.buffers())
ema_model_params = itertools.chain(ema_model.parameters(), ema_model.buffers())
for ema_param, model_param in zip(ema_model_params, model_params):
model_param = model_param.detach()
ema_param.copy_(ema_param * smoothing + (1. - smoothing) * model_param)
class EMA(Algorithm):
r"""Maintains a shadow model with weights that follow the exponential moving average of the trained model weights.
Weights are updated according to
.. math::
W_{ema_model}^{(t+1)} = smoothing\times W_{ema_model}^{(t)}+(1-smoothing)\times W_{model}^{(t)}
Where the smoothing is determined from ``half_life`` according to
.. math::
smoothing = \exp\left[-\frac{\log(2)}{t_{1/2}}\right]
    Model evaluation is done with the moving average weights, which can result in better generalization. Because of the
    shadow models, EMA triples the model's memory consumption. Note that this does not mean that the total memory
    required triples, however, since stored activations and the optimizer state are not duplicated. EMA also uses a small
    amount of extra compute to update the moving average weights.
See the :doc:`Method Card </method_cards/ema>` for more details.
Args:
half_life (str): The time string specifying the half life for terms in the average. A longer half life means
            old information is remembered longer, while a shorter half life means old information is discarded sooner.
            A half life of ``0`` means no averaging is done; an infinite half life means no update is done. Currently
            only units of epoch ('ep') and batch ('ba') are supported. Value must be an integer.
update_interval (str, optional): The time string specifying the period at which updates are done. For example,
an ``update_interval='1ep'`` means updates are done every epoch, while ``update_interval='10ba'`` means
updates are done once every ten batches. Units must match the units used to specify ``half_life``. If not
specified, ``update_interval`` will default to ``1`` in the units of ``half_life``. Value must be an
integer. Default: ``None``.
train_with_ema_weights (bool, optional): An experimental feature that uses the ema weights as the training
weights. In most cases should be left as ``False``. Default ``False``.
Example:
.. testcode::
from composer.algorithms import EMA
algorithm = EMA(half_life='50ba', update_interval='1ba')
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[algorithm],
optimizers=[optimizer]
)
"""
def __init__(self, half_life: str, update_interval: Optional[str] = None, train_with_ema_weights: bool = False):
self.half_life = half_life
self.update_interval = update_interval
self.train_with_ema_weights = train_with_ema_weights
self.ema_model = None
self.training_model = None
self.serialized_attributes = [
'ema_model',
'training_model',
]
# Check timestrings are parsable and convert into time object
try:
self.half_life = Time.from_timestring(half_life)
except ValueError as error:
raise ValueError(f'Invalid time string for parameter half_life') from error
# Create the update interval if none is specified
if self.update_interval is None:
self.update_interval = Time(1, self.half_life.unit)
elif type(update_interval) is str:
try:
self.update_interval = Time.from_timestring(update_interval)
except ValueError as error:
raise ValueError(f'Invalid time string for parameter update_interval') from error
else:
raise ValueError(f'update_interval must be None or a time string.')
# Verify that the units of half_life and update_interval are compatible
if self.half_life.unit != self.update_interval.unit:
raise ValueError(f'Units of half_life and update_interval must match.')
# Verify that the time strings have supported units.
if self.half_life.unit not in [TimeUnit.BATCH, TimeUnit.EPOCH]:
raise ValueError(f'Invalid time unit for parameter half_life: '
f'{self.update_interval.unit}')
# Calculate the appropriate weighting for the moving average
self.smoothing = 2**(-(self.update_interval.value / self.half_life.value))
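        # e.g. with half_life='50ba' and update_interval='1ba', smoothing = 2**(-1/50) ≈ 0.986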
# Construct the appropriate matching events
self.match_events = [Event.FIT_START, Event.EVAL_START, Event.EVAL_END]
if self.half_life.unit == TimeUnit.EPOCH:
self.match_events.append(Event.EPOCH_END)
if self.half_life.unit == TimeUnit.BATCH:
self.match_events.append(Event.BATCH_END)
def match(self, event: Event, state: State) -> bool:
return event in self.match_events
def apply(self, event: Event, state: State, logger: Logger) -> None:
assert isinstance(self.update_interval, Time)
if event == Event.FIT_START:
if self.ema_model is not None:
_move_shadow_model_to_device(self.ema_model, state.model)
if self.training_model is not None:
_move_shadow_model_to_device(self.training_model, state.model)
if event in [Event.BATCH_END, Event.EPOCH_END]:
# Check if an update should happen
if state.timestamp.get(self.update_interval.unit).value % self.update_interval.value == 0:
# Initialize the shadow models if they don't exist yet
if self.ema_model is None:
self.ema_model = ShadowModel(state.model)
if self.training_model is None and self.train_with_ema_weights is False:
self.training_model = ShadowModel(state.model)
# Update the ema model
compute_ema(state.model, self.ema_model, smoothing=self.smoothing)
if self.train_with_ema_weights:
# Use the ema weights for further training
_copy_model(self.ema_model, state.model)
if event == Event.EVAL_START and self.ema_model is not None and self.training_model is not None:
# Swap out the training model for the ema model in state
_copy_model(state.model, self.training_model)
_copy_model(self.ema_model, state.model)
if event == Event.EVAL_END and self.training_model is not None:
# Swap out the ema model for the training model in state
_copy_model(self.training_model, state.model)
def get_ema_model(self, model: torch.nn.Module):
"""Copies ema model parameters and buffers to the input model and returns it.
Args:
model (torch.nn.Module): the model to convert into the ema model.
Returns:
model (torch.nn.Module): the input model with parameters and buffers replaced with the averaged parameters
and buffers.
"""
if self.ema_model is None:
raise AttributeError('ema model has not been initialized yet')
_copy_model(self.ema_model, model)
return model
def state_dict(self) -> Dict[str, ShadowModel]:
state_dict = {}
for attribute_name in self.serialized_attributes:
shadow_model = getattr(self, attribute_name)
state_dict[attribute_name] = {}
state_dict[attribute_name]['parameters'] = shadow_model.parameters()
state_dict[attribute_name]['buffers'] = shadow_model.buffers()
return state_dict
def load_shadow_model(self, name, parameters: List, buffers: List):
shadow_model = ShadowModel(None)
shadow_model.param_list = parameters
shadow_model.buffer_list = buffers
setattr(self, name, shadow_model)
def load_state_dict(self, state: Dict[str, Any], strict: bool = False):
for attribute_name, serialized_value in state.items():
self.load_shadow_model(attribute_name, serialized_value['parameters'], serialized_value['buffers'])
class ShadowModel:
"""A shadow model that tracks parameters and buffers from an original source model.
Args:
model (torch.nn.Module): the source model containing the parameters and buffers to shadow.
"""
def __init__(self, model: Union[None, torch.nn.Module]):
if model is not None:
self.param_list = [copy.deepcopy(p.data) for p in model.parameters()]
self.buffer_list = [copy.deepcopy(b.data) for b in model.buffers()]
else:
self.param_list = []
self.buffer_list = []
def parameters(self):
return self.param_list
def buffers(self):
return self.buffer_list
T_Model = Union[torch.nn.Module, ShadowModel]
def _copy_model(source_model: T_Model, destination_model: T_Model):
"""Copies parameters and buffers from ``source_model`` to ``destination_model``"""
with torch.no_grad():
source_params = itertools.chain(source_model.parameters(), source_model.buffers())
destination_params = itertools.chain(destination_model.parameters(), destination_model.buffers())
for source_param, destination_param in zip(source_params, destination_params):
destination_param.data = source_param.data
def _move_shadow_model_to_device(shadow_model: ShadowModel, destination_model: torch.nn.Module):
"""Ensures the tensors of a shadow model are on the same device as a destination model"""
with torch.no_grad():
destination_params = destination_model.parameters()
shadow_params = shadow_model.parameters()
shadow_model.param_list = [s.to(d.device) for s, d in zip(shadow_params, destination_params)]
destination_buffers = destination_model.buffers()
shadow_buffers = shadow_model.buffers()
shadow_model.buffer_list = [s.to(d.device) for s, d in zip(shadow_buffers, destination_buffers)]
|
import cv2
import glob #library that looks for files on the filesystem with names matching a pattern - used to load the template images
original_resized = cv2.imread('../Images/corner1.png') #captured image, got from the camera
original = cv2.resize(original_resized,(75, 110))
all_templates = [] #array to store template images
card_names = ["A", "J", "K", "Q"]
card = None #rank of the matched card; stays None if no template matches
for i in glob.glob("../Images/Templates/*"): #Return a list of path names in the templates folder
template_img = cv2.imread(i)
all_templates.append(template_img)
for template, card_name in zip(all_templates, card_names): #zip allows working with more than 1 array/list at a time
image1 = original.shape #gives information about size and channels of the images (3 for b g r). Optional.
image2 = template.shape
print(image1)
print(image2)
if original.shape == template.shape: #checks if the size and amount of channels in the images match. Might not be needed
print("same size and channels")
difference = cv2.subtract(original, template) #Calculates the pixel difference between original and template images
#subtracts black pixels as in these pictures they can be either black and white
b, g, r = cv2.split(difference) #split an image into three different intensity arrays for each color channel (b g r)
print(cv2.countNonZero(b)) #the difference in blue channel
#countNonZero - counts the empty spots in the array of pixels (determines white pixels)
#less white pixels means pictures are more likely to be equal
whitePixelThreshold = 2000;
if cv2.countNonZero(b) <= whitePixelThreshold: #only needs info from one channel, the images are binary
print("equal")
print("Matching card " + card_name)
card = card_name
break
else:
print("not equal")
#do something with detected images
if card == "A":
print("Ace")
elif card == "J":
print("Jack")
elif card == "K":
print("King")
else:
print("Queen")
cv2.imshow("Original", original)
cv2.imshow("Template", template)
cv2.imshow("Subtracted image", difference)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
import os
import sys
from setuptools import setup, find_packages
from tethys_apps.app_installation import custom_develop_command, custom_install_command
### Apps Definition ###
app_package = 'ucar_hydrologic_forecasts'
release_package = 'tethysapp-' + app_package
app_class = 'ucar_hydrologic_forecasts.app:UcarHydrologicForecasts'
app_package_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tethysapp', app_package)
### Python Dependencies ###
dependencies = []
setup(
name=release_package,
version='0.0',
description='',
long_description='',
keywords='',
author='',
author_email='',
url='',
license='',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
namespace_packages=['tethysapp', 'tethysapp.' + app_package],
include_package_data=True,
zip_safe=False,
install_requires=dependencies,
cmdclass={
'install': custom_install_command(app_package, app_package_dir, dependencies),
'develop': custom_develop_command(app_package, app_package_dir, dependencies)
}
)
|
from __future__ import annotations
import os
from collections.abc import Callable
from typing import Any, ClassVar, Generic, TypeVar
import attr
from flask import current_app
from loguru import logger
from PIL import Image
from .. import redisw
from ..constants import COUNT_KEY
from ..utils import get_size, natsize
from .args import BaseImageArgs
from .utils import normalize_fmt, resolve_color
_A = TypeVar("_A", bound=BaseImageArgs)
def _jpeg_opt_kw(args: BaseImageArgs) -> dict[str, Any]:
return {
"optimize": bool(current_app.config.get("JPEG_OPTIMIZE")),
"quality": current_app.config.get("JPEG_QUALITY"),
"dpi": (args.dpi, args.dpi),
}
def _png_opt_kw(args: BaseImageArgs) -> dict[str, Any]:
    return {
        "optimize": bool(current_app.config.get("PNG_OPTIMIZE")),
        "dpi": (args.dpi, args.dpi),
        "compress_level": current_app.config.get("PNG_COMPRESS_LEVEL"),
    }
def _webp_opt_kw(args: BaseImageArgs) -> dict[str, Any]:
return {
"quality": current_app.config.get("WEBP_QUALITY"),
"method": current_app.config.get("WEBP_METHOD"),
"lossless": bool(current_app.config.get("WEBP_LOSSLESS")),
}
def _gif_opt_kw(args: BaseImageArgs) -> dict[str, Any]:
    return {"optimize": bool(current_app.config.get("GIF_OPTIMIZE"))}
SAVE_KW: dict[str, Callable[[BaseImageArgs], dict[str, Any]]] = {
"jpeg": _jpeg_opt_kw,
"png": _png_opt_kw,
"webp": _webp_opt_kw,
"gif": _gif_opt_kw,
}
@attr.s(slots=True, auto_attribs=True)
class BaseGeneratedImage(Generic[_A]):
mode: ClassVar[str] = "RGBA"
size: tuple[int, int]
fmt: str = attr.ib(converter=normalize_fmt)
bg_color: str = attr.ib(converter=resolve_color)
fg_color: str = attr.ib(converter=resolve_color)
args: _A
def get_save_kw(self) -> dict[str, Any]:
kw_func = SAVE_KW.get(self.fmt, None)
return {} if kw_func is None else kw_func(self.args)
def make(self) -> Image.Image:
raise NotImplementedError("Subclass must implement.")
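    # Save the rendered image with the format-specific options, log the resulting file name
    # and size, and increment the global image counter in redis.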
def save_img(self, im: Image.Image, path: str):
save_kw = self.get_save_kw()
im.save(path, **save_kw)
im.close()
sz = natsize(get_size(path), fmt="{0:.1f}")
logger.info('Created "{0}" ({1})', os.path.basename(path), sz)
redisw.client.incr(COUNT_KEY)
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import uuid
import gevent
import gevent.event
import gevent.monkey
gevent.monkey.patch_all()
import requests
import copy
from cStringIO import StringIO
import bottle
import logging
import logging.handlers
from datetime import datetime
import Queue
import ConfigParser
import keystoneclient.v2_0.client as keystone
import keystoneclient.v3.client as keystonev3
from netaddr import *
try:
from cfgm_common import vnc_plugin_base
from cfgm_common import utils as cfgmutils
except ImportError:
from common import vnc_plugin_base
from cfgm_common import utils as cfgmutils
from cfgm_common.utils import cgitb_hook
from pysandesh.sandesh_base import *
from pysandesh.sandesh_logger import *
from pysandesh.connection_info import ConnectionState
from pysandesh.gen_py.process_info.ttypes import ConnectionStatus
from pysandesh.gen_py.process_info.ttypes import ConnectionType as ConnType
from vnc_api import vnc_api
from vnc_api.gen.resource_xsd import *
from vnc_api.gen.resource_common import *
import neutron_plugin_interface as npi
from context import use_context
Q_CREATE = 'create'
Q_DELETE = 'delete'
Q_MAX_ITEMS = 1000
#Keystone SSL support
_DEFAULT_KS_CERT_BUNDLE="/tmp/keystonecertbundle.pem"
DEFAULT_SECGROUP_DESCRIPTION = "Default security group"
RETRIES_BEFORE_LOG = 100
def fill_keystone_opts(obj, conf_sections):
obj._auth_user = conf_sections.get('KEYSTONE', 'admin_user')
obj._auth_passwd = conf_sections.get('KEYSTONE', 'admin_password')
obj._admin_token = conf_sections.get('KEYSTONE', 'admin_token')
obj._admin_tenant = conf_sections.get('KEYSTONE', 'admin_tenant_name')
try:
obj._keystone_sync_on_demand = conf_sections.getboolean('KEYSTONE',
'keystone_sync_on_demand')
except ConfigParser.NoOptionError:
obj._keystone_sync_on_demand = True
try:
obj._insecure = conf_sections.getboolean('KEYSTONE', 'insecure')
except ConfigParser.NoOptionError:
obj._insecure = True
try:
obj._certfile = conf_sections.get('KEYSTONE', 'certfile')
except ConfigParser.NoOptionError:
obj._certfile = ''
try:
obj._keyfile = conf_sections.get('KEYSTONE', 'keyfile')
except ConfigParser.NoOptionError:
obj._keyfile = ''
try:
obj._cafile= conf_sections.get('KEYSTONE', 'cafile')
except ConfigParser.NoOptionError:
obj._cafile = ''
obj._kscertbundle=''
obj._use_certs=False
if obj._certfile:
certs = [obj._certfile]
if obj._keyfile and obj._cafile:
certs=[obj._certfile,obj._keyfile,obj._cafile]
obj._kscertbundle=cfgmutils.getCertKeyCaBundle(_DEFAULT_KS_CERT_BUNDLE,certs)
obj._use_certs=True
try:
obj._auth_url = conf_sections.get('KEYSTONE', 'auth_url')
except ConfigParser.NoOptionError:
# deprecated knobs - for backward compat
obj._auth_proto = conf_sections.get('KEYSTONE', 'auth_protocol')
obj._auth_host = conf_sections.get('KEYSTONE', 'auth_host')
obj._auth_port = conf_sections.get('KEYSTONE', 'auth_port')
obj._auth_url = "%s://%s:%s/v2.0" % (obj._auth_proto, obj._auth_host,
obj._auth_port)
try:
obj._err_file = conf_sections.get('DEFAULTS', 'trace_file')
except ConfigParser.NoOptionError:
obj._err_file = '/var/log/contrail/vnc_openstack.err'
try:
# Duration between polls to keystone to find deleted projects
resync_interval = conf_sections.get('DEFAULTS',
'keystone_resync_interval_secs')
except ConfigParser.NoOptionError:
resync_interval = '60'
obj._resync_interval_secs = float(resync_interval)
try:
# Number of workers used to process keystone project resyncing
resync_workers = conf_sections.get('DEFAULTS',
'keystone_resync_workers')
except ConfigParser.NoOptionError:
resync_workers = '10'
obj._resync_number_workers = int(resync_workers)
try:
# If new project with same name as an orphan project
        # (gone in keystone, present in contrail with resources within)
# is encountered,
# a. proceed with unique ified name (new_unique_fqn)
# b. refuse to sync (new_fail)
# c. cascade delete (TODO)
resync_mode = conf_sections.get('DEFAULTS',
'keystone_resync_stale_mode')
except ConfigParser.NoOptionError:
resync_mode = 'new_unique_fqn'
obj._resync_stale_mode = resync_mode
try:
# Get the domain_id for keystone v3
obj._domain_id = conf_sections.get('KEYSTONE', 'admin_domain_id')
except ConfigParser.NoOptionError:
obj._domain_id = 'default'
try:
# Get the user_domain_name for keystone v3
obj._user_domain_name = conf_sections.get('KEYSTONE', 'admin_user_domain_name')
except ConfigParser.NoOptionError:
obj._user_domain_name = 'Default'
try:
# Get the project_domain_name for keystone v3
obj._project_domain_name = conf_sections.get('KEYSTONE', 'project_domain_name')
except ConfigParser.NoOptionError:
obj._project_domain_name = None
try:
# Get the project_name for keystone v3
obj._project_name = conf_sections.get('KEYSTONE', 'project_name')
except ConfigParser.NoOptionError:
obj._project_name = obj._admin_tenant
try:
# Get the endpoint_type
obj._endpoint_type = conf_sections.get('KEYSTONE', 'endpoint_type')
except ConfigParser.NoOptionError:
obj._endpoint_type = None
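# Create the project's 'default' security group: allow all ingress traffic from members of
# the 'default' group and all egress traffic, for both IPv4 and IPv6.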
def _create_default_security_group(vnc_lib, proj_obj):
def _get_rule(ingress, sg, prefix, ethertype):
sgr_uuid = str(uuid.uuid4())
if sg:
addr = AddressType(
security_group=proj_obj.get_fq_name_str() + ':' + sg)
elif prefix:
addr = AddressType(subnet=SubnetType(prefix, 0))
local_addr = AddressType(security_group='local')
if ingress:
src_addr = addr
dst_addr = local_addr
else:
src_addr = local_addr
dst_addr = addr
rule = PolicyRuleType(rule_uuid=sgr_uuid, direction='>',
protocol='any',
src_addresses=[src_addr],
src_ports=[PortType(0, 65535)],
dst_addresses=[dst_addr],
dst_ports=[PortType(0, 65535)],
ethertype=ethertype)
return rule
rules = [_get_rule(True, 'default', None, 'IPv4'),
_get_rule(True, 'default', None, 'IPv6'),
_get_rule(False, None, '0.0.0.0', 'IPv4'),
_get_rule(False, None, '::', 'IPv6')]
sg_rules = PolicyEntriesType(rules)
# create security group
id_perms = IdPermsType(enable=True,
description=DEFAULT_SECGROUP_DESCRIPTION)
sg_obj = vnc_api.SecurityGroup(name='default', parent_obj=proj_obj,
id_perms=id_perms,
security_group_entries=sg_rules)
vnc_lib.security_group_create(sg_obj)
# neutron doesn't propagate user token
vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
def ensure_default_security_group(vnc_lib, proj_obj):
sg_groups = proj_obj.get_security_groups()
for sg_group in sg_groups or []:
if sg_group['to'][-1] == 'default':
return
_create_default_security_group(vnc_lib, proj_obj)
openstack_driver = None
class OpenstackDriver(vnc_plugin_base.Resync):
def __init__(self, api_server_ip, api_server_port, conf_sections, sandesh):
global openstack_driver
openstack_driver = self
if api_server_ip == '0.0.0.0':
self._vnc_api_ip = '127.0.0.1'
else:
self._vnc_api_ip = api_server_ip
self._vnc_api_port = api_server_port
self._config_sections = conf_sections
fill_keystone_opts(self, conf_sections)
if 'v3' in self._auth_url.split('/')[-1]:
self._ks_domains_list = self._ksv3_domains_list
self._ks_domain_get = self._ksv3_domain_get
self._ks_projects_list = self._ksv3_projects_list
self._ks_project_get = self._ksv3_project_get
self.sync_project_to_vnc = self._ksv3_sync_project_to_vnc
self._add_project_to_vnc = self._ksv3_add_project_to_vnc
self._del_project_from_vnc = self._ksv3_del_project_from_vnc
self._vnc_default_domain_id = None
else:
self._ks_domains_list = None
self._ks_domain_get = None
self._ks_projects_list = self._ksv2_projects_list
self._ks_project_get = self._ksv2_project_get
self.sync_project_to_vnc = self._ksv2_sync_project_to_vnc
self._add_project_to_vnc = self._ksv2_add_project_to_vnc
self._del_project_from_vnc = self._ksv2_del_project_from_vnc
self._ks = None
ConnectionState.update(conn_type=ConnType.OTHER,
name='Keystone', status=ConnectionStatus.INIT, message='',
server_addrs=[self._auth_url])
self._vnc_lib = None
# resync failures, don't retry forever
self._failed_domain_dels = set()
self._failed_project_dels = set()
# active domains/projects in contrail/vnc api server
self._vnc_domain_ids = set()
self._vnc_project_ids = set()
# logging
self._sandesh_logger = sandesh.logger()
self._vnc_os_logger = logging.getLogger(__name__)
self._vnc_os_logger.setLevel(logging.ERROR)
# Add the log message handler to the logger
try:
with open(self._err_file, 'a'):
handler = logging.handlers.RotatingFileHandler(
self._err_file, maxBytes=64*1024, backupCount=5)
self._vnc_os_logger.addHandler(handler)
except IOError:
self._sandesh_logger.error("Failed to open trace file %s" %
self._err_file)
self.q = Queue.Queue(maxsize=Q_MAX_ITEMS)
#end __init__
def _cgitb_error_log(self):
tmp_file = StringIO()
cgitb_hook(format="text", file=tmp_file)
self._vnc_os_logger.error("%s" % tmp_file.getvalue())
tmp_file.close()
def __call__(self):
pass
#end __call__
def _get_vnc_conn(self):
if self._vnc_lib:
return
self._vnc_lib = vnc_api.VncApi(
api_server_host=self._vnc_api_ip,
api_server_port=self._vnc_api_port,
username=self._auth_user,
password=self._auth_passwd,
tenant_name=self._admin_tenant)
# end _get_vnc_conn
def _get_keystone_conn(self):
if self._ks:
return
if 'v3' in self._auth_url.split('/')[-1]:
self._ks = self._ksv3_get_conn()
if self._endpoint_type and self._ks.service_catalog:
self._ks.management_url = \
self._ks.service_catalog.get_urls(
service_type='identity',
endpoint_type=self._endpoint_type)[0]
else:
self._ks = self._ksv2_get_conn()
ConnectionState.update(conn_type=ConnType.OTHER,
name='Keystone', status=ConnectionStatus.UP, message='',
server_addrs=[self._auth_url])
# end _get_keystone_conn
def _ksv2_get_conn(self):
if self._admin_token:
if self._insecure:
return keystone.Client(token=self._admin_token,
endpoint=self._auth_url,
insecure=self._insecure)
elif not self._insecure and self._use_certs:
return keystone.Client(token=self._admin_token,
endpoint=self._auth_url,
cacert=self._kscertbundle)
else:
return keystone.Client(token=self._admin_token,
endpoint=self._auth_url)
else:
if self._insecure:
return keystone.Client(username=self._auth_user,
password=self._auth_passwd,
tenant_name=self._admin_tenant,
auth_url=self._auth_url,
insecure=self._insecure)
elif not self._insecure and self._use_certs:
return keystone.Client(username=self._auth_user,
password=self._auth_passwd,
tenant_name=self._admin_tenant,
auth_url=self._auth_url,
cacert=self._kscertbundle)
else:
return keystone.Client(username=self._auth_user,
password=self._auth_passwd,
tenant_name=self._admin_tenant,
auth_url=self._auth_url)
# end _ksv2_get_conn
def _ksv2_projects_list(self):
return [{'id': tenant.id} for tenant in self._ks.tenants.list()]
# end _ksv2_projects_list
def _ksv2_project_get(self, id):
        # Note: under certain circumstances (if it has been initialized
        # before endpoints are populated in keystone) keystoneclient may
        # be able to list projects, but not to read them. As it won't
        # be reset by resync_all_projects, it is reset on error here.
try:
return {'name': self._ks.tenants.get(id).name}
except Exception as e:
if self._ks is not None:
self._ks = None
ConnectionState.update(conn_type=ConnType.OTHER,
name='Keystone', status=ConnectionStatus.DOWN,
message='Error: %s at UTC %s' %(e, datetime.utcnow()),
server_addrs=[self._auth_url])
self._get_keystone_conn()
return {'name': self._ks.tenants.get(id).name}
# end _ksv2_project_get
def _ksv2_sync_project_to_vnc(self, id=None):
self._get_keystone_conn()
self._get_vnc_conn()
ks_project = self._ks_project_get(id=id.replace('-', ''))
display_name = ks_project['name']
proj_name = display_name
        # if an earlier project exists with the same name but a different id,
        # create with a uniquified fq_name
fq_name = ['default-domain', display_name]
try:
old_id = self._vnc_lib.fq_name_to_id('project', fq_name)
if old_id == id:
self._vnc_project_ids.add(id)
return
# Project might have been quickly deleted + added.
# Since project delete sync happens only in timer(polling),
# try deleting old one synchronously. If delete fails due
# to resources being present in project, proceed/fail
# based on configuration
try:
self._vnc_lib.project_delete(fq_name=fq_name)
except vnc_api.NoIdError:
pass
except vnc_api.RefsExistError:
if self._resync_stale_mode == 'new_unique_fqn':
proj_name = '%s-%s' %(display_name, str(uuid.uuid4()))
else:
errmsg = "Old project %s fqn %s exists and not empty" %(
old_id, fq_name)
self._sandesh_logger.error(errmsg)
raise Exception(errmsg)
except vnc_api.NoIdError:
pass
proj_obj = vnc_api.Project(proj_name)
proj_obj.display_name = display_name
proj_obj.uuid = id
self._vnc_lib.project_create(proj_obj)
self._vnc_project_ids.add(id)
# end _ksv2_sync_project_to_vnc
def _ksv2_add_project_to_vnc(self, project_id):
try:
self._vnc_lib.project_read(id=project_id)
# project exists, no-op for now,
# sync any attr changes in future
except vnc_api.NoIdError:
self._ksv2_sync_project_to_vnc(project_id)
# _ksv2_add_project_to_vnc
def _ksv2_del_project_from_vnc(self, project_id):
if project_id in self._failed_project_dels:
return
try:
self._vnc_lib.project_delete(id=project_id)
except vnc_api.NoIdError:
pass
except Exception as e:
self._cgitb_error_log()
self._sandesh_logger.error("Failed to delete project %s: %s" %
(project_id, e))
self._failed_project_dels.add(project_id)
# _ksv2_del_project_from_vnc
def _ksv3_get_conn(self):
if self._admin_token:
if not self._insecure and self._use_certs:
return keystonev3.Client(token=self._admin_token,
endpoint=self._auth_url,
verify=self._kscertbundle)
else:
return keystonev3.Client(token=self._admin_token,
endpoint=self._auth_url,
insecure=self._insecure)
elif self._project_domain_name:
return keystonev3.Client(user_domain_name=self._user_domain_name,
username=self._auth_user,
password=self._auth_passwd,
project_domain_name=self._project_domain_name,
project_name=self._project_name,
auth_url=self._auth_url,
insecure=self._insecure)
else:
if not self._insecure and self._use_certs:
return keystonev3.Client(user_domain_name=self._user_domain_name,
username=self._auth_user,
password=self._auth_passwd,
domain_id=self._domain_id,
auth_url=self._auth_url,
verify=self._kscertbundle)
else:
return keystonev3.Client(user_domain_name=self._user_domain_name,
username=self._auth_user,
password=self._auth_passwd,
domain_id=self._domain_id,
auth_url=self._auth_url,
insecure=self._insecure)
# end _ksv3_get_conn
def _ksv3_domains_list(self):
return [{'id': domain.id} for domain in self._ks.domains.list()]
# end _ksv3_domains_list
def _ksv3_domain_id_to_uuid(self, domain_id):
if domain_id == 'default':
return self._vnc_default_domain_id
return str(uuid.UUID(domain_id))
# _ksv3_domain_id_to_uuid
def _ksv3_domain_get(self, id=None):
try:
return {'name': self._ks.domains.get(id).name}
except Exception as e:
if self._ks is not None:
self._ks = None
ConnectionState.update(conn_type=ConnType.OTHER,
name='Keystone', status=ConnectionStatus.DOWN,
message='Error: %s at UTC %s' %(e, datetime.utcnow()),
server_addrs=[self._auth_url])
self._get_keystone_conn()
return {'name': self._ks.domains.get(id).name}
# end _ksv3_domain_get
def _ksv3_projects_list(self):
return [{'id': project.id} for project in self._ks.projects.list()]
# end _ksv3_projects_list
def _ksv3_project_get(self, id=None):
try:
project = self._ks.projects.get(id)
return {'id': project.id, 'name': project.name, 'domain_id': project.domain_id}
except Exception as e:
if self._ks is not None:
self._ks = None
ConnectionState.update(conn_type=ConnType.OTHER,
name='Keystone', status=ConnectionStatus.DOWN,
message='Error: %s at UTC %s' %(e, datetime.utcnow()),
server_addrs=[self._auth_url])
self._get_keystone_conn()
project = self._ks.projects.get(id)
return {'id': project.id, 'name': project.name, 'domain_id': project.domain_id}
# end _ksv3_project_get
def _ksv3_sync_project_to_vnc(self, id=None, name=None):
self._get_keystone_conn()
self._get_vnc_conn()
if id:
ks_project = \
self._ks_project_get(id=id.replace('-', ''))
display_name = ks_project['name']
project_id = id
elif name:
ks_project = \
self._ks_project_get(name=name)
project_id = ks_project['id']
display_name = name
domain_uuid = self._ksv3_domain_id_to_uuid(ks_project['domain_id'])
dom_obj = self._vnc_lib.domain_read(id=domain_uuid)
        # if an earlier project exists with the same name but a different id,
        # create with a uniquified fq_name
fq_name = dom_obj.get_fq_name() + [display_name]
project_name = display_name
try:
old_id = self._vnc_lib.fq_name_to_id('project', fq_name)
if old_id == project_id:
self._vnc_project_ids.add(project_id)
return
# Project might have been quickly deleted + added.
# Since project delete sync happens only in timer(polling),
# try deleting old one synchronously. If delete fails due
# to resources being present in project, proceed/fail
# based on configuration
try:
self._vnc_lib.project_delete(fq_name=fq_name)
except vnc_api.NoIdError:
pass
except vnc_api.RefsExistError:
if self._resync_stale_mode == 'new_unique_fqn':
project_name = '%s-%s' %(display_name, str(uuid.uuid4()))
else:
errmsg = "Old project %s fqn %s exists and not empty" %(
old_id, fq_name)
self._sandesh_logger.error(errmsg)
raise Exception(errmsg)
except vnc_api.NoIdError:
pass
proj_obj = vnc_api.Project(project_name, parent_obj=dom_obj)
proj_obj.display_name = display_name
proj_obj.uuid = project_id
self._vnc_lib.project_create(proj_obj)
self._vnc_domain_ids.add(domain_uuid)
self._vnc_project_ids.add(project_id)
# end _ksv3_sync_project_to_vnc
def _ksv3_add_project_to_vnc(self, project_id):
try:
self._vnc_lib.project_read(id=project_id)
# project exists, no-op for now,
# sync any attr changes in future
except vnc_api.NoIdError:
self._ksv3_sync_project_to_vnc(id=project_id)
# _ksv3_add_project_to_vnc
def _ksv3_del_project_from_vnc(self, project_id):
if project_id in self._failed_project_dels:
return
try:
self._vnc_lib.project_delete(id=project_id)
except vnc_api.NoIdError:
pass
except Exception as e:
self._cgitb_error_log()
self._sandesh_logger.error("Failed to delete project %s "
"from vnc: %s" % (project_id, e))
self._failed_project_dels.add(project_id)
# _ksv3_del_project_from_vnc
def sync_domain_to_vnc(self, domain_id):
self._get_keystone_conn()
self._get_vnc_conn()
ks_domain = \
self._ks_domain_get(domain_id.replace('-', ''))
display_name = ks_domain['name']
domain_name = display_name
        # if an earlier domain exists with the same name but a different id,
        # create with a uniquified fq_name
fq_name = [display_name]
try:
old_id = self._vnc_lib.fq_name_to_id('domain', fq_name)
if domain_id == old_id:
self._vnc_domain_ids.add(domain_id)
return
# Domain might have been quickly deleted + added.
# Since domain delete sync happens only in timer(polling),
# try deleting old one synchronously. If delete fails due
# to resources being present in domain, proceed/fail
# based on configuration
try:
self._vnc_lib.domain_delete(fq_name=fq_name)
except vnc_api.NoIdError:
pass
except vnc_api.RefsExistError:
if self._resync_stale_mode == 'new_unique_fqn':
domain_name = '%s-%s' %(display_name, str(uuid.uuid4()))
else:
errmsg = "Old domain %s fqn %s exists and not empty" %(
old_id, fq_name)
self._sandesh_logger.error(errmsg)
raise Exception(errmsg)
except vnc_api.NoIdError:
pass
dom_obj = vnc_api.Domain(domain_name)
dom_obj.display_name = display_name
dom_obj.uuid = domain_id
self._vnc_lib.domain_create(dom_obj)
self._vnc_domain_ids.add(domain_id)
# sync_domain_to_vnc
def _add_domain_to_vnc(self, domain_id):
try:
self._vnc_lib.domain_read(id=domain_id)
# domain exists, no-op for now,
# sync any attr changes in future
except vnc_api.NoIdError:
self.sync_domain_to_vnc(domain_id)
# _add_domain_to_vnc
def _del_domain_from_vnc(self, domain_id):
if domain_id in self._failed_domain_dels:
return
try:
self._vnc_lib.domain_delete(id=domain_id)
except vnc_api.NoIdError:
pass
except Exception as e:
self._sandesh_logger.error("Failed to delete domain %s "
"from vnc: %s" % (domain_id, e))
self._cgitb_error_log()
self._failed_domain_dels.add(domain_id)
# _del_domain_from_vnc
def _resync_all_domains(self):
if not self._ks_domains_list:
# < keystonev3, no domains
return False
self._get_keystone_conn()
# compare new and old set,
# optimize for common case where nothing has changed,
        # so track the domain-ids in a set, adding back the '-'
        # since keystone returns uuids without dashes
try:
# The Default domain in ks(for v2 support) has id of 'default'
# replace with uuid of default-domain in vnc
ks_domain_ids = set(
[str(uuid.UUID(dom['id']))
for dom in self._ks_domains_list() if dom['id'] != 'default'])
ks_domain_ids.add(self._vnc_default_domain_id)
except Exception as e:
if self._ks is not None:
self._ks = None
ConnectionState.update(conn_type=ConnType.OTHER,
name='Keystone', status=ConnectionStatus.DOWN,
message='Error: %s at UTC %s' %(e, datetime.utcnow()),
server_addrs=[self._auth_url])
return True # retry
vnc_domain_ids = self._vnc_domain_ids
if vnc_domain_ids == ks_domain_ids:
# no change, go back to poll
return False
for vnc_domain_id in vnc_domain_ids - ks_domain_ids:
self.q.put((Q_DELETE, 'domain', vnc_domain_id))
if self._keystone_sync_on_demand:
# pre_domain_read will get it
pass
else:
for ks_domain_id in ks_domain_ids - vnc_domain_ids:
self.q.put((Q_CREATE, 'domain', ks_domain_id))
self.q.join()
gevent.sleep(0)
# we are in sync
self._vnc_domain_ids = ks_domain_ids
return False
# end _resync_all_domains
def _resync_all_projects(self):
self._get_keystone_conn()
# compare new and old set,
# optimize for common case where nothing has changed,
        # so track the project-ids in a set, adding back the '-'
        # since keystone returns uuids without dashes
try:
ks_project_ids = set(
[str(uuid.UUID(proj['id']))
for proj in self._ks_projects_list()])
except Exception as e:
if self._ks is not None:
self._ks = None
ConnectionState.update(conn_type=ConnType.OTHER,
name='Keystone', status=ConnectionStatus.DOWN,
message='Error: %s at UTC %s' %(e, datetime.utcnow()),
server_addrs=[self._auth_url])
return True # retry
vnc_project_ids = self._vnc_project_ids
if vnc_project_ids == ks_project_ids:
# no change, go back to poll
return False
for vnc_project_id in vnc_project_ids - ks_project_ids:
self.q.put((Q_DELETE, 'project', vnc_project_id))
if self._keystone_sync_on_demand:
pass # pre_project_read will get it
else:
for ks_project_id in ks_project_ids - vnc_project_ids:
self.q.put((Q_CREATE, 'project', ks_project_id))
self.q.join()
gevent.sleep(0)
# we are in sync
self._vnc_project_ids = ks_project_ids
return False
# end _resync_all_projects
def _resync_domains_projects_forever(self):
try:
# get connection to api-server REST interface
while True:
try:
self._get_vnc_conn()
break
except requests.ConnectionError:
gevent.sleep(1)
vnc_domains = self._vnc_lib.domains_list()['domains']
for dom in vnc_domains:
self._vnc_domain_ids.add(dom['uuid'])
if dom['fq_name'] == ['default-domain']:
self._vnc_default_domain_id = dom['uuid']
vnc_all_projects = self._vnc_lib.projects_list()['projects']
# remove default-domain:default-project from audit list
default_proj_fq_name = ['default-domain', 'default-project']
vnc_project_ids = set([proj['uuid'] for proj in vnc_all_projects
if proj['fq_name'] != default_proj_fq_name])
self._vnc_project_ids = vnc_project_ids
except Exception as e:
self._cgitb_error_log()
self._sandesh_logger.error(
"Connection to API server failed: %s" % e)
while True:
# Get domains/projects from Keystone and audit with api-server
try:
retry = self._resync_all_domains()
if retry:
gevent.sleep(self._resync_interval_secs)
continue
except Exception as e:
if self._ks is not None:
self._ks = None
ConnectionState.update(conn_type=ConnType.OTHER,
name='Keystone', status=ConnectionStatus.DOWN,
message='Error: %s at UTC %s' %(e, datetime.utcnow()),
server_addrs=[self._auth_url])
self._cgitb_error_log()
self._sandesh_logger.error(
"Failed to resync domains: %s" % e)
try:
retry = self._resync_all_projects()
if retry:
gevent.sleep(self._resync_interval_secs)
continue
except Exception as e:
if self._ks is not None:
self._ks = None
ConnectionState.update(conn_type=ConnType.OTHER,
name='Keystone', status=ConnectionStatus.DOWN,
message='Error: %s at UTC %s' %(e, datetime.utcnow()),
server_addrs=[self._auth_url])
self._cgitb_error_log()
self._sandesh_logger.error(
"Failed to resync projects: %s" % e)
gevent.sleep(self._resync_interval_secs)
#end while True
#end _resync_domains_projects_forever
def resync_domains_projects(self):
# add asynchronously
self._main_glet = gevent.spawn(self._resync_domains_projects_forever)
self._worker_glets = []
for x in range(self._resync_number_workers):
self._worker_glets.append(gevent.spawn(self._resync_worker))
#end resync_domains_projects
def _resync_worker(self):
while True:
oper, obj_type, obj_id = self.q.get()
try:
if oper == Q_DELETE:
if obj_type == 'domain':
self._del_domain_from_vnc(obj_id)
elif obj_type == 'project':
self._del_project_from_vnc(obj_id)
else:
raise KeyError("An invalid obj_type was specified: %s",
obj_type)
elif oper == Q_CREATE:
if obj_type == 'domain':
self._add_domain_to_vnc(obj_id)
elif obj_type == 'project':
self._add_project_to_vnc(obj_id)
else:
raise KeyError("An invalid obj_type was specified: %s",
obj_type)
else:
raise KeyError("An invalid operation was specified: %s", oper)
except (ValueError, KeyError, Exception):
                # For an unpack error or an invalid kind.
self.log_exception()
finally:
self.q.task_done()
# end _resync_worker
#end class OpenstackDriver
class ResourceApiDriver(vnc_plugin_base.ResourceApi):
def __init__(self, api_server_ip, api_server_port, conf_sections, sandesh,
propagate_map_exceptions=False):
if api_server_ip == '0.0.0.0':
self._vnc_api_ip = '127.0.0.1'
else:
self._vnc_api_ip = api_server_ip
self._sandesh_logger = sandesh.logger()
self._vnc_api_port = api_server_port
self._config_sections = conf_sections
fill_keystone_opts(self, conf_sections)
self._vnc_lib = None
self._openstack_drv = openstack_driver
self._connected_to_api_server = gevent.event.Event()
self._conn_glet = gevent.spawn(self._get_api_connection)
# end __init__
def _get_api_connection(self):
if self._vnc_lib:
return
# get connection to api-server REST interface
tries = 0
while True:
try:
tries = tries + 1
self._vnc_lib = vnc_api.VncApi(
api_server_host=self._vnc_api_ip,
api_server_port=self._vnc_api_port,
username=self._auth_user,
password=self._auth_passwd,
tenant_name=self._admin_tenant)
self._connected_to_api_server.set()
vnc_lib = self._vnc_lib
domain_id = vnc_lib.fq_name_to_id(
'domain', ['default-domain'])
project_id = vnc_lib.fq_name_to_id(
'project', ['default-domain', 'default-project'])
break
except Exception as e:
if tries % RETRIES_BEFORE_LOG == 0:
err_msg = "Connect error to contrail api %s tries: %s" \
%(tries, e)
self._sandesh_logger.error(err_msg)
gevent.sleep(1)
# end _get_api_connection
def __call__(self):
pass
#end __call__
def _create_default_security_group(self, proj_dict):
proj_obj = vnc_api.Project.from_dict(**proj_dict)
ensure_default_security_group(self._vnc_lib, proj_obj)
# end _create_default_security_group
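    # Decorator: blocks the wrapped hook until the connection to the contrail api-server
    # has been established (see _get_api_connection).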
def wait_for_api_server_connection(func):
def wrapper(self, *args, **kwargs):
self._connected_to_api_server.wait()
return func(self, *args, **kwargs)
return wrapper
# end wait_for_api_server_connection
@wait_for_api_server_connection
def pre_domain_read(self, id):
if not self._keystone_sync_on_demand:
# domain added via poll
return
# use list instead of read as read will be recursive
# leading us back here!
dom_list = self._vnc_lib.domains_list(obj_uuids=[id])
if len(dom_list['domains']) == 1:
# read succeeded domain already known, done.
return
# follow through, and sync domain to contrail
try:
self._openstack_drv.sync_domain_to_vnc(id)
except vnc_api.RefsExistError as e:
            # another api server has already synced it
pass
# end pre_domain_read
@wait_for_api_server_connection
def pre_project_read(self, id):
if not self._keystone_sync_on_demand:
# project added via poll
return
# use list instead of read as read will be recursive
# leading us back here!
proj_list = self._vnc_lib.projects_list(obj_uuids=[id])
if len(proj_list['projects']) == 1:
# read succeeded project already known, done.
return
# follow through, and sync project to contrail
try:
self._openstack_drv.sync_project_to_vnc(id)
except vnc_api.RefsExistError as e:
            # another api server has already synced it
pass
# end pre_project_read
@wait_for_api_server_connection
def post_project_create(self, proj_dict):
self._create_default_security_group(proj_dict)
# end post_create_project
@wait_for_api_server_connection
def pre_project_delete(self, proj_uuid):
proj_obj = self._vnc_lib.project_read(id=proj_uuid)
sec_groups = proj_obj.get_security_groups()
for group in sec_groups or []:
if group['to'][2] == 'default':
self._vnc_lib.security_group_delete(id=group['uuid'])
# end pre_project_delete
@wait_for_api_server_connection
def pre_virtual_network_create(self, vn_dict):
pass
# end pre_virtual_network_create
def _update_subnet_id(self, vn_uuid, new_refs, old_refs):
def get_subnets(ipam_refs):
subnets = {}
if ipam_refs:
for ipam_ref in ipam_refs:
vnsn_data = ipam_ref['attr']
ipam_subnets = vnsn_data['ipam_subnets']
for ipam_subnet in ipam_subnets:
if 'subnet' in ipam_subnet:
subnet_dict = copy.deepcopy(ipam_subnet['subnet'])
prefix = subnet_dict['ip_prefix']
prefix_len = subnet_dict['ip_prefix_len']
else:
                            # flat subnet, where the subnet-uuid is unique and
                            # represents all the subnets on the ipam
prefix = '0.0.0.0'
prefix_len = 0
network = IPNetwork('%s/%s' % (prefix, prefix_len))
subnet_name = vn_uuid + ' ' + str(network)
subnet_uuid = ipam_subnet['subnet_uuid']
subnets[subnet_uuid] = subnet_name
return subnets
new_subnets = get_subnets(new_refs)
existing_subnets = get_subnets(old_refs)
add_subnets = set(new_subnets.keys()) - set(existing_subnets.keys())
del_subnets = set(existing_subnets.keys()) - set(new_subnets.keys())
for subnet in del_subnets or []:
try:
self._vnc_lib.kv_delete(existing_subnets[subnet])
except NoIdError:
pass
self._vnc_lib.kv_delete(subnet)
for subnet in add_subnets or []:
self._vnc_lib.kv_store(subnet, new_subnets[subnet])
@wait_for_api_server_connection
def post_virtual_network_create(self, vn_dict):
self._update_subnet_id(vn_dict['uuid'], vn_dict.get('network_ipam_refs'), None)
# end post_virtual_network_create
@wait_for_api_server_connection
def post_virtual_network_update(self, vn_uuid, vn_dict, old_dict):
ipam_refs = vn_dict.get('network_ipam_refs')
if ipam_refs is None:
return
self._update_subnet_id(vn_uuid, vn_dict.get('network_ipam_refs'),
old_dict.get('network_ipam_refs'))
# end post_virtual_network_update
@wait_for_api_server_connection
def post_virtual_network_delete(self, vn_uuid, vn_dict):
self._update_subnet_id(vn_uuid, None, vn_dict.get('network_ipam_refs'))
# end post_virtual_network_delete
# end class ResourceApiDriver
class NeutronApiDriver(vnc_plugin_base.NeutronApi):
def __init__(self, api_server_ip, api_server_port, conf_sections, sandesh, **kwargs):
self._logger = sandesh.logger()
self.api_server_obj = kwargs.get('api_server_obj')
self._npi = npi.NeutronPluginInterface(api_server_ip, api_server_port,
conf_sections, sandesh, api_server_obj=self.api_server_obj)
# Bottle callbacks for network operations
self.route('/neutron/network',
'POST', self._npi.plugin_http_post_network)
# Bottle callbacks for subnet operations
self.route('/neutron/subnet',
'POST', self._npi.plugin_http_post_subnet)
# Bottle callbacks for port operations
self.route('/neutron/port',
'POST', self._npi.plugin_http_post_port)
# Bottle callbacks for floating IP operations
self.route('/neutron/floatingip',
'POST', self._npi.plugin_http_post_floatingip)
# Bottle callbacks for security group operations
self.route('/neutron/security_group',
'POST', self._npi.plugin_http_post_securitygroup)
# Bottle callbacks for security group rule operations
self.route('/neutron/security_group_rule',
'POST', self._npi.plugin_http_post_securitygrouprule)
# Bottle callbacks for router operations
self.route('/neutron/router',
'POST', self._npi.plugin_http_post_router)
# Bottle callbacks for ipam operations
self.route('/neutron/ipam',
'POST', self._npi.plugin_http_post_ipam)
# Bottle callbacks for Policy operations
self.route('/neutron/policy',
'POST', self._npi.plugin_http_post_policy)
# Bottle callbacks for route-table operations
self.route('/neutron/route_table',
'POST', self._npi.plugin_http_post_route_table)
# Bottle callbacks for svc-instance operations
self.route('/neutron/nat_instance',
'POST', self._npi.plugin_http_post_svc_instance)
# Bottle callbacks for virtual-router operations
self.route('/neutron/virtual_router',
'POST', self._npi.plugin_http_post_virtual_router)
def route(self, uri, method, handler):
@use_context
def handler_trap_exception(*args, **kwargs):
try:
response = handler(*args, **kwargs)
return response
except Exception as e:
                # don't log details of bottle.abort, i.e. handled error cases
if not isinstance(e, bottle.HTTPError):
string_buf = StringIO()
cgitb_hook(file=string_buf, format="text",)
err_msg = string_buf.getvalue()
self._logger.error(err_msg)
raise
self.api_server_obj.api_bottle.route(uri, method, handler_trap_exception)
def __call__(self):
pass
|
# porcelain.py -- Porcelain-like layer on top of Dulwich
# Copyright (C) 2013 Jelmer Vernooij <jelmer@samba.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# or (at your option) a later version of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""Simple wrapper that provides porcelain-like functions on top of Dulwich.
Currently implemented:
* archive
* add
* clone
* commit
* commit-tree
* daemon
* diff-tree
* init
* list-tags
* pull
* push
* rm
* reset
* rev-list
* tag
* update-server-info
* status
* symbolic-ref
These functions are meant to behave similarly to the git subcommands.
Differences in behaviour are considered bugs.
"""
__docformat__ = 'restructuredText'
from collections import namedtuple
import os
import sys
import time
from dulwich import index
from dulwich.client import get_transport_and_path
from dulwich.errors import (
SendPackError,
UpdateRefsError,
)
from dulwich.index import get_unstaged_changes
from dulwich.objects import (
Tag,
parse_timezone,
)
from dulwich.objectspec import parse_object
from dulwich.patch import write_tree_diff
from dulwich.repo import (BaseRepo, Repo)
from dulwich.server import update_server_info as server_update_server_info
# Module level tuple definition for status output
GitStatus = namedtuple('GitStatus', 'staged unstaged untracked')
def open_repo(path_or_repo):
"""Open an argument that can be a repository or a path for a repository."""
if isinstance(path_or_repo, BaseRepo):
return path_or_repo
return Repo(path_or_repo)
def archive(location, committish=None, outstream=sys.stdout,
errstream=sys.stderr):
"""Create an archive.
:param location: Location of repository for which to generate an archive.
:param committish: Commit SHA1 or ref to use
:param outstream: Output stream (defaults to stdout)
:param errstream: Error stream (defaults to stderr)
"""
client, path = get_transport_and_path(location)
if committish is None:
committish = "HEAD"
# TODO(jelmer): This invokes C git; this introduces a dependency.
# Instead, dulwich should have its own archiver implementation.
client.archive(path, committish, outstream.write, errstream.write,
errstream.write)
def update_server_info(repo="."):
"""Update server info files for a repository.
:param repo: path to the repository
"""
r = open_repo(repo)
server_update_server_info(r)
def symbolic_ref(repo, ref_name, force=False):
"""Set git symbolic ref into HEAD.
:param repo: path to the repository
:param ref_name: short name of the new ref
:param force: force settings without checking if it exists in refs/heads
"""
repo_obj = open_repo(repo)
ref_path = 'refs/heads/%s' % ref_name
if not force and ref_path not in repo_obj.refs.keys():
raise ValueError('fatal: ref `%s` is not a ref' % ref_name)
repo_obj.refs.set_symbolic_ref('HEAD', ref_path)
def commit(repo=".", message=None, author=None, committer=None):
"""Create a new commit.
:param repo: Path to repository
:param message: Optional commit message
:param author: Optional author name and email
:param committer: Optional committer name and email
:return: SHA1 of the new commit
"""
# FIXME: Support --all argument
# FIXME: Support --signoff argument
r = open_repo(repo)
return r.do_commit(message=message, author=author,
committer=committer)
def commit_tree(repo, tree, message=None, author=None, committer=None):
"""Create a new commit object.
:param repo: Path to repository
:param tree: An existing tree object
:param author: Optional author name and email
:param committer: Optional committer name and email
"""
r = open_repo(repo)
return r.do_commit(message=message, tree=tree, committer=committer,
author=author)
def init(path=".", bare=False):
"""Create a new git repository.
:param path: Path to repository.
:param bare: Whether to create a bare repository.
:return: A Repo instance
"""
if not os.path.exists(path):
os.mkdir(path)
if bare:
return Repo.init_bare(path)
else:
return Repo.init(path)
def clone(source, target=None, bare=False, checkout=None, outstream=sys.stdout):
"""Clone a local or remote git repository.
:param source: Path or URL for source repository
:param target: Path to target repository (optional)
:param bare: Whether or not to create a bare repository
:param outstream: Optional stream to write progress to
:return: The new repository
"""
if checkout is None:
checkout = (not bare)
if checkout and bare:
raise ValueError("checkout and bare are incompatible")
client, host_path = get_transport_and_path(source)
if target is None:
target = host_path.split("/")[-1]
if not os.path.exists(target):
os.mkdir(target)
if bare:
r = Repo.init_bare(target)
else:
r = Repo.init(target)
remote_refs = client.fetch(host_path, r,
determine_wants=r.object_store.determine_wants_all,
progress=outstream.write)
r["HEAD"] = remote_refs["HEAD"]
if checkout:
outstream.write('Checking out HEAD')
index.build_index_from_tree(r.path, r.index_path(),
r.object_store, r["HEAD"].tree)
return r
def add(repo=".", paths=None):
"""Add files to the staging area.
:param repo: Repository for the files
    :param paths: Paths to add. If no paths are given, all non-ignored files are staged.
"""
# FIXME: Support patterns, directories.
r = open_repo(repo)
if not paths:
# If nothing is specified, add all non-ignored files.
paths = []
for dirpath, dirnames, filenames in os.walk(r.path):
# Skip .git and below.
if '.git' in dirnames:
dirnames.remove('.git')
for filename in filenames:
paths.append(os.path.join(dirpath[len(r.path)+1:], filename))
r.stage(paths)
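# A minimal usage sketch of the porcelain layer (not part of the original module;
# the helper below and the paths/identities it uses are illustrative only):
def _example_porcelain_workflow(path="/tmp/example-repo"):
    """Initialise a repository, stage a file, and commit it."""
    init(path)
    with open(os.path.join(path, "README"), "w") as f:
        f.write("hello\n")
    add(repo=path, paths=["README"])
    return commit(repo=path, message="Initial commit",
                  author="Example <example@example.com>",
                  committer="Example <example@example.com>")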
def rm(repo=".", paths=None):
"""Remove files from the staging area.
:param repo: Repository for the files
:param paths: Paths to remove
"""
r = open_repo(repo)
index = r.open_index()
for p in paths:
del index[p]
index.write()
def print_commit(commit, outstream=sys.stdout):
"""Write a human-readable commit log entry.
:param commit: A `Commit` object
:param outstream: A stream file to write to
"""
outstream.write("-" * 50 + "\n")
outstream.write("commit: %s\n" % commit.id)
if len(commit.parents) > 1:
outstream.write("merge: %s\n" % "...".join(commit.parents[1:]))
outstream.write("author: %s\n" % commit.author)
outstream.write("committer: %s\n" % commit.committer)
outstream.write("\n")
outstream.write(commit.message + "\n")
outstream.write("\n")
def print_tag(tag, outstream=sys.stdout):
"""Write a human-readable tag.
:param tag: A `Tag` object
:param outstream: A stream to write to
"""
outstream.write("Tagger: %s\n" % tag.tagger)
outstream.write("Date: %s\n" % tag.tag_time)
outstream.write("\n")
outstream.write("%s\n" % tag.message)
outstream.write("\n")
def show_blob(repo, blob, outstream=sys.stdout):
"""Write a blob to a stream.
:param repo: A `Repo` object
:param blob: A `Blob` object
:param outstream: A stream file to write to
"""
outstream.write(blob.data)
def show_commit(repo, commit, outstream=sys.stdout):
"""Show a commit to a stream.
:param repo: A `Repo` object
:param commit: A `Commit` object
:param outstream: Stream to write to
"""
print_commit(commit, outstream)
parent_commit = repo[commit.parents[0]]
write_tree_diff(outstream, repo.object_store, parent_commit.tree, commit.tree)
def show_tree(repo, tree, outstream=sys.stdout):
"""Print a tree to a stream.
:param repo: A `Repo` object
:param tree: A `Tree` object
:param outstream: Stream to write to
"""
for n in tree:
outstream.write("%s\n" % n)
def show_tag(repo, tag, outstream=sys.stdout):
"""Print a tag to a stream.
:param repo: A `Repo` object
:param tag: A `Tag` object
:param outstream: Stream to write to
"""
print_tag(tag, outstream)
show_object(repo, repo[tag.object[1]], outstream)
def show_object(repo, obj, outstream):
return {
"tree": show_tree,
"blob": show_blob,
"commit": show_commit,
"tag": show_tag,
}[obj.type_name](repo, obj, outstream)
def log(repo=".", outstream=sys.stdout, max_entries=None):
"""Write commit logs.
:param repo: Path to repository
:param outstream: Stream to write log output to
:param max_entries: Optional maximum number of entries to display
"""
r = open_repo(repo)
walker = r.get_walker(max_entries=max_entries)
for entry in walker:
print_commit(entry.commit, outstream)
def show(repo=".", objects=None, outstream=sys.stdout):
"""Print the changes in a commit.
:param repo: Path to repository
:param objects: Objects to show (defaults to [HEAD])
:param outstream: Stream to write to
"""
if objects is None:
objects = ["HEAD"]
if not isinstance(objects, list):
objects = [objects]
r = open_repo(repo)
for objectish in objects:
show_object(r, parse_object(r, objectish), outstream)
def diff_tree(repo, old_tree, new_tree, outstream=sys.stdout):
"""Compares the content and mode of blobs found via two tree objects.
:param repo: Path to repository
:param old_tree: Id of old tree
:param new_tree: Id of new tree
:param outstream: Stream to write to
"""
r = open_repo(repo)
write_tree_diff(outstream, r.object_store, old_tree, new_tree)
def rev_list(repo, commits, outstream=sys.stdout):
"""Lists commit objects in reverse chronological order.
:param repo: Path to repository
:param commits: Commits over which to iterate
:param outstream: Stream to write to
"""
r = open_repo(repo)
for entry in r.get_walker(include=[r[c].id for c in commits]):
outstream.write("%s\n" % entry.commit.id)
def tag(repo, tag, author=None, message=None, annotated=False,
objectish="HEAD", tag_time=None, tag_timezone=None):
"""Creates a tag in git via dulwich calls:
:param repo: Path to repository
:param tag: tag string
:param author: tag author (optional, if annotated is set)
:param message: tag message (optional)
:param annotated: whether to create an annotated tag
:param objectish: object the tag should point at, defaults to HEAD
:param tag_time: Optional time for annotated tag
:param tag_timezone: Optional timezone for annotated tag
"""
r = open_repo(repo)
object = parse_object(r, objectish)
if annotated:
# Create the tag object
tag_obj = Tag()
if author is None:
# TODO(jelmer): Don't use repo private method.
author = r._get_user_identity()
tag_obj.tagger = author
tag_obj.message = message
tag_obj.name = tag
tag_obj.object = (type(object), object.id)
        if tag_time is None:
            tag_time = int(time.time())
        tag_obj.tag_time = tag_time
if tag_timezone is None:
# TODO(jelmer) Use current user timezone rather than UTC
tag_timezone = 0
elif isinstance(tag_timezone, str):
tag_timezone = parse_timezone(tag_timezone)
tag_obj.tag_timezone = tag_timezone
r.object_store.add_object(tag_obj)
tag_id = tag_obj.id
else:
tag_id = object.id
r.refs['refs/tags/' + tag] = tag_id
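# Example usage (a sketch; the repository path and identity are hypothetical):
#   tag('/tmp/example-repo', 'v0.1')                       # lightweight tag on HEAD
#   tag('/tmp/example-repo', 'v0.2', annotated=True,
#       message='release', author='Example <example@example.com>')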
def list_tags(repo, outstream=sys.stdout):
"""List all tags.
:param repo: Path to repository
:param outstream: Stream to write tags to
"""
r = open_repo(repo)
tags = list(r.refs.as_dict("refs/tags"))
tags.sort()
return tags
def reset(repo, mode, committish="HEAD"):
"""Reset current HEAD to the specified state.
:param repo: Path to repository
:param mode: Mode ("hard", "soft", "mixed")
"""
if mode != "hard":
raise ValueError("hard is the only mode currently supported")
r = open_repo(repo)
indexfile = r.index_path()
tree = r[committish].tree
index.build_index_from_tree(r.path, indexfile, r.object_store, tree)
def push(repo, remote_location, refs_path,
outstream=sys.stdout, errstream=sys.stderr):
"""Remote push with dulwich via dulwich.client
:param repo: Path to repository
:param remote_location: Location of the remote
:param refs_path: relative path to the refs to push to remote
:param outstream: A stream file to write output
:param errstream: A stream file to write errors
"""
# Open the repo
r = open_repo(repo)
# Get the client and path
client, path = get_transport_and_path(remote_location)
def update_refs(refs):
new_refs = r.get_refs()
refs[refs_path] = new_refs['HEAD']
del new_refs['HEAD']
return refs
try:
client.send_pack(path, update_refs,
r.object_store.generate_pack_contents, progress=errstream.write)
outstream.write("Push to %s successful.\n" % remote_location)
except (UpdateRefsError, SendPackError) as e:
outstream.write("Push to %s failed.\n" % remote_location)
errstream.write("Push to %s failed -> '%s'\n" % e.message)
def pull(repo, remote_location, refs_path,
outstream=sys.stdout, errstream=sys.stderr):
"""Pull from remote via dulwich.client
:param repo: Path to repository
:param remote_location: Location of the remote
:param refs_path: relative path to the fetched refs
    :param outstream: A stream file to write output to
    :param errstream: A stream file to write errors to
"""
# Open the repo
r = open_repo(repo)
client, path = get_transport_and_path(remote_location)
remote_refs = client.fetch(path, r, progress=errstream.write)
r['HEAD'] = remote_refs[refs_path]
# Perform 'git checkout .' - syncs staged changes
indexfile = r.index_path()
tree = r["HEAD"].tree
index.build_index_from_tree(r.path, indexfile, r.object_store, tree)
def status(repo):
"""Returns staged, unstaged, and untracked changes relative to the HEAD.
:param repo: Path to repository
:return: GitStatus tuple,
staged - list of staged paths (diff index/HEAD)
unstaged - list of unstaged paths (diff index/working-tree)
untracked - list of untracked, un-ignored & non-.git paths
"""
# 1. Get status of staged
tracked_changes = get_tree_changes(repo)
# 2. Get status of unstaged
unstaged_changes = list(get_unstaged_changes(repo.open_index(), repo.path))
# TODO - Status of untracked - add untracked changes, need gitignore.
untracked_changes = []
return GitStatus(tracked_changes, unstaged_changes, untracked_changes)
def get_tree_changes(repo):
"""Return add/delete/modify changes to tree by comparing index to HEAD.
:param repo: repo path or object
:return: dict with lists for each type of change
"""
r = open_repo(repo)
index = r.open_index()
# Compares the Index to the HEAD & determines changes
# Iterate through the changes and report add/delete/modify
tracked_changes = {
'add': [],
'delete': [],
'modify': [],
}
for change in index.changes_from_tree(r.object_store, r['HEAD'].tree):
if not change[0][0]:
tracked_changes['add'].append(change[0][1])
elif not change[0][1]:
tracked_changes['delete'].append(change[0][0])
elif change[0][0] == change[0][1]:
tracked_changes['modify'].append(change[0][0])
else:
raise AssertionError('git mv ops not yet supported')
return tracked_changes
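# Example return value (a sketch; the paths are hypothetical and, as index entries,
# are byte strings):
#   {'add': [b'new_file'], 'delete': [], 'modify': [b'README']}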
def daemon(path=".", address=None, port=None):
"""Run a daemon serving Git requests over TCP/IP.
:param path: Path to the directory to serve.
"""
# TODO(jelmer): Support git-daemon-export-ok and --export-all.
from dulwich.server import (
FileSystemBackend,
TCPGitServer,
)
backend = FileSystemBackend(path)
server = TCPGitServer(backend, address, port)
server.serve_forever()
|
#!/usr/bin/env python2
"""
builtin_process.py - Builtins that deal with processes or modify process state.
This is sort of the opposite of builtin_pure.py.
"""
from __future__ import print_function
import signal # for calculating numbers
from _devbuild.gen import arg_types
from _devbuild.gen.runtime_asdl import (
cmd_value, cmd_value__Argv,
wait_status_e, wait_status__Proc, wait_status__Pipeline, wait_status__Cancelled,
)
from _devbuild.gen.syntax_asdl import source
from asdl import runtime
from core import alloc
from core import dev
from core import error
from core.pyerror import e_usage
from core import main_loop
from core.pyutil import stderr_line
from core import ui
from core import vm
from core.pyerror import log
from frontend import args
from frontend import flag_spec
from frontend import reader
from frontend import signal_def
from mycpp import mylib
from mycpp.mylib import iteritems, tagswitch
import posix_ as posix
from typing import List, Dict, Optional, Any, cast, TYPE_CHECKING
if TYPE_CHECKING:
from _devbuild.gen.syntax_asdl import command_t
from core.process import ExternalProgram, FdState, JobState, Waiter
from core.pyos import SignalState
from core.state import Mem, SearchPath
from core.ui import ErrorFormatter
from frontend.parse_lib import ParseContext
if mylib.PYTHON:
EXEC_SPEC = flag_spec.FlagSpec('exec')
class Exec(vm._Builtin):
def __init__(self, mem, ext_prog, fd_state, search_path, errfmt):
# type: (Mem, ExternalProgram, FdState, SearchPath, ErrorFormatter) -> None
self.mem = mem
self.ext_prog = ext_prog
self.fd_state = fd_state
self.search_path = search_path
self.errfmt = errfmt
def Run(self, cmd_val):
# type: (cmd_value__Argv) -> int
arg_r = args.Reader(cmd_val.argv, spids=cmd_val.arg_spids)
arg_r.Next() # skip 'exec'
_ = EXEC_SPEC.Parse(arg_r) # no flags now, but accepts --
# Apply redirects in this shell. # NOTE: Redirects were processed earlier.
if arg_r.AtEnd():
self.fd_state.MakePermanent()
return 0
environ = self.mem.GetExported()
i = arg_r.i
cmd = cmd_val.argv[i]
argv0_path = self.search_path.CachedLookup(cmd)
if argv0_path is None:
self.errfmt.Print_('exec: %r not found' % cmd,
span_id=cmd_val.arg_spids[1])
raise SystemExit(127) # exec builtin never returns
# shift off 'exec'
c2 = cmd_value.Argv(cmd_val.argv[i:], cmd_val.arg_spids[i:], cmd_val.block)
self.ext_prog.Exec(argv0_path, c2, environ) # NEVER RETURNS
assert False, "This line should never be reached" # makes mypy happy
class Wait(vm._Builtin):
"""
wait: wait [-n] [id ...]
Wait for job completion and return exit status.
Waits for each process identified by an ID, which may be a process ID or a
job specification, and reports its termination status. If ID is not
given, waits for all currently active child processes, and the return
  status is zero. If ID is a job specification, waits for all processes
in that job's pipeline.
If the -n option is supplied, waits for the next job to terminate and
returns its exit status.
Exit Status:
Returns the status of the last ID; fails if ID is invalid or an invalid
option is given.
"""
def __init__(self, waiter, job_state, mem, tracer, errfmt):
# type: (Waiter, JobState, Mem, dev.Tracer, ErrorFormatter) -> None
self.waiter = waiter
self.job_state = job_state
self.mem = mem
self.tracer = tracer
self.errfmt = errfmt
def Run(self, cmd_val):
# type: (cmd_value__Argv) -> int
with dev.ctx_Tracer(self.tracer, 'wait', cmd_val.argv):
return self._Run(cmd_val)
def _Run(self, cmd_val):
# type: (cmd_value__Argv) -> int
attrs, arg_r = flag_spec.ParseCmdVal('wait', cmd_val)
arg = arg_types.wait(attrs.attrs)
job_ids, arg_spids = arg_r.Rest2()
if arg.n:
#log('*** wait -n')
# wait -n returns the exit status of the JOB.
# You don't know WHICH process, which is odd.
# TODO: this should wait for the next JOB, which may be multiple
# processes.
# Bash has a wait_for_any_job() function, which loops until the jobs
# table changes.
#
# target_count = self.job_state.NumRunning() - 1
# while True:
# if not self.waiter.WaitForOne():
# break
#
# if self.job_state.NumRunning == target_count:
# break
#
#log('wait next')
result = self.waiter.WaitForOne(False)
if result == 0: # OK
return self.waiter.last_status
elif result == -1: # nothing to wait for
return 127
else:
return result # signal
if len(job_ids) == 0:
#log('*** wait')
i = 0
while True:
# BUG: If there is a STOPPED process, this will hang forever, because
# we don't get ECHILD.
# Not sure it matters since you can now Ctrl-C it.
result = self.waiter.WaitForOne(False)
if result != 0:
break # nothing to wait for, or interrupted
i += 1
if self.job_state.NoneAreRunning():
break
return 0 if result == -1 else result
# Get list of jobs. Then we need to check if they are ALL stopped.
# Returns the exit code of the last one on the COMMAND LINE, not the exit
# code of last one to FINISH.
status = 1 # error
for i, job_id in enumerate(job_ids):
span_id = arg_spids[i]
# The % syntax is sort of like ! history sub syntax, with various queries.
# https://stackoverflow.com/questions/35026395/bash-what-is-a-jobspec
if job_id.startswith('%'):
raise error.Usage(
"doesn't support bash-style jobspecs (got %r)" % job_id,
span_id=span_id)
# Does it look like a PID?
try:
pid = int(job_id)
except ValueError:
raise error.Usage('expected PID or jobspec, got %r' % job_id,
span_id=span_id)
job = self.job_state.JobFromPid(pid)
if job is None:
self.errfmt.Print_("%s isn't a child of this shell" % pid,
span_id=span_id)
return 127
wait_status = job.JobWait(self.waiter)
UP_wait_status = wait_status
with tagswitch(wait_status) as case:
if case(wait_status_e.Proc):
wait_status = cast(wait_status__Proc, UP_wait_status)
status = wait_status.code
elif case(wait_status_e.Pipeline):
wait_status = cast(wait_status__Pipeline, UP_wait_status)
# TODO: handle PIPESTATUS? Is this right?
status = wait_status.codes[-1]
elif case(wait_status_e.Cancelled):
wait_status = cast(wait_status__Cancelled, UP_wait_status)
status = wait_status.code
else:
raise AssertionError()
return status
class Jobs(vm._Builtin):
"""List jobs."""
def __init__(self, job_state):
# type: (JobState) -> None
self.job_state = job_state
def Run(self, cmd_val):
# type: (cmd_value__Argv) -> int
# NOTE: the + and - in the jobs list mean 'current' and 'previous', and are
# addressed with %+ and %-.
# [6] Running sleep 5 | sleep 5 &
# [7]- Running sleep 5 | sleep 5 &
# [8]+ Running sleep 5 | sleep 5 &
self.job_state.List()
return 0
class Fg(vm._Builtin):
"""Put a job in the foreground"""
def __init__(self, job_state, waiter):
# type: (JobState, Waiter) -> None
self.job_state = job_state
self.waiter = waiter
def Run(self, cmd_val):
# type: (cmd_value__Argv) -> int
# Get job instead of PID, and then do
#
# Should we also have job.SendContinueSignal() ?
# - posix.killpg()
#
# job.WaitUntilDone(self.waiter)
# - waitpid() under the hood
pid = self.job_state.GetLastStopped()
if pid == -1:
log('No job to put in the foreground')
return 1
# TODO: Print job ID rather than the PID
log('Continue PID %d', pid)
posix.kill(pid, signal.SIGCONT)
job = self.job_state.JobFromPid(pid)
status = job.Wait(self.waiter)
#log('status = %d', status)
return status
class Bg(vm._Builtin):
"""Put a job in the background"""
def __init__(self, job_state):
# type: (JobState) -> None
self.job_state = job_state
def Run(self, cmd_val):
# type: (cmd_value__Argv) -> int
# How does this differ from 'fg'? It doesn't wait and it sets controlling
# terminal?
raise error.Usage("isn't implemented")
class _TrapHandler(object):
"""A function that is called by Python's signal module.
Similar to process.SubProgramThunk."""
def __init__(self, node, nodes_to_run, sig_state, tracer):
# type: (command_t, List[command_t], SignalState, dev.Tracer) -> None
self.node = node
self.nodes_to_run = nodes_to_run
self.sig_state = sig_state
self.tracer = tracer
def __call__(self, sig_num, unused_frame):
# type: (int, Any) -> None
"""For Python's signal module."""
self.tracer.PrintMessage(
'Received signal %d. Will run handler in main loop' % sig_num)
self.sig_state.last_sig_num = sig_num # for interrupted 'wait'
self.nodes_to_run.append(self.node)
def __str__(self):
# type: () -> str
# Used by trap -p
# TODO: Abbreviate with fmt.PrettyPrint?
return '<Trap %s>' % self.node
def _GetSignalNumber(sig_spec):
# type: (str) -> int
# POSIX lists the numbers that are required.
# http://pubs.opengroup.org/onlinepubs/9699919799/
#
# Added 13 for SIGPIPE because autoconf's 'configure' uses it!
if sig_spec.strip() in ('1', '2', '3', '6', '9', '13', '14', '15'):
return int(sig_spec)
# INT is an alias for SIGINT
if sig_spec.startswith('SIG'):
sig_spec = sig_spec[3:]
return signal_def.GetNumber(sig_spec)
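# For example, _GetSignalNumber('2') returns 2 directly from the POSIX list above,
# while _GetSignalNumber('SIGINT') strips the 'SIG' prefix and resolves 'INT' via
# signal_def.GetNumber() (SIGINT is signal 2 on POSIX systems).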
_HOOK_NAMES = ['EXIT', 'ERR', 'RETURN', 'DEBUG']
# TODO:
#
# bash's default -p looks like this:
# trap -- '' SIGTSTP
# trap -- '' SIGTTIN
# trap -- '' SIGTTOU
#
# CPython registers different default handlers. The C++ rewrite should make
# OVM match sh/bash more closely.
class Trap(vm._Builtin):
def __init__(self, sig_state, traps, nodes_to_run, parse_ctx, tracer, errfmt):
# type: (SignalState, Dict[str, _TrapHandler], List[command_t], ParseContext, dev.Tracer, ErrorFormatter) -> None
self.sig_state = sig_state
self.traps = traps
self.nodes_to_run = nodes_to_run
self.parse_ctx = parse_ctx
self.arena = parse_ctx.arena
self.tracer = tracer
self.errfmt = errfmt
def _ParseTrapCode(self, code_str):
# type: (str) -> command_t
"""
Returns:
A node, or None if the code is invalid.
"""
line_reader = reader.StringLineReader(code_str, self.arena)
c_parser = self.parse_ctx.MakeOshParser(line_reader)
# TODO: the SPID should be passed through argv. Use ArgvWord?
with alloc.ctx_Location(self.arena, source.Trap(runtime.NO_SPID)):
try:
node = main_loop.ParseWholeFile(c_parser)
except error.Parse as e:
ui.PrettyPrintError(e, self.arena)
return None
return node
def Run(self, cmd_val):
# type: (cmd_value__Argv) -> int
attrs, arg_r = flag_spec.ParseCmdVal('trap', cmd_val)
arg = arg_types.trap(attrs.attrs)
if arg.p: # Print registered handlers
for name, value in iteritems(self.traps):
# The unit tests rely on this being one line.
# bash prints a line that can be re-parsed.
print('%s %s' % (name, value.__class__.__name__))
return 0
if arg.l: # List valid signals and hooks
for name in _HOOK_NAMES:
print(' %s' % name)
for name, int_val in signal_def.AllNames():
print('%2d %s' % (int_val, name))
return 0
code_str = arg_r.ReadRequired('requires a code string')
sig_spec, sig_spid = arg_r.ReadRequired2('requires a signal or hook name')
# sig_key is NORMALIZED sig_spec: a signal number string or string hook
# name.
sig_key = None # type: Optional[str]
sig_num = None
if sig_spec in _HOOK_NAMES:
sig_key = sig_spec
elif sig_spec == '0': # Special case
sig_key = 'EXIT'
else:
sig_num = _GetSignalNumber(sig_spec)
if sig_num is not None:
sig_key = str(sig_num)
if sig_key is None:
self.errfmt.Print_("Invalid signal or hook %r" % sig_spec,
span_id=cmd_val.arg_spids[2])
return 1
# NOTE: sig_spec isn't validated when removing handlers.
if code_str == '-':
if sig_key in _HOOK_NAMES:
try:
del self.traps[sig_key]
except KeyError:
pass
return 0
if sig_num is not None:
try:
del self.traps[sig_key]
except KeyError:
pass
self.sig_state.RemoveUserTrap(sig_num)
return 0
raise AssertionError('Signal or trap')
# Try parsing the code first.
# TODO: If simple_trap is on (for oil:basic), then it must be a function
# name? And then you wrap it in 'run'?
node = self._ParseTrapCode(code_str)
if node is None:
return 1 # ParseTrapCode() prints an error for us.
# Register a hook.
if sig_key in _HOOK_NAMES:
if sig_key in ('ERR', 'RETURN', 'DEBUG'):
stderr_line("osh warning: The %r hook isn't implemented", sig_spec)
self.traps[sig_key] = _TrapHandler(node, self.nodes_to_run,
self.sig_state, self.tracer)
return 0
# Register a signal.
if sig_num is not None:
handler = _TrapHandler(node, self.nodes_to_run, self.sig_state,
self.tracer)
# For signal handlers, the traps dictionary is used only for debugging.
self.traps[sig_key] = handler
if sig_num in (signal.SIGKILL, signal.SIGSTOP):
self.errfmt.Print_("Signal %r can't be handled" % sig_spec,
span_id=sig_spid)
# Other shells return 0, but this seems like an obvious error
return 1
self.sig_state.AddUserTrap(sig_num, handler)
return 0
raise AssertionError('Signal or trap')
# Example:
# trap -- 'echo "hi there" | wc ' SIGINT
#
# Then hit Ctrl-C.
class Umask(vm._Builtin):
def __init__(self):
# type: () -> None
"""Dummy constructor for mycpp."""
pass
def Run(self, cmd_val):
# type: (cmd_value__Argv) -> int
argv = cmd_val.argv[1:]
if len(argv) == 0:
# umask() has a dumb API: you can't get it without modifying it first!
# NOTE: dash disables interrupts around the two umask() calls, but that
# shouldn't be a concern for us. Signal handlers won't call umask().
mask = posix.umask(0)
posix.umask(mask) #
print('0%03o' % mask) # octal format
return 0
if len(argv) == 1:
a = argv[0]
try:
new_mask = int(a, 8)
except ValueError:
# NOTE: This happens if we have '8' or '9' in the input too.
stderr_line("osh warning: umask with symbolic input isn't implemented")
return 1
else:
posix.umask(new_mask)
return 0
e_usage('umask: unexpected arguments')
class Fork(vm._Builtin):
def __init__(self, shell_ex):
# type: (vm._Executor) -> None
self.shell_ex = shell_ex
def Run(self, cmd_val):
# type: (cmd_value__Argv) -> int
attrs, arg_r = flag_spec.ParseOilCmdVal('fork', cmd_val)
arg, span_id = arg_r.Peek2()
if arg is not None:
e_usage('got unexpected argument %r' % arg, span_id=span_id)
if cmd_val.block is None:
e_usage('expected a block')
return self.shell_ex.RunBackgroundJob(cmd_val.block)
class ForkWait(vm._Builtin):
def __init__(self, shell_ex):
# type: (vm._Executor) -> None
self.shell_ex = shell_ex
def Run(self, cmd_val):
# type: (cmd_value__Argv) -> int
attrs, arg_r = flag_spec.ParseOilCmdVal('forkwait', cmd_val)
arg, span_id = arg_r.Peek2()
if arg is not None:
e_usage('got unexpected argument %r' % arg, span_id=span_id)
if cmd_val.block is None:
e_usage('expected a block')
return self.shell_ex.RunSubshell(cmd_val.block)
|
import requests
import os
import json
from Jumpscale import j
JSConfigBase = j.application.JSBaseConfigClass
class ApiError(Exception):
def __init__(self, response):
message = None
msg = "%s %s" % (response.status_code, response.reason)
try:
message = response.json()
except BaseException:
pass
if isinstance(message, (str, bytes)):
msg += "\n%s" % message
elif isinstance(message, dict) and "errormessage" in message:
msg += "\n%s" % message["errormessage"]
super(ApiError, self).__init__(msg)
self._response = response
@property
def response(self):
return self._response
class BaseResource:
def __init__(self, session, url):
self._session = session
self._url = url
self._method = "POST"
def __getattr__(self, item):
url = os.path.join(self._url, item)
resource = BaseResource(self._session, url)
setattr(self, item, resource)
return resource
def __call__(self, **kwargs):
response = self._session.request(self._method, self._url, kwargs, timeout=300)
if not response.ok:
raise ApiError(response)
if response.headers.get("content-type", "text/html") == "application/json":
return response.json()
return response.content
class Resource(BaseResource):
def __init__(self, ip, port, path):
session = requests.Session()
scheme = "http" if port != 443 else "https"
url = "%s://%s:%s/%s" % (scheme, ip, port, path.lstrip("/"))
super(Resource, self).__init__(session, url)
def load_swagger(self, file=None, group=None):
if file:
with open(file) as fd:
swagger = json.load(fd)
else:
swagger = self.system.markdowndocs.prepareCatalog(group=group)
for methodpath, methodspec in swagger["paths"].items():
api = self
for path in methodpath.split("/")[1:]:
api = getattr(api, path)
method = "post"
if "post" not in methodspec and methodspec:
method = list(methodspec.keys())[0]
api._method = method
docstring = methodspec[method]["description"]
for param in methodspec[method].get("parameters", list()):
param["type"] = param["type"] if "type" in param else str(param.get("$ref", "unknown"))
docstring += (
"""
:param %(name)s: %(description)s required %(required)s
:type %(name)s: %(type)s"""
% param
)
api.__doc__ = docstring
return swagger
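# Example usage (a sketch; the host, port and endpoint names below are hypothetical):
#   api = Resource("127.0.0.1", 8200, "/restmachine")
#   # attribute access appends path segments; calling the leaf issues the HTTP request
#   result = api.system.usermanager.authenticate(name="admin", secret="secret")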
class PortalClient(JSConfigBase, Resource):
_SCHEMATEXT = """
@url = jumpscale.portal.client
name* = "" (S)
ip = "" (S)
port = 8200 (ipport)
iyoinstance = "" (S)
"""
def _init(self):
ip = self.ip
port = self.port
Resource.__init__(self, ip, port, "/restmachine")
|
from __future__ import annotations
import json
import pickle
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
from pathlib import Path
import os
from srim import Ion, Layer, Target # , output
from srim.srim import TRIM
from srim.output import Results
from concurrent.futures import as_completed, ProcessPoolExecutor
import multiprocessing as mp
from time import sleep
from dataclasses import asdict # , dataclass as dc
from pydantic.dataclasses import dataclass
from typing import cast, Iterable, Sequence, Set, Union, List, Tuple, Dict, NamedTuple
from typing_extensions import Literal, TypedDict
from mytypes import floatArray, precisionLitType
from matplotlib import use
use('Agg') # NoQa
class PydanticConfig:
arbitrary_types_allowed = True
@dataclass(config=PydanticConfig)
class SrimData:
folder: Path # folder the results is saved to
ion: Ion
num_ion: int
target: Target
damage_total: float
damage_array: floatArray
def __post_init__(self) -> None:
...
def __post_init_post_parse__(self) -> None:
self.results = Results(self.folder)
import re
if not self.ion:
self.ion = self.results.ioniz.ion
self.num_ions: int = self.results.ioniz.num_ions
if not self.target:
with open(R".\data\ceria_on_silica\ceria_2um_He@400keV\tdata.txt", 'r') as f:
f.read()
"""===============Target material =======================
Layer 1 """
match_target = re.search(r'(?<=====\r\n)Layer\s+\d+\s+:.*?(?=====)', f.read(), re.DOTALL)
#match_target = re.search(r'(?<=====\r\n)Layer\s+\d+\s+:.*?(?=====)', f.read(), re.DOTALL)
if match_target:
print(match_target.group(0))
else:
print("target not found")
# out = output.SRIM_Output()
# output.SRIM_Output._read_target(out, f.read())
# self.target = Target.
# self.layers: List[Layer] = self.target.layers
class ElemTD(TypedDict):
atomic_num: int
atomic_mass: float
E_d: float
lattice: float
surface: float
@dataclass(frozen=True)
class ElemClass:
atomic_num: int
atomic_mass: float
E_d: float = 25.0
lattice: float = 0.0
surface: float = 3.0
def __post_init__(self) -> None:
if self.E_d <= 0:
raise ValueError('Invalid E_d (negative)')
assert self.lattice >= 0
def as_dict(self) -> Dict[str, float]:
# narrow str, Any to declared dtypes
# get types from self.__annotation__ and make union in another function?
return asdict(self)
def as_typdict(self) -> ElemTD:
#dic = {str(k): float(v) for k, v in asdict(self).items()}
#ret: ElemTD = dic
#ret = cast(ElemTD, asdict(self))
return ElemTD(atomic_num=self.atomic_num,
atomic_mass=self.atomic_mass,
E_d=self.E_d,
lattice=self.lattice,
surface=self.surface)
class DamageStats(NamedTuple):
total: float
max_damage: float
max_index: int
max_depth: float
# TODO see main.py for getting element classes. Need to convert to ElemClass or not? Use Dacite for convert via dict?
# or inherit from it?
elem_ce_dict = ElemClass(E_d=25.0, lattice=3.0, surface=4.23, atomic_num=58, atomic_mass=140.1)
elem_u_dict = {'E_d': 25.0, 'lattice': 3.0, 'surface': 5.42, 'atomic_num': 92, 'atomic_mass': 238.0}
elem_th_dict = {'E_d': 25.0, 'lattice': 3.0, 'surface': 5.93, 'atomic_num': 90, 'atomic_mass': 232.0}
elem_o_dict = ElemClass(E_d=28.0, lattice=3.0, surface=2.00, atomic_num=8, atomic_mass=15.99)
elem_si_dict = {'E_d': 15.0, 'lattice': 2.0, 'surface': 4.70, 'atomic_num': 14, 'atomic_mass': 28.08}
elem_ti_dict = {'E_d': 28.0, 'lattice': 3.0, 'surface': 2.00, 'atomic_num': 22, 'atomic_mass': 15.99}
def make_element_subfolder_name(layer: Layer, ion: Ion,
precision: precisionLitType = 'um') -> Path:
"""create a folder from layer elements and stoichiometries and ion type and energy.
precision is units of the layer width, default = 'um' """
if layer.name:
element_list_str = layer.name
else:
element_list = []
for (element, prop) in layer.elements.items():
stoich = prop['stoich']
# print(element.symbol, stoich)
if stoich == 1.0:
element_str = element.symbol
elif stoich.is_integer():
element_str = f'{element.symbol}{stoich:.0f}'
else:
element_str = f'{element.symbol}{stoich:.2f}'
# print(element_str)
element_list.append(element_str)
element_list_str = "-".join(element_list)
# print(element_list_str)
layer_width_nm = f'{layer.width / 10:.0f}nm'
layer_width_um = f'{layer.width / 10000:.0f}um'
ion_energy_kev = f'{ion.energy / 1000:.0f}keV'
if precision == 'um' or precision == 'micro':
layer_width = layer_width_um
elif precision == 'nm' or precision == 'nano':
layer_width = layer_width_nm
else:
layer_width = layer.width
data_subfolder_name = Path(f"{element_list_str}_{layer_width}_{ion.symbol}@{ion_energy_kev}")
# print(data_subfolder_name)
return data_subfolder_name
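# Example (a sketch): an unnamed CeO2 layer 20000 Angstroms wide and a 400 keV He ion
# give Path('Ce-O2_2um_He@400keV'); when layer.name is set, that name replaces the
# element list in the folder name.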
def make_data_path(layer: Layer,
ion: Ion,
data_path: Union[Path, str] = R'.\data',
precision: precisionLitType = 'um') -> Path:
"""create a folder from layer elements and stoichiometries and ion type and energy
data_path default = '.\\data'. precision is units of the layer width, default = 'um' """
data_subfolder_name = make_element_subfolder_name(layer, ion, precision)
output_directory: Path = Path(data_path) / data_subfolder_name
output_directory.mkdir(parents=True, exist_ok=True)
return output_directory
def make_image_path(layer: Layer, ion: Ion,
image_path: Union[Path, str] = R'.\images',
precision: precisionLitType = 'um') -> Path:
"""create a folder from layer elements and stoichiometries and ion type and energy
data_path default = '.\\images'. precision is units of the layer width, default = 'um' """
data_subfolder_name = make_element_subfolder_name(layer, ion, precision)
outimage_directory: Path = Path(image_path) / data_subfolder_name
outimage_directory.mkdir(parents=True, exist_ok=True)
return outimage_directory
def get_depth_damage_array(results: Results, units: str = 'nm') -> floatArray:
"""get array of [0] depths in nm and [damage] for whole target"""
if units in ('nm', 'nano'):
ratio_A_to_units = 10
elif units in ('a', 'A', 'angstrom', 'angstroms', 'Angstrom', 'Angstroms'):
ratio_A_to_units = 1
else:
raise ValueError
phon = results.phonons
dx = max(phon.depth) / 100 # ratio for eV/A to eV per measurement
energy_damage = np.array((phon.ions + phon.recoils) * dx)
depth_array = np.array(phon.depth / ratio_A_to_units)
damage_array_nm: np.ndarray[float] = np.stack((depth_array, energy_damage))
return damage_array_nm
def trunc_depth_damage_array(results: Results, units: precisionLitType = 'nm', depth: int = 0) -> floatArray:
"""Get list of damage up to given depth. <depth> given in <units>"""
depth_damage_array = get_depth_damage_array(results, units=units)
if depth > 0:
# print(depth_damage_array[0][depth_damage_array[0][:] <= depth])
depth_damage = depth_damage_array[:, depth_damage_array[0][:] <= depth]
else:
depth_damage = depth_damage_array[:]
return cast(floatArray, depth_damage) # up to depth if given otherwise all
def get_damage_array(results: Results, units: precisionLitType = 'nm', depth: int = 0) -> floatArray:
depth_damage = trunc_depth_damage_array(results, units=units, depth=depth)
damage_array = depth_damage[1]
return cast(floatArray, damage_array)
def get_damage_stats(results: Results, units: precisionLitType = 'nm', depth: int = 0) -> DamageStats:
array = trunc_depth_damage_array(results, units=units, depth=depth)
total_damage: int = int(sum(cast(Iterable[float], array[1])))
max_damage: int = int(max(array[1]))
    max_ind: int = int(np.argmax(array[1]))
depth_of_max: float = cast(float, array[0][max_ind])
return DamageStats(total_damage, max_damage, max_ind, depth_of_max)
def plot_damage_multi(results: List[Results],
save_dir: Path,
units: precisionLitType = 'nm',
depth: int = 0
) -> None:
# phon = results.phonons
if units in ('nm', 'nano'):
units_str = 'nm'
elif units in ('a', 'A', 'angstrom', 'angstroms', 'Angstrom', 'Angstroms'):
units_str = 'Angstroms'
if depth > 0:
pass
        # TODO: add a dotted line at this depth
if isinstance(results, Results):
results = [results]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(f'Depth [{units_str}]')
ax.set_ylabel('Collision damage [eV]')
for res in results:
depth_damage_array = trunc_depth_damage_array(res, units=units, depth=depth)
damage_stats = get_damage_stats(res, units=units, depth=depth)
ion_name = res.ioniz.ion.symbol
ion_energy = int(res.ioniz.ion.energy / 1000)
legend = f'{ion_name} @ {ion_energy} keV, damage {damage_stats.total} eV'
ax.plot(depth_damage_array[0], depth_damage_array[1], label='{}'.format(legend))
ax.legend()
fig.suptitle('Damage Energy vs. Depth', fontsize=15)
fig.set_size_inches((10, 6))
fig.savefig(os.path.join(save_dir, 'damagevsdepth_multi.png'), transparent=True)
# return fig
def plot_damage_multi_from_path(data_parent: Path,
units: precisionLitType = 'nm',
depth: int = 0,
) -> None:
loaded_data = [Results(dp) for dp in data_parent.iterdir() if dp.is_dir()]
plot_damage_multi(loaded_data, data_parent, units=units, depth=depth)
def plot_damage_energy_per_ion(results: Results, folder: Path, units: precisionLitType = 'nm') -> None:
phon = results.phonons
if units in ('nm', 'nano'):
units_str = 'nm'
depth = phon.depth / 10
elif units in ('a', 'A', 'angstrom', 'angstroms', 'Angstrom', 'Angstroms'):
units_str = 'Angstroms'
depth = phon.depth
fig, ax = plt.subplots()
energy_damage: floatArray = get_damage_array(results, units, 0)
energy_damage_sum = sum(energy_damage)
# energy_damage_kev = energy_damage_sum / 1000
# ax.plot(depth, energy_damage / phon.num_ions, label='{}'.format(folder))
legend = f'{folder.name}, {energy_damage_sum} eV'
ax.plot(depth, energy_damage / phon.num_ions, label='{}'.format(legend))
ax.set_xlabel(f'Depth [{units_str}]')
ax.set_ylabel('Collision damage [eV / ion]')
ax.legend()
fig.suptitle('Damage Energy vs. Depth', fontsize=15)
fig.set_size_inches((10, 6))
fig.savefig(os.path.join(folder, 'damagevsdepth_per_ion.png'), transparent=True)
def plot_damage_energy_total(results: Results, folder: Path, units: precisionLitType = 'nm') -> None:
phon = results.phonons
if units in ('nm', 'nano'):
units_str = 'nm'
depth = phon.depth / 10
elif units in ('a', 'A', 'angstrom', 'angstroms', 'Angstrom', 'Angstroms'):
units_str = 'Angstroms'
depth = phon.depth
fig, ax = plt.subplots()
energy_damage: floatArray = get_damage_array(results, units, 0)
energy_damage_sum: float = sum(cast(Iterable[float], energy_damage))
# energy_damage_kev = energy_damage_sum / 1000
# ax.plot(depth, energy_damage / phon.num_ions, label='{}'.format(folder))
legend = f'{folder.name}, {energy_damage_sum} eV'
ax.plot(depth, energy_damage, label='{}'.format(legend))
ax.set_xlabel(f'Depth [{units_str}]')
    ax.set_ylabel(f'Collision damage [eV] (total from {phon.num_ions} ions)')
ax.legend()
fig.suptitle('Damage Energy vs. Depth', fontsize=15)
fig.set_size_inches((10, 6))
fig.savefig(os.path.join(folder, 'damagevsdepth_total.png'), transparent=True)
def run_srim(ion: Ion,
target: Target,
data_out_dir: Path,
num_ions: int, srim_dir: Path) -> Results:
# use layer, data_path and iob to create out_dir
# run trim, return out_dir and result
# copy result to out_dir from srim_dir
trim = TRIM(target, ion, number_ions=num_ions, calculation=1) # 1 million -> about 5 hours
results = trim.run(srim_dir)
TRIM.copy_output_files(srim_dir, data_out_dir)
print(f'{ion.symbol}-{ion.energy/1000}kev done')
return results
def plot_srim(results: Results,
image_out_dir: Path,
units: precisionLitType = 'nm',
total: bool = True,
per_ion: bool = True,
) -> None:
if total:
plot_damage_energy_total(results, image_out_dir, units=units)
if per_ion:
plot_damage_energy_per_ion(results, image_out_dir, units=units)
def combined_srim(ion: Ion,
target: Target,
data_path: Path,
num_ions: int,
srim_dir: Path) -> SrimData:
# run ions in list against layer and datapath
# get out_dir and result
# create list of folders and list of results
start = datetime.now()
pid = os.getpid() # if using processpool
data_out_dir = make_data_path(target.layers[0], ion, data_path)
image_out_dir = data_out_dir # make_image_path(target.layers[0], ion, data_path)
print(f"{data_out_dir.name} started) using PID {pid}")
result = run_srim(ion, target, data_out_dir, num_ions, srim_dir)
damage_stats = get_damage_stats(result)
damage_total = damage_stats.total
damage_array = get_depth_damage_array(result)
plot_srim(result, image_out_dir)
datum = SrimData(data_out_dir, ion, num_ions, target, damage_total, damage_array)
end = datetime.now()
duration = end - start
print(f"{data_out_dir.name} done in {str(duration).split('.', 2)[0]}") # " using PID {pid}")
return datum
def create_ion_list(ion_name: Literal['H', 'He', 'Li'],
energy_list: Union[Sequence[int], Set[int]],
units: Literal['ev', 'kev', 'mev']
) -> List[Ion]:
ion_list = [Ion(f'{ion_name}', energy=x * 1000) for x in energy_list]
return ion_list
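# Example: create_ion_list('He', [100, 400], units='kev')
#   -> [Ion('He', energy=100000), Ion('He', energy=400000)]
# Note that the energies are always multiplied by 1000, i.e. they are treated as keV
# regardless of the `units` argument.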
def pool_srim(ions: Union[Sequence[Ion], Set[Ion]],
target: Target, data_path: Path, num_ions: int, srim_dir: Path) -> List[SrimData]: # List[SrimData]
# with ProcessPoolExecutor(max_workers=mp.cpu_count() - 1) as ppexc:
with ProcessPoolExecutor(max_workers=mp.cpu_count() * 5) as ppexc:
"""# using submit() and list comprehension
SrimData_futures = [ppexc.submit(combined_srim,
ion,
target,
data_path,
num_ions=1_000_000, # 1 million -> about 5 hours
srim_dir=srim_executable_directory)
for ion in ions_He_list]
"""
SrimData_futures = []
for ion in ions:
res = ppexc.submit(combined_srim,
ion,
target,
data_path,
num_ions, # 1 million -> about 5 hours
srim_dir)
sleep(1)
SrimData_futures.append(res)
"""
# alternate using map() and repeat(). # returns results in order done
SrimData_futures = ppexc.map(combined_srim,
[Ion('He', energy=1000000), Ion('He', energy=2000000)],
repeat(target),
repeat(data_path),
repeat(1_000_000), # 1 million -> about 5 hours
repeat(srim_executable_directory))
"""
Srim_data_list: List[SrimData] = [f.result() for f in as_completed(SrimData_futures)]
print(f"{len(Srim_data_list)} jobs done")
return Srim_data_list
def pickle_srim(srimdata: Union[SrimData, Sequence[SrimData]]) -> None:
# sequence or iterable?
if isinstance(srimdata, SrimData):
srimdata = [srimdata]
for srim_x in srimdata:
datapath = srim_x.folder / "result.pkl"
with open(datapath, "w+b") as pkl_f:
pickle.dump(srim_x, pkl_f)
print(f"Data pickled to {datapath}")
def json_srim(srimdata: List[SrimData]) -> None:
data_ref = srimdata if isinstance(srimdata, SrimData) else srimdata[0]
datapath = data_ref.folder.parent / "result.json"
with open(datapath, "w+") as json_f:
json.dump(srimdata, json_f)
print(f"Data save as json to {datapath}")
|
# Import the Japanese language class and create the nlp object
from ____ import ____
nlp = ____
# Process the text
doc = ____("私はツリーカンガルーとイルカが好きです。")
# Select the first token
first_token = doc[____]
# Print the text of the first token
print(first_token.____)
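# One possible completion (a sketch using spaCy's Japanese language class, which
# requires a Japanese tokenizer such as SudachiPy to be installed):
#   from spacy.lang.ja import Japanese
#   nlp = Japanese()
#   doc = nlp("私はツリーカンガルーとイルカが好きです。")
#   first_token = doc[0]
#   print(first_token.text)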
|
import pandas as pd
import numpy as np
import sys
import utils
import config
def gen_train_sample(df):
df['target'] = (df['reference'] == df['impressions']).astype(int)
df.drop(['current_filters','reference','action_type'],axis=1,inplace=True)
df_session = df[['session_id','step']].drop_duplicates(subset='session_id',keep='last').reset_index(drop=True)
df = df_session.merge(df, on=['session_id','step'], how='left').reset_index(drop=True)
#loader.save_df(df,config.data+'m3_tr.ftr')
return df
def get_test_sample(df):
df['target'] = (df['reference'] == df['impressions']).astype(int)
# drop noisy sample
mask = (df.session_id == 'cbe3752713eee') & (df.timestamp ==1541660358)
df = df[~mask]
df_session = df[['session_id','step']].drop_duplicates(subset='session_id',keep='last').reset_index(drop=True)
df = df_session.merge(df, on=['session_id','step'], how='left').reset_index(drop=True)
te = df[pd.isnull(df['reference'])].reset_index(drop=True)
print(te.shape)
tr = df[pd.notnull(df['reference'])].reset_index(drop=True)
print(tr.shape)
tr.drop(['current_filters','reference','action_type'],axis=1,inplace=True)
te.drop(['current_filters','reference','action_type','target'],axis=1,inplace=True)
utils.save_df(te,config.data+'m3_te.ftr')
return tr,te
def gen_tr_click(df):
df = df[['session_id','reference']].drop_duplicates(subset='session_id',keep='last').reset_index(drop=True)
print(df.shape)
df = df[pd.notnull(df.reference)].reset_index(drop=True)
print(df.shape)
utils.save_df(df,config.data+'m3_tr_click.ftr')
if __name__ == '__main__':
nrow = None
train = utils.load_df(config.data+'sample_train.csv',nrows=nrow)
test = utils.load_df(config.data+'sample_test.csv',nrows=nrow)
df = pd.concat([train,test]).reset_index(drop=True)
tr1 = gen_train_sample(train)
tr2,te = get_test_sample(test)
tr = pd.concat([tr1,tr2]).reset_index(drop=True)
    utils.save_df(tr,config.data+'m3_tr.ftr')
gen_tr_click(df)
|
# -*- coding: utf-8 -*-
# Author: Óscar Nájera
# License: 3-clause BSD
"""
Link resolver objects
=====================
"""
import codecs
import gzip
from io import BytesIO
import os
import pickle
import posixpath
import re
import shelve
import sys
import urllib.request as urllib_request
import urllib.parse as urllib_parse
from urllib.error import HTTPError, URLError
from sphinx.errors import ExtensionError
from sphinx.search import js_index
from . import sphinx_compatibility
logger = sphinx_compatibility.getLogger('sphinx-gallery')
def _get_data(url):
"""Get data over http(s) or from a local file."""
if urllib_parse.urlparse(url).scheme in ('http', 'https'):
user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11' # noqa: E501
headers = {'User-Agent': user_agent}
req = urllib_request.Request(url, None, headers)
resp = urllib_request.urlopen(req)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'gzip':
data = gzip.GzipFile(fileobj=BytesIO(data)).read()
elif encoding != 'plain':
raise ExtensionError('unknown encoding %r' % (encoding,))
data = data.decode('utf-8')
else:
with codecs.open(url, mode='r', encoding='utf-8') as fid:
data = fid.read()
return data
def get_data(url, gallery_dir):
"""Persistent dictionary usage to retrieve the search indexes"""
# shelve keys need to be str in python 2
if sys.version_info[0] == 2 and isinstance(url, str):
url = url.encode('utf-8')
cached_file = os.path.join(gallery_dir, 'searchindex')
search_index = shelve.open(cached_file)
if url in search_index:
data = search_index[url]
else:
data = _get_data(url)
search_index[url] = data
search_index.close()
return data
def parse_sphinx_docopts(index):
"""
Parse the Sphinx index for documentation options.
Parameters
----------
index : str
The Sphinx index page
Returns
-------
docopts : dict
The documentation options from the page.
"""
pos = index.find('var DOCUMENTATION_OPTIONS')
if pos < 0:
raise ExtensionError(
'Documentation options could not be found in index.')
pos = index.find('{', pos)
if pos < 0:
raise ExtensionError(
'Documentation options could not be found in index.')
endpos = index.find('};', pos)
if endpos < 0:
raise ExtensionError(
'Documentation options could not be found in index.')
block = index[pos + 1:endpos].strip()
docopts = {}
for line in block.splitlines():
key, value = line.split(':', 1)
key = key.strip().strip('"')
value = value.strip()
if value[-1] == ',':
value = value[:-1].rstrip()
if value[0] in '"\'':
value = value[1:-1]
elif value == 'false':
value = False
elif value == 'true':
value = True
else:
try:
value = int(value)
except ValueError:
# In Sphinx 1.7.5, URL_ROOT is a JavaScript fragment.
# Ignoring this entry since URL_ROOT is not used
# elsewhere.
# https://github.com/sphinx-gallery/sphinx-gallery/issues/382
continue
docopts[key] = value
return docopts
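# Example (a sketch): given an index page containing
#   var DOCUMENTATION_OPTIONS = {
#       VERSION: '1.0',
#       FILE_SUFFIX: '.html',
#       HAS_SOURCE: true
#   };
# this function returns {'VERSION': '1.0', 'FILE_SUFFIX': '.html', 'HAS_SOURCE': True}.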
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, gallery_dir, relative=False):
self.doc_url = doc_url
self.gallery_dir = gallery_dir
self.relative = relative
self._link_cache = {}
if doc_url.startswith(('http://', 'https://')):
if relative:
raise ExtensionError(
'Relative links are only supported for local '
'URLs (doc_url cannot be absolute)')
index_url = doc_url + '/'
searchindex_url = doc_url + '/searchindex.js'
            docopts_url = doc_url + '/_static/documentation_options.js'
else:
index_url = os.path.join(doc_url, 'index.html')
searchindex_url = os.path.join(doc_url, 'searchindex.js')
docopts_url = os.path.join(
doc_url, '_static', 'documentation_options.js')
# detect if we are using relative links on a Windows system
if (os.name.lower() == 'nt' and
not doc_url.startswith(('http://', 'https://'))):
if not relative:
raise ExtensionError(
'You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# Download and find documentation options. As of Sphinx 1.7, these
# options are now kept in a standalone file called
# 'documentation_options.js'. Since SphinxDocLinkResolver can be called
# not only for the documentation which is being built but also ones
# that are being referenced, we need to try and get the index page
# first and if that doesn't work, check for the
# documentation_options.js file.
index = get_data(index_url, gallery_dir)
if 'var DOCUMENTATION_OPTIONS' in index:
self._docopts = parse_sphinx_docopts(index)
else:
docopts = get_data(docopts_url, gallery_dir)
self._docopts = parse_sphinx_docopts(docopts)
# download and initialize the search index
sindex = get_data(searchindex_url, gallery_dir)
self._searchindex = js_index.loads(sindex)
def _get_index_match(self, first, second):
try:
match = self._searchindex['objects'][first]
except KeyError:
return None
else:
if isinstance(match, dict):
try:
match = match[second]
except KeyError:
return None
elif isinstance(match, (list, tuple)): # Sphinx 5.0.0 dev
try:
for item in match:
if item[4] == second:
match = item[:4]
break
else:
return None
except Exception:
return None
return match
def _get_link_type(self, cobj):
"""Get a valid link and type_, False if not found."""
first, second = cobj['module_short'], cobj['name']
match = self._get_index_match(first, second)
if match is None and '.' in second: # possible class attribute
first, second = second.split('.', 1)
first = '.'.join([cobj['module_short'], first])
match = self._get_index_match(first, second)
if match is None:
link = type_ = None
else:
fname_idx = match[0]
objname_idx = str(match[1])
anchor = match[3]
type_ = self._searchindex['objtypes'][objname_idx]
fname = self._searchindex['filenames'][fname_idx]
# In 1.5+ Sphinx seems to have changed from .rst.html to only
# .html extension in converted files. Find this from the options.
ext = self._docopts.get('FILE_SUFFIX', '.rst.html')
fname = os.path.splitext(fname)[0] + ext
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
fullname = '.'.join([first, second])
if anchor == '':
anchor = fullname
elif anchor == '-':
anchor = (self._searchindex['objnames'][objname_idx][1] + '-' +
fullname)
link = link + '#' + anchor
return link, type_
def resolve(self, cobj, this_url, return_type=False):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
return_type : bool
If True, return the type as well.
Returns
-------
link : str or None
The link (URL) to the documentation.
type_ : str
The type. Only returned if return_type is True.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name not in self._link_cache:
# we don't have it cached
self._link_cache[full_name] = self._get_link_type(cobj)
link, type_ = self._link_cache[full_name]
if self.relative and link is not None:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
                # replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return (link, type_) if return_type else link
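# A minimal usage sketch (hypothetical URLs and names, shown for illustration
# only -- resolving requires the target site's searchindex.js to be reachable):
#
#     resolver = SphinxDocLinkResolver('https://numpy.org/doc/stable',
#                                      gallery_dir='examples')
#     cobj = {'name': 'mean', 'module': 'numpy', 'module_short': 'numpy'}
#     url = resolver.resolve(cobj, this_url='auto_examples/plot_demo.html')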
def _handle_http_url_error(e, msg='fetching'):
if isinstance(e, HTTPError):
error_msg = '%s %s: %s (%s)' % (msg, e.url, e.code, e.msg)
elif isinstance(e, URLError):
error_msg = '%s: %s' % (msg, e.reason)
logger.warning('The following %s has occurred %s' % (
type(e).__name__, error_msg))
def _sanitize_css_class(s):
for x in '~!@$%^&*()+=,./\';:"?><[]\\{}|`#':
s = s.replace(x, '-')
return s
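# For illustration: _sanitize_css_class('scipy.sparse.linalg') returns
# 'scipy-sparse-linalg', since '.' is among the characters replaced by '-'.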
def _embed_code_links(app, gallery_conf, gallery_dir):
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
src_gallery_dir = os.path.join(app.builder.srcdir, gallery_dir)
for this_module, url in gallery_conf['reference_url'].items():
try:
if url is None:
doc_resolvers[this_module] = SphinxDocLinkResolver(
app.builder.outdir, src_gallery_dir, relative=True)
else:
doc_resolvers[this_module] = SphinxDocLinkResolver(
url, src_gallery_dir)
except (URLError, HTTPError) as e:
_handle_http_url_error(e)
html_gallery_dir = os.path.abspath(os.path.join(app.builder.outdir,
gallery_dir))
# patterns for replacement
link_pattern = (
'<a href="{link}" title="{title}" class="{css_class}">{text}</a>')
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
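    # For illustration, a backreference to ``np.mean`` (hypothetical resolved
    # URL) would be replaced with something like:
    #   <a href="https://.../numpy.mean.html" title="numpy.mean"
    #      class="sphx-glr-backref-module-numpy sphx-glr-backref-type-py-function">
    #   <span class="n">np</span><span class="o">.</span><span class="n">mean</span></a>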
# This could be turned into a generator if necessary, but should be okay
flat = [[dirpath, filename]
for dirpath, _, filenames in os.walk(html_gallery_dir)
for filename in filenames]
iterator = sphinx_compatibility.status_iterator(
flat, 'embedding documentation hyperlinks for %s... ' % gallery_dir,
color='fuchsia', length=len(flat),
stringify_func=lambda x: os.path.basename(x[1]))
intersphinx_inv = getattr(app.env, 'intersphinx_named_inventory', dict())
builtin_modules = set(intersphinx_inv.get(
'python', dict()).get('py:module', dict()).keys())
for dirpath, fname in iterator:
full_fname = os.path.join(html_gallery_dir, dirpath, fname)
subpath = dirpath[len(html_gallery_dir) + 1:]
pickle_fname = os.path.join(src_gallery_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if not os.path.exists(pickle_fname):
continue
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
# generate replacement strings with the links
str_repl = {}
for name in sorted(example_code_obj):
cobjs = example_code_obj[name]
# possible names from identify_names, which in turn gets
# possibilities from NameFinder.get_mapping
link = type_ = None
for cobj in cobjs:
for modname in (cobj['module_short'], cobj['module']):
this_module = modname.split('.')[0]
cname = cobj['name']
# Try doc resolvers first
if this_module in doc_resolvers:
try:
link, type_ = doc_resolvers[this_module].resolve(
cobj, full_fname, return_type=True)
except (HTTPError, URLError) as e:
_handle_http_url_error(
e, msg='resolving %s.%s' % (modname, cname))
# next try intersphinx
if this_module == modname == 'builtins':
this_module = 'python'
elif modname in builtin_modules:
this_module = 'python'
if link is None and this_module in intersphinx_inv:
inv = intersphinx_inv[this_module]
if modname == 'builtins':
want = cname
else:
want = '%s.%s' % (modname, cname)
for key, value in inv.items():
# only python domain
if key.startswith('py') and want in value:
link = value[want][2]
type_ = key
break
# differentiate classes from instances
is_instance = (type_ is not None and
'py:class' in type_ and
not cobj['is_class'])
if link is not None:
# Add CSS classes
name_html = period.join(orig_pattern % part
for part in name.split('.'))
full_function_name = '%s.%s' % (modname, cname)
css_class = ("sphx-glr-backref-module-" +
_sanitize_css_class(modname))
if type_ is not None:
css_class += (" sphx-glr-backref-type-" +
_sanitize_css_class(type_))
if is_instance:
css_class += " sphx-glr-backref-instance"
str_repl[name_html] = link_pattern.format(
link=link, title=full_function_name,
css_class=css_class, text=name_html)
break # loop over possible module names
if link is not None:
break # loop over cobjs
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
regex_str = '|'.join(re.escape(name) for name in names)
regex = re.compile(regex_str)
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with codecs.open(full_fname, 'r', 'utf-8') as fid:
lines_in = fid.readlines()
with codecs.open(full_fname, 'w', 'utf-8') as fid:
for line in lines_in:
line_out = regex.sub(substitute_link, line)
fid.write(line_out)
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
gallery_conf = app.config.sphinx_gallery_conf
    # XXX: Whitelist of builders for which it makes sense to embed
    # hyperlinks inside the example html. Note that link embedding
    # requires searchindex.js to exist for links to the local doc,
    # and there does not seem to be a good way of knowing which
    # builders create a searchindex.js.
if app.builder.name not in ['html', 'readthedocs']:
return
logger.info('embedding documentation hyperlinks...', color='white')
gallery_dirs = gallery_conf['gallery_dirs']
if not isinstance(gallery_dirs, list):
gallery_dirs = [gallery_dirs]
for gallery_dir in gallery_dirs:
_embed_code_links(app, gallery_conf, gallery_dir)
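# Typical configuration enabling this machinery (illustrative conf.py snippet;
# module names and URLs are examples only):
#
#     sphinx_gallery_conf = {
#         'reference_url': {
#             # None means "resolve against the documentation currently being built"
#             'sphinx_gallery': None,
#             'numpy': 'https://numpy.org/doc/stable',
#         },
#     }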
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-06-27 16:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('orders', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Payment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='creation time')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='update time')),
                ('trade_id', models.CharField(blank=True, max_length=100, null=True, unique=True, verbose_name='payment number')),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.OrderInfo', verbose_name='order')),
            ],
            options={
                'verbose_name': 'payment information',
                'verbose_name_plural': 'payment information',
'db_table': 'tb_payment',
},
),
]
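# The model created by this migration roughly corresponds to the following
# definition (a sketch for reference only -- the real models.py may differ,
# e.g. create_time/update_time typically come from a shared base model):
#
#     class Payment(models.Model):
#         create_time = models.DateTimeField(auto_now_add=True, verbose_name='creation time')
#         update_time = models.DateTimeField(auto_now=True, verbose_name='update time')
#         trade_id = models.CharField(max_length=100, unique=True, null=True, blank=True,
#                                     verbose_name='payment number')
#         order = models.ForeignKey('orders.OrderInfo', on_delete=models.CASCADE,
#                                   verbose_name='order')
#
#         class Meta:
#             db_table = 'tb_payment'
#             verbose_name = 'payment information'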
|
# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright (c) 2017, Toshio Kuratomi <tkuraotmi@ansible.com>
# Copyright (c) 2020, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import copy
import traceback
import os
from contextlib import contextmanager
from ansible.config.manager import ensure_type
from ansible.errors import (
AnsibleError,
AnsibleFileNotFound,
AnsibleAction,
AnsibleActionFail,
)
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import string_types, iteritems
from ansible.module_utils._text import to_text, to_bytes, to_native
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = True
DEFAULT_NEWLINE_SEQUENCE = "\n"
def _ensure_invocation(self, result):
# NOTE: adding invocation arguments here needs to be kept in sync with
# any no_log specified in the argument_spec in the module.
if "invocation" not in result:
if self._play_context.no_log:
result["invocation"] = "CENSORED: no_log is set"
else:
result["invocation"] = self._task.args.copy()
result["invocation"]["module_args"] = self._task.args.copy()
return result
@contextmanager
def get_template_data(self, template_path):
try:
source = self._find_needle("templates", template_path)
except AnsibleError as e:
raise AnsibleActionFail(to_text(e))
# Get vault decrypted tmp file
try:
tmp_source = self._loader.get_real_file(source)
except AnsibleFileNotFound as e:
raise AnsibleActionFail(
"could not find template=%s, %s" % (source, to_text(e))
)
b_tmp_source = to_bytes(tmp_source, errors="surrogate_or_strict")
try:
with open(b_tmp_source, "rb") as f:
try:
template_data = to_text(f.read(), errors="surrogate_or_strict")
except UnicodeError:
raise AnsibleActionFail(
"Template source files must be utf-8 encoded"
)
yield template_data
except AnsibleAction:
raise
except Exception as e:
raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e)))
finally:
self._loader.cleanup_tmp_file(b_tmp_source)
def get_template_args(self, template):
template_param = {
"newline_sequence": self.DEFAULT_NEWLINE_SEQUENCE,
"variable_start_string": None,
"variable_end_string": None,
"block_start_string": None,
"block_end_string": None,
"trim_blocks": True,
"lstrip_blocks": False,
}
if isinstance(template, string_types):
# treat this as raw_params
template_param["path"] = template
elif isinstance(template, dict):
template_args = template
template_path = template_args.get("path", None)
if not template_path:
raise AnsibleActionFail("Please specify path for template.")
template_param["path"] = template_path
# Options type validation strings
for s_type in (
"newline_sequence",
"variable_start_string",
"variable_end_string",
"block_start_string",
"block_end_string",
):
if s_type in template_args:
value = ensure_type(template_args[s_type], "string")
if value is not None and not isinstance(value, string_types):
raise AnsibleActionFail(
"%s is expected to be a string, but got %s instead"
% (s_type, type(value))
)
try:
template_param.update(
{
"trim_blocks": boolean(
template_args.get("trim_blocks", True), strict=False
),
"lstrip_blocks": boolean(
template_args.get("lstrip_blocks", False), strict=False
),
}
)
except TypeError as e:
raise AnsibleActionFail(to_native(e))
template_param.update(
{
"newline_sequence": template_args.get(
"newline_sequence", self.DEFAULT_NEWLINE_SEQUENCE
),
"variable_start_string": template_args.get(
"variable_start_string", None
),
"variable_end_string": template_args.get(
"variable_end_string", None
),
"block_start_string": template_args.get("block_start_string", None),
"block_end_string": template_args.get("block_end_string", None),
}
)
else:
raise AnsibleActionFail(
"Error while reading template file - "
"a string or dict for template expected, but got %s instead"
% type(template)
)
return template_param
def import_jinja2_lstrip(self, templates):
# Option `lstrip_blocks' was added in Jinja2 version 2.7.
if any(tmp["lstrip_blocks"] for tmp in templates):
try:
import jinja2.defaults
except ImportError:
raise AnsibleError(
"Unable to import Jinja2 defaults for determining Jinja2 features."
)
try:
jinja2.defaults.LSTRIP_BLOCKS
except AttributeError:
raise AnsibleError(
"Option `lstrip_blocks' is only available in Jinja2 versions >=2.7"
)
def load_template(self, template, new_module_args, task_vars):
        # 'template' is only supported by the k8s module (and the related
        # modules listed below).
if self._task.action not in (
"k8s",
"kubernetes.core.k8s",
"community.okd.k8s",
"redhat.openshift.k8s",
"community.kubernetes.k8s",
"openshift_adm_groups_sync",
"community.okd.openshift_adm_groups_sync",
"redhat.openshift.openshift_adm_groups_sync",
):
raise AnsibleActionFail(
"'template' is only a supported parameter for the 'k8s' module."
)
template_params = []
if isinstance(template, string_types) or isinstance(template, dict):
template_params.append(self.get_template_args(template))
elif isinstance(template, list):
for element in template:
template_params.append(self.get_template_args(element))
else:
raise AnsibleActionFail(
"Error while reading template file - "
"a string or dict for template expected, but got %s instead"
% type(template)
)
self.import_jinja2_lstrip(template_params)
wrong_sequences = ["\\n", "\\r", "\\r\\n"]
allowed_sequences = ["\n", "\r", "\r\n"]
result_template = []
old_vars = self._templar.available_variables
default_environment = {}
for key in (
"newline_sequence",
"variable_start_string",
"variable_end_string",
"block_start_string",
"block_end_string",
"trim_blocks",
"lstrip_blocks",
):
if hasattr(self._templar.environment, key):
default_environment[key] = getattr(self._templar.environment, key)
for template_item in template_params:
# We need to convert unescaped sequences to proper escaped sequences for Jinja2
newline_sequence = template_item["newline_sequence"]
if newline_sequence in wrong_sequences:
template_item["newline_sequence"] = allowed_sequences[
wrong_sequences.index(newline_sequence)
]
elif newline_sequence not in allowed_sequences:
raise AnsibleActionFail(
"newline_sequence needs to be one of: \n, \r or \r\n"
)
# template the source data locally & get ready to transfer
with self.get_template_data(template_item["path"]) as template_data:
# add ansible 'template' vars
temp_vars = copy.deepcopy(task_vars)
for key, value in iteritems(template_item):
if hasattr(self._templar.environment, key):
if value is not None:
setattr(self._templar.environment, key, value)
else:
setattr(
self._templar.environment,
key,
default_environment.get(key),
)
self._templar.available_variables = temp_vars
result = self._templar.do_template(
template_data,
preserve_trailing_newlines=True,
escape_backslashes=False,
)
result_template.append(result)
self._templar.available_variables = old_vars
resource_definition = self._task.args.get("definition", None)
if not resource_definition:
new_module_args.pop("template")
new_module_args["definition"] = result_template
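    # Illustrative task using the 'template' parameter handled above (the file
    # path and delimiter values are hypothetical):
    #
    #   - kubernetes.core.k8s:
    #       state: present
    #       template:
    #         path: deployment.yaml.j2
    #         variable_start_string: '[['
    #         variable_end_string: ']]'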
def get_file_realpath(self, local_path):
# local_path is only supported by k8s_cp module.
if self._task.action not in (
"k8s_cp",
"kubernetes.core.k8s_cp",
"community.kubernetes.k8s_cp",
):
            raise AnsibleActionFail(
                "'local_path' is only a supported parameter for the 'k8s_cp' module."
            )
if os.path.exists(local_path):
return local_path
try:
# find in expected paths
return self._find_needle("files", local_path)
except AnsibleError:
raise AnsibleActionFail(
"%s does not exist in local filesystem" % local_path
)
def get_kubeconfig(self, kubeconfig, remote_transport, new_module_args):
if isinstance(kubeconfig, string_types):
# find the kubeconfig in the expected search path
if not remote_transport:
# kubeconfig is local
# find in expected paths
kubeconfig = self._find_needle("files", kubeconfig)
# decrypt kubeconfig found
actual_file = self._loader.get_real_file(kubeconfig, decrypt=True)
new_module_args["kubeconfig"] = actual_file
elif isinstance(kubeconfig, dict):
new_module_args["kubeconfig"] = kubeconfig
else:
raise AnsibleActionFail(
"Error while reading kubeconfig parameter - "
"a string or dict expected, but got %s instead" % type(kubeconfig)
)
def run(self, tmp=None, task_vars=None):
""" handler for k8s options """
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
        # Check the current transport connection and, depending on it, look
        # for kubeconfig and src:
        # 'local'                     => look for files on the Ansible controller
        # any transport other than 'local' => look for files on the remote node
remote_transport = self._connection.transport != "local"
new_module_args = copy.deepcopy(self._task.args)
kubeconfig = self._task.args.get("kubeconfig", None)
if kubeconfig:
try:
self.get_kubeconfig(kubeconfig, remote_transport, new_module_args)
except AnsibleError as e:
result["failed"] = True
result["msg"] = to_text(e)
result["exception"] = traceback.format_exc()
return result
# find the file in the expected search path
src = self._task.args.get("src", None)
if src:
if remote_transport:
# src is on remote node
result.update(
self._execute_module(
module_name=self._task.action, task_vars=task_vars
)
)
return self._ensure_invocation(result)
# src is local
try:
# find in expected paths
src = self._find_needle("files", src)
except AnsibleError as e:
result["failed"] = True
result["msg"] = to_text(e)
result["exception"] = traceback.format_exc()
return result
if src:
new_module_args["src"] = src
template = self._task.args.get("template", None)
if template:
self.load_template(template, new_module_args, task_vars)
local_path = self._task.args.get("local_path")
state = self._task.args.get("state", None)
if local_path and state == "to_pod":
new_module_args["local_path"] = self.get_file_realpath(local_path)
# Execute the k8s_* module.
module_return = self._execute_module(
module_name=self._task.action,
module_args=new_module_args,
task_vars=task_vars,
)
# Delete tmp path
self._remove_tmp_path(self._connection._shell.tmpdir)
result.update(module_return)
return self._ensure_invocation(result)
|
# Given a DNA string s of length at most 1000 bp
# Return the reverse complement s^c of s
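# Example: for s = 'AAAACCCGGT' the reverse complement is 'ACCGGGTTTT'
# (the sample case from the Rosalind REVC problem).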
import sys
def main():
    # Read the DNA string and drop any surrounding whitespace/newlines.
    with open(sys.argv[1]) as fs:
        dna = fs.read().strip()
    complements = {'A': 'T', 'C': 'G', 'T': 'A', 'G': 'C'}
    # Reverse the sequence, then complement each base.
    reverse = dna[::-1]
    reverseComplement = ''
    for base in reverse:
        reverseComplement += complements[base]
    print(reverseComplement)
if __name__ == '__main__':
main()
|
BLOG_ITEMS_PER_PAGE = 'BLOG_ITEMS_PER_PAGE'
ATTRIBUTE_WEBSITE_TITLE = 'ATTRIBUTE_WEBSITE_TITLE'
COVER_LETTER = 'COVER_LETTER'
ATTRIBUTE_WEBSITE_KEYWORDS = 'ATTRIBUTE_WEBSITE_KEYWORDS'
ATTRIBUTE_DESCRIPTION = 'ATTRIBUTE_DESCRIPTION'
ATTRIBUTE_JOB_POSITION = 'ATTRIBUTE_JOB_POSITION'
ATTRIBUTE_NAME = 'ATTRIBUTE_NAME'
ATTRIBUTE_EMAIL = 'ATTRIBUTE_EMAIL'
ATTRIBUTE_PHONE = 'ATTRIBUTE_PHONE'
ATTRIBUTE_WEBSITE = 'ATTRIBUTE_WEBSITE'
|