id | content
|---|---|
11583646
|
import subprocess
import numpy as np
import argparse
import torch
from torch import optim, nn
from two_FC_layer_model_Audio import Two_FC_layer
import os
import time
import gc
from collections.abc import Mapping, Container  # moved to collections.abc in Python 3.3; removed from collections in 3.10
from sys import getsizeof
import h5py
from torch.utils.data import DataLoader, Dataset
from pytorchtools import EarlyStopping
from scipy.stats import pearsonr
from sklearn import metrics
from torch.nn import functional as F
# Note: AlexNet is downloaded from "https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth"
def deep_getsizeof(o, ids):
d = deep_getsizeof
if id(o) in ids:
return 0
r = getsizeof(o)
ids.add(id(o))
if isinstance(o, (str, bytes)):  # was isinstance(0, np.unicode): checked the literal 0 via a removed numpy alias
return r
if isinstance(o, Mapping):
return r + sum(d(k, ids) + d(v, ids) for k, v in o.items())  # iteritems() is Python 2 only
if isinstance(o, Container):
return r + sum(d(x, ids) for x in o)
return r
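# A minimal usage sketch of deep_getsizeof (no new dependencies; the `ids`
# set tracks visited objects so shared references are only counted once):
#   nested = {'a': [1, 2, 3], 'b': {'c': 'hello'}}
#   print(deep_getsizeof(nested, set()))  # total size in bytes, recursively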
# Memory check
def memoryCheck():
ps = subprocess.Popen(['nvidia-smi', '--query-gpu=memory.used,utilization.gpu', '--format=csv'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(ps.communicate(), '\n')
os.system("free -m")
# Free memory
def freeCacheMemory():
torch.cuda.empty_cache()
gc.collect()
# Build dataloaders
def train_dataloader_for_FC_model_Arousal(trfeatures, trarousal, args):
class my_dataset(Dataset):
def __init__(self, data, label):
self.data = data
self.label = label
def __getitem__(self, index):
return self.data[index], self.label[index]
def __len__(self):
return len(self.data)
# Convert a dictionary to a tensor
train_features = np.concatenate([value.unsqueeze(0) for _, value in trfeatures.items()], axis=1)
train_features = train_features.squeeze(0)
#
train_arousal = np.concatenate([value.unsqueeze(0) for _, value in trarousal.items()], axis=1)
train_arousal = train_arousal.reshape(-1, 1)
#
# Build dataloaders
train_loader = DataLoader(dataset=my_dataset(np.array(train_features), train_arousal), batch_size=args.batch_size, shuffle=True)
#
return train_loader
def validate_dataloader_for_FC_model_Arousal(tfeatures, tarousal, tarousal_cont, args):
class my_dataset(Dataset):
def __init__(self, data, label, cont_gtruth):
self.data = data
self.label = label
self.cont_gtruth = cont_gtruth
def __getitem__(self, index):
return self.data[index], self.label[index], self.cont_gtruth[index]
def __len__(self):
return len(self.data)
# Build dataloaders
validate_loader = DataLoader(dataset=my_dataset(np.array(tfeatures), np.array(tarousal.reshape(-1,1)), np.array(tarousal_cont.reshape(-1,1))), batch_size=args.batch_size, shuffle=False)
#
return validate_loader
# Train
def train_func(train_loader, vfeature, varousal, the_model, device, criter, optimizer, n_epochs, input_size, patience):
start_time = time.time()
the_model.train() # prep model for training
#
# to track the training loss within an epoch
train_losses = []
# to track the average training loss per epoch
avg_train_losses = []
# to track the average validation loss per epoch
avg_valid_losses = []
#
# initialize the early_stopping object
early_stopping = EarlyStopping(patience=patience, verbose=True)
for epoch in range(1, n_epochs + 1):
# Adjust learning rate
# adjust_learning_rate(optimizer, epoch)
###################
# train the model #
###################
the_model.train() # prep model for training
for (feature, arousal) in train_loader:
feature, arousal = feature.to(device), arousal.to(device)
#
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = the_model(feature.reshape(-1, input_size))  # call the module directly so hooks run
output = output/T  # temperature scaling; T is set in the __main__ block
# calculate the loss
# KL Loss
# output = F.log_softmax(output, dim=1)
# loss = criter(output.float(), arousal.float())
#-----------------------------------------------------------------------------
# Cross Entropy Loss
loss = criter(output.squeeze(1), arousal.squeeze(1)) # CrossEntropy Loss
#-----------------------------------------------------------------------------
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()  # retain_graph is unnecessary here; the graph is rebuilt each batch
# perform a single optimization step (parameter update)
optimizer.step()
# record training loss
train_losses.append(loss.item())
######################
# validate the model #
######################
the_model.eval() # prep model for evaluation
vfeature, varousal = vfeature.to(device), varousal.to(device)
valid_output = the_model(vfeature)
valid_output = valid_output/T
# validation loss:
# Cross Entropy Loss
valid_loss = criter(valid_output.squeeze(1), varousal)
#----------------------------------------------------------------------------
# KL loss
#valid_output = F.log_softmax(valid_output,dim=1)
#valid_loss = criter(valid_output.float(), varousal.unsqueeze(1).float())
#----------------------------------------------------------------------------
# print training/validation statistics
# calculate average loss over an epoch
train_loss = np.average(train_losses)
avg_train_losses.append(train_loss)
avg_valid_losses.append(valid_loss.item())
epoch_len = len(str(n_epochs))
print_msg = (f'[{epoch:>{epoch_len}}/{n_epochs:>{epoch_len}}]' +
f' train_loss: {train_loss:.8f} ' +
f' valid_loss: {valid_loss:.8f} ')
print(print_msg)
# clear lists to track next epoch
train_losses = []
# early_stopping needs the validation loss to check if it has decreased,
# and if it has, it will make a checkpoint of the current model
early_stopping(valid_loss.item(), the_model)
print('Epoch[{}/{}]: Training time: {} seconds '.format(epoch,n_epochs, time.time() - start_time))
start_time = time.time()
#
del valid_output
freeCacheMemory()
if early_stopping.early_stop:
print("Early stopping")
break
# load the last checkpoint with the best model
the_model.load_state_dict(torch.load('checkpoint.pt'))
return the_model, avg_train_losses, avg_valid_losses
# Validate
def validate_func(feature, arousal, the_model, device):
#
the_model.eval()
#
feature, arousal = feature.to(device), arousal.to(device)
output = the_model(feature)
output /= T
# Accuracy and Accuracy +-1
_, prediction = torch.max(output.data, 1)
# prediction = prediction.cpu().numpy()
test_acc = torch.sum(prediction == arousal)
# Compute the average accuracy and loss over all validate dataset
test_acc = np.float32(test_acc.item()/output.size()[0])
test_acc_1 = 0
bin_bias = np.abs((prediction - arousal).cpu())
for element in bin_bias:
if element.item() == 1:
test_acc_1 += 1
test_acc_1 = test_acc_1/output.size()[0]
print('Validation (Audio features): ')
print('- Discrete case: For Arousal: Accuracy: {:.5f} %, Accuracy+/-1: {:.5f} % \n'.format(100 * test_acc, 100 * test_acc_1))
return prediction, test_acc, test_acc_1
# Decay the learning rate
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
newlr = args.lr * (0.1 ** (epoch // 25))
for param_group in optimizer.param_groups:
param_group['lr'] = newlr
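# Worked example of the schedule above: with args.lr = 0.001,
#   epochs 1-24  -> 0.001
#   epochs 25-49 -> 0.0001
#   epochs 50-74 -> 0.00001
# (epoch // 25 bumps the exponent every 25 epochs.)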
# Checkpoint
def checkpoint(model_checkpoint, epoch):
model_out_path = dir_path + 'Thao_model/' + "model_epoch_{}.pth".format(epoch)
torch.save(model_checkpoint, model_out_path)
print("Checkpoint saved to {}".format(model_out_path))
# Load extracted features and arousal files
def loadingfiles(device):
# Load extracted features and arousal .h5 files
print('\n')
print('Loading h5 files containing extracted features and arousal values.....')
loading_time = time.time()
h5file = h5py.File(os.path.join(dir_path, 'only_audio.h5'), 'r')
train_features = {}
for k, v in h5file.items():
train_features[int(k)] = torch.from_numpy(v[()])  # Dataset.value was removed in h5py 3.0
h5file.close()
#
print('Time for loading extracted features: ', time.time() - loading_time)
#
h5file = h5py.File(os.path.join(dir_path, 'my_discrete_arousal_Audio.h5'), 'r')
train_arousal = {}
for k, v in h5file.items():
train_arousal[int(k)] = torch.from_numpy(v[()])  # Dataset.value was removed in h5py 3.0
h5file.close()
return train_features, train_arousal
# Main
def main(args):
# Device configuration
use_cuda = not args.no_cuda and torch.cuda.is_available()
# Manual seed
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
print('Device: ', device)
#------------------------------------------------------------------------------------------------
# input_size for the 2FC-layer model
input_size = 1582
#-----------------------------------------------------------------------------------------------
# Cross-validation
print('Cross-validation.....')
Accuracy_ave = 0
Accuracy_1_ave = 0
movlistlength = len(movlist)
for index in range(0, movlistlength):
m_start_time = time.time()
# Build the model
model = Two_FC_layer().to(device)
# Loss and optimizer
# Cross Entropy Loss
criterion = nn.CrossEntropyLoss()
#---------------------------------------------------------------------------------
# KL Loss
# criterion = nn.KLDivLoss()
#---------------------------------------------------------------------------------
optimizer = torch.optim.SGD(model.parameters(), args.lr, weight_decay=args.dw) # 0.05
# for model training
train_features, train_arousal = loadingfiles(device)
# for model validation
validate_features = train_features[index].clone()
validate_arousal = train_arousal[index].clone()
# for model training
train_features.pop(index)
train_arousal.pop(index)
#
train_dataset = train_dataloader_for_FC_model_Arousal(train_features, train_arousal, args)
# validate_dataset = validate_dataloader_for_FC_model_Arousal(validate_features, validate_arousal, validate_cont_arousal, args)
# Train and validate on each epoch
print('Validate on: ', movlist[index],'. Train on the rest.')
model, train_losses, valid_losses = train_func(train_dataset, validate_features, validate_arousal, model, device, criterion, optimizer, args.num_epochs, input_size, args.patience)
print('Training time for ', movlist[index], ': ', time.time() - m_start_time)
val_output_disc, val_accuracy, val_accuracy_1 = validate_func(validate_features, validate_arousal, model, device)
Accuracy_ave += val_accuracy
Accuracy_1_ave += val_accuracy_1
#----------------------------------------------------------------------------------------------------------
# Save model
# Model name
model_name = movlist[index] + '_emobase2010_2FC__Arousal_Audio.pth'
torch.save(model.state_dict(), os.path.join(args.model_path, model_name))
#---------------------------------------------------------------------------------------------------------------
# save predicted arousal labels
afilename = movlist[index] + '_predArousal_emobase2010_2FC_Audio.h5'
h5file = h5py.File(os.path.join(pred_path, afilename), mode='w')
savedata = val_output_disc.cpu()
h5file.create_dataset('default', data=np.array(savedata.detach().numpy(), dtype=np.int32))
h5file.close()
# Free memory
del model, optimizer, validate_features, validate_arousal, val_output_disc, train_features, train_arousal
freeCacheMemory()
#
print('Running time for ', movlist[index], ' : ', time.time() - m_start_time)
print('After validation: ')
memoryCheck()
Accuracy_1_ave += Accuracy_ave  # Accuracy+/-1 also counts exact matches, so fold them in
print('-----------------------------------------------RESULTS----------------------------------------------- \n')
print('12-fold cross-validation: ')
print('For discrete case: Arousal: Accuracy: {:.5f}, Accuracy+/-1: {:.5f} \n'.format(
100 * Accuracy_ave / movlistlength, 100 * Accuracy_1_ave / movlistlength))
if __name__ == "__main__":
#
dir_path = '/home/minhdanh/Documents/2FC_Audio'
model_path = os.path.join(dir_path, 'Thao_model') # path to save models
pred_path = os.path.join(dir_path, 'PredictedValues') # path to save predicted arousal values
# ------------------------------------------------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', type=str, default= model_path, help='path for saving trained models')
#-------------------------------------------------------------------------------------------------------------------
parser.add_argument('--num_epochs', type=int, default=200) # 200
parser.add_argument('--patience', type=int, default=25, help ='early stopping patience; how long to wait after last time validation loss improved')
parser.add_argument('--batch_size', type=int, default=128, help = 'number of feature vectors loaded per batch') #128
parser.add_argument('--lr', type=float, default = 0.001, metavar='LR', help = 'initial learning rate') # 0.005
parser.add_argument('--dw', type=float, default = 0.001, metavar='DW', help = 'decay weight')
parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
args = parser.parse_args()
print(args)
# ------------------------------------------------------------------------------------------------------------------
movlist = ['BMI', 'LOR', 'GLA', 'DEP', 'CRA', 'CHI', 'FNE', 'ABE', 'MDB', 'NCO', 'RAT', 'SIL']
# Temperature in softmax
T = 2.0
# Means of bins:
num_bins = 7
step = 2.0 / num_bins
bin_means = np.array([np.float32(-1.0 + step / 2.0)])
for i in range(1, num_bins):
binmean = (-1.0 + step / 2.0) + i * step
bin_means = np.append(bin_means, np.float32(binmean))
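# For num_bins = 7 the loop above yields bin centers (to 4 d.p.):
# [-0.8571, -0.5714, -0.2857, 0.0, 0.2857, 0.5714, 0.8571]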
#-------------------------------------------------------------------------------------------------------------------
# Note: OF_image_names.csv and image-values.csv must have the same row numbers (number of opt. flow images = numb of images)
main_start_time = time.time()
main(args)
print('Total running time: {:.5f} seconds' .format(time.time() - main_start_time))
|
11583649
|
import sys
import json
import os
import logging
from logging.config import dictConfig
from flask import Flask
from flask_cors import CORS
from flask_restful import reqparse, abort, Api, Resource
from flask import request, jsonify
import base64
import yaml
import argparse
import textwrap
sys.path.append("/BingServer/src/RestAPI/utils")
import hashlib
import jinja2
from jinja2 import Template
from bs4 import BeautifulSoup
import requests
import re
import http.cookiejar
import uuid
import urllib.request, urllib.error, urllib.parse
import subprocess
def get_soup(url,header):
#return BeautifulSoup(urllib2.urlopen(urllib2.Request(url,headers=header)),
# 'html.parser')
return BeautifulSoup(urllib.request.urlopen(
urllib.request.Request(url,headers=header)),
'html.parser')
def query_image(query_input):
query= query_input.split()
query='+'.join(query)
query = query.encode('utf-8').decode('ascii','ignore')
url="http://www.bing.com/images/search?q=" + query + "&FORM=HDRSC2"
#add the directory for your image here
DIR="Pictures"
header={'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"}
soup = get_soup(url,header)
ActualImages=[]# contains the link for Large original images, type of image
for a in soup.find_all("a",{"class":"iusc"}):
#print a
mad = json.loads(a["mad"])
turl = mad["turl"]
m = json.loads(a["m"])
murl = m["murl"]
image_name = urllib.parse.urlsplit(murl).path.split("/")[-1]
print(image_name)
ActualImages.append({ "imgname": image_name, "turl" : turl, "murl": murl})
logger.info("Query", query_input, " returns ", len(ActualImages),"images: ", ActualImages )
return ActualImages
def query_image2(query_input):
query= query_input.split()
query='+'.join(query)
query = query.encode('utf-8').decode('ascii','ignore')
url="http://www.bing.com/images/search?q=" + query + "&qft=+filterui:imagesize-medium"+ "&FORM=HDRSC2"
#add the directory for your image here
DIR="Pictures"
header={'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"}
soup = get_soup(url,header)
ActualImages=[]# contains the link for Large original images, type of image
for a in soup.find_all("a",{"class":"iusc"}):
#print a
mad = json.loads(a["mad"])
turl = mad["turl"]
m = json.loads(a["m"])
murl = m["murl"]
image_name = urllib.parse.urlsplit(murl).path.split("/")[-1]
print(image_name)
ActualImages.append({ "imgname": image_name, "turl" : turl, "murl": murl})
logger.info("Query", query_input, " returns ", len(ActualImages),"images: ", ActualImages )
return ActualImages
datadir = "/var/lib/bingserver"
dir_path = os.path.dirname(os.path.realpath(__file__))
def exec_cmd_local(execmd, suppressWarning = False):
if suppressWarning:
execmd += " 2>/dev/null"  # fixed: originally appended to an undefined name `cmd`
try:
logger.info("Executing ... %s" % execmd )
output = subprocess.check_output( execmd, shell=True )
except subprocess.CalledProcessError as e:
output = "Return code: %s, output: %s " % (e.returncode, e.output.strip())
# print output
return output
with open('/BingServer/src/RestAPI/logging.yaml', 'r') as f:
logging_config = yaml.safe_load(f)  # yaml.load without a Loader is deprecated and unsafe
dictConfig(logging_config)
logger = logging.getLogger("bingserver")
logger = logging.getLogger("bingserver")
logger.info("-------------------- bingserver Started -----------------")
#sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),"../utils"))
app = Flask(__name__)
CORS(app)
api = Api(app)
parser = reqparse.RequestParser()
class Info(Resource):
def get(self):
return "This is a server v2 help to execute and parse bing seach results."
api.add_resource(Info, '/Info')
class BingImageSearch(Resource):
def get(self, query):
ActualImages = query_image(query)
return jsonify(images=ActualImages)
api.add_resource(BingImageSearch, '/api/BingImageSearch/<string:query>')
class BingImageSearch2(Resource):
def get(self, query):
ActualImages = query_image2(query)  # use the medium-image variant this endpoint exposes
return jsonify(images=ActualImages)
api.add_resource(BingImageSearch2, '/api/BingImageSearch2/<string:query>')
class receiveImg (Resource):
def post(self):
logger.info("Received image request %s" % request)
name = str(uuid.uuid4())
dirname = "/var/log/apache2"
savename = os.path.join(dirname, name + ".jpg")
logger.info("To save image to to %s" % savename)
file = request.files['file']
extension = os.path.splitext(file.filename)[1]
f_name = str(uuid.uuid4()) + extension
full_filename = os.path.join(dirname, f_name)
file.save(full_filename)
logger.info("Image saves to %s" % full_filename)
recognCmd = "sudo /usr/bin/python3 /root/models/tutorials/image/imagenet/classify_image.py --image_file " + full_filename
output = exec_cmd_local(recognCmd)
logger.info("Recognition result: %s" % output)
return jsonify( imgname = output)
api.add_resource(receiveImg, "/api/receiveImg")
if __name__ == '__main__':
dirpath = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
# print "Directory: " + dirpath
os.chdir(dirpath)
parser = argparse.ArgumentParser( prog='restapi.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
A rest API for the bing server. Without any argument, run API server.
''') )
parser.add_argument("-i", "--image",
help="Query for images",
action="store",
default=None )
args = parser.parse_args()
if args.image is not None:
query_image( args.image )
else:
logging.info( "Main program starts")
app.run(debug=False,host="0.0.0.0",port=180, threaded=True)
|
11583683
|
from typing import Dict
import torch
import pickle
MAX_SIZE_LIMIT = 65533
def update_d1_with_d2(d1: Dict, d2: Dict):  # fill d1 with entries from d2 without overwriting existing keys
if d2 is None:
return
for k, v in d2.items():
d1[k] = d1.get(k, v)
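# Small worked example (hypothetical values) - existing keys in d1 win:
#   d1 = {'a': 1}; update_d1_with_d2(d1, {'a': 9, 'b': 2})
#   d1 is now {'a': 1, 'b': 2}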
def object_to_byte_tensor(obj, max_size=4094):
"""Encode Python objects to PyTorch byte tensors."""
assert max_size <= MAX_SIZE_LIMIT
byte_tensor = torch.zeros(max_size, dtype=torch.uint8)
obj_enc = pickle.dumps(obj)
obj_size = len(obj_enc)
if obj_size > max_size:
raise Exception(f'objects too large: object size {obj_size}, max size {max_size}')
byte_tensor[0] = obj_size // 256
byte_tensor[1] = obj_size % 256
byte_tensor[2:2 + obj_size] = torch.ByteTensor(list(obj_enc))
return byte_tensor
def byte_tensor_to_object(byte_tensor, max_size=4094):
"""Decode PyTorch byte tensors to Python objects."""
assert max_size <= MAX_SIZE_LIMIT
obj_size = byte_tensor[0].item() * 256 + byte_tensor[1].item()
obj_enc = bytes(byte_tensor[2:2 + obj_size].tolist())
obj = pickle.loads(obj_enc)
return obj
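# A minimal round-trip sketch of the two helpers above, using only the
# imports already present:
if __name__ == "__main__":
payload = {"step": 7, "loss": 0.25}
encoded = object_to_byte_tensor(payload)  # fixed-size uint8 tensor with a 2-byte length header
decoded = byte_tensor_to_object(encoded)  # reads the header, unpickles the payload
assert decoded == payload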
|
11583711
|
import pandas as pd
import torch
from PIL import Image, ImageFile
from torch.utils.data import Dataset
import os
device = torch.device("cuda:0")
ImageFile.LOAD_TRUNCATED_IMAGES = True
class CollectionsDataset(Dataset):
def __init__(self, csv_file, root_dir, num_classes, image_size, folds=None, transform=None):
if folds is None:
folds = []
self.data = pd.read_csv(csv_file)
if len(folds) > 0:
self.data = self.data[self.data.fold.isin(folds)].reset_index(drop=True)
self.root_dir = root_dir
self.transform = transform
self.num_classes = num_classes
self.image_size = image_size
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
img_name = os.path.join(self.root_dir, self.data.loc[idx, 'id'] + '.png')
image = Image.open(img_name)
labels = self.data.loc[idx, 'attribute_ids']
labels = labels.split()
label_tensor = torch.zeros(self.num_classes)
for i in labels:
label_tensor[int(i)] = 1
if self.transform:
image = self.transform(image)
return {'image': image,
'labels': label_tensor
}
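# Worked example of the multi-hot encoding above (hypothetical row): with
# num_classes=10 and attribute_ids '3 7', label_tensor becomes
# tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 0.])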
class CollectionsDatasetTest(Dataset):
def __init__(self, csv_file, root_dir, image_size, transform=None):
self.data = pd.read_csv(csv_file)
self.root_dir = root_dir
self.transform = transform
self.image_size = image_size
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
img_name = os.path.join(self.root_dir, self.data.loc[idx, 'id'] + '.png')
image = Image.open(img_name)
if self.transform:
image = self.transform(image)
return {'image': image}
|
11583791
|
from typing import List
class Solution:
def generate(self, numRows: int) -> List[List[int]]:
pascal = []
for i in range(numRows):
if i == 0:
array = [1] # the first row is [1]
pascal.append(array) # append row to result list
else:
array = [0] + array + [0] # pad the previous row with zeros
array = [sum(array[j:j+2]) for j in range(len(array)-1)] # pairwise sums give the next row
pascal.append(array)
return pascal
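# Usage sketch:
#   Solution().generate(5)
#   -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]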
|
11583819
|
import domoticz
from devices.light.on_off import OnOffLight
class DimmerLight(OnOffLight):
MAX_BRIGHTNESS = 100
def create_device(self, unit, device_id, device_name):
return domoticz.create_device(Unit=unit, DeviceID=device_id, Name=device_name, Type=244, Subtype=73, Switchtype=7)
def set_brightness_feature(self, feature):
self.brightness_feature = feature
# Returns brightness in the 0..100 range
def get_brightness_value(self, value):
value_key = self.brightness_feature['property']
value_min = self.brightness_feature['value_min']
value_max = self.brightness_feature['value_max']
if value_key in value:
return int((value[value_key] - value_min) * self.MAX_BRIGHTNESS / (value_max - value_min))
else:
return None
def get_brightness_command(self, level):
value_key = self.brightness_feature['property']
value_min = self.brightness_feature['value_min']
value_max = self.brightness_feature['value_max']
value = value_min + int(level * (value_max - value_min) / self.MAX_BRIGHTNESS)
return {
value_key: value
}
def get_numeric_value(self, value, device):
state = self.get_state_value(value)
brightness = self.get_brightness_value(value)
if state is not None:
return super().get_numeric_value(value, device)
elif brightness is not None:
return 1 if brightness > 0 else 0
else:
return device.nValue
def get_string_value(self, value, device):
brightness = self.get_brightness_value(value)
if brightness is not None:
return str(brightness)
else:
return device.sValue
def generate_command(self, command, level, color):
cmd = command.upper()
if cmd == 'SET LEVEL':
state_value_key = self.state_feature['property']
return dict({
state_value_key: self.state_feature['value_on'],
}, **self.get_brightness_command(level))
return super().generate_command(command, level, color)
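# A worked sketch of the brightness scaling above, assuming a zigbee2mqtt-style
# feature dict (the 0..254 raw range is an assumption, not taken from this file):
#   feature = {'property': 'brightness', 'value_min': 0, 'value_max': 254}
#   raw 254 -> int((254 - 0) * 100 / 254) = 100   (get_brightness_value)
#   raw 127 -> int((127 - 0) * 100 / 254) = 50
#   'Set Level' 50 -> 0 + int(50 * 254 / 100) = 127   (get_brightness_command)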
|
11583824
|
from pinyin import get_pinyin
def dummy_test():
# assuming get_pinyin takes the text alone and returns the numbered-tone form;
# the original asserted the truthiness of a two-argument call
assert get_pinyin('你好') == 'ni3 hao3'
if __name__ == "__main__":
print(get_pinyin("你好?中文!中文的,符号"))
|
11583870
|
from pybrain.rl.agents.linearfa import LinearFA_Agent
from pybrain.rl.experiments import EpisodicExperiment
from environment import Environment
from tasks import LinearFATileCoding3456BalanceTask
from training import LinearFATraining
from learners import SARSALambda_LinFA_ReplacingTraces
task = LinearFATileCoding3456BalanceTask(max_time=50.0)
learner = SARSALambda_LinFA_ReplacingTraces(task.nactions, task.outdim)
learner._lambda = 0.95
task.discount = learner.rewardDiscount
agent = LinearFA_Agent(learner)
agent.epsilonGreedy = True
agent.init_exploration = 0.3
# The state has a huge number of dimensions, and the logging causes me to run
# out of memory. We needn't log, since learning is done online.
agent.logging = False
performance_agent = LinearFA_Agent(learner)
performance_agent.logging = False
performance_agent.greedy = True
performance_agent.learning = False
experiment = EpisodicExperiment(task, agent)
# TODO PyBrain says that the learning rate needs to decay, but I don't see that
# described in Randlov's paper.
# A higher number here means the learning rate decays slower.
learner.learningRateDecay = 100000
# NOTE increasing this number above from the default of 100 is what got the
# learning to actually happen, and fixed the bug/issue where the performance
# agent's performance stopped improving.
tr = LinearFATraining('balance_sarsalambda_linfa_replacetrace', experiment,
performance_agent, verbose=True)
tr.train(2000000, performance_interval=50, n_performance_episodes=5,
serialization_interval=50)
|
11583903
|
import os
import torch
import datasets
import translation_models.model as tmm
import translation_models.help_fun as transl_hf
import onmt
import model_manager
import quantization
import copy
import functools
import quantization.help_functions as qhf
import helpers.functions as mhf
cuda_devices = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
print('CUDA_VISIBLE_DEVICES: {} for a total of {}'.format(cuda_devices, len(cuda_devices)))
datasets.BASE_DATA_FOLDER = '...'
SAVED_MODELS_FOLDER = '...'
USE_CUDA = torch.cuda.is_available()
NUM_GPUS = len(cuda_devices)
TRAIN_TEACHER_MODEL = False
TRAIN_SMALLER_MODEL = False
TRAIN_SEQUENCE_DISTILLED = False
TRAIN_WORD_DISTILLED = False
TRAIN_QUANTIZED_DISTILLED = False
TRAIN_DIFFERENTIABLE_QUANTIZATION = False
COMPUTE_BLEU_MODELS = True
CHECK_PM_QUANTIZATION = True
try:
os.mkdir(datasets.BASE_DATA_FOLDER)
except OSError: pass  # directory already exists
try:
os.mkdir(SAVED_MODELS_FOLDER)
except OSError: pass  # directory already exists
epochsToTrainOnmtIntegDataset = 15
onmtManager = model_manager.ModelManager('model_manager_WMT13.tst',
'model_manager', create_new_model_manager=False)
for x in onmtManager.list_models():
if onmtManager.get_num_training_runs(x) > 0:
print(x, onmtManager.load_metadata(x)[1]['perplexity'][-1])
WMT13_saved_models_folder = os.path.join(SAVED_MODELS_FOLDER, 'WMT13')
try:
os.mkdir(WMT13_saved_models_folder)
except OSError: pass  # directory already exists
#load the data
batch_size = 64 * NUM_GPUS
if batch_size % NUM_GPUS != 0:
raise ValueError('Batch size: {} must be a multiple of the number of gpus:{}'.format(batch_size, NUM_GPUS))
transl_dataset = datasets.WMT13_DE_EN(pin_memory=True)
train_loader, test_loader = transl_dataset.getTrainLoader(batch_size), transl_dataset.getTestLoader(batch_size)
#Teacher model
teacherOptions = copy.deepcopy(onmt.standard_options.stdOptions)
# it only matters when creating the distillation dataset
teacherOptions['rnn_size'] = 500
teacherOptions['epochs'] = epochsToTrainOnmtIntegDataset
teacherModel_name = 'WMT13_teacherModel'
teacherModelPath = os.path.join(WMT13_saved_models_folder, teacherModel_name)
teacherModel = tmm.create_model(transl_dataset.fields, options=teacherOptions)
if USE_CUDA: teacherModel = teacherModel.cuda()
if teacherModel_name not in onmtManager.saved_models:
onmtManager.add_new_model(teacherModel_name, teacherModelPath,
arguments_creator_function=teacherOptions)
if TRAIN_TEACHER_MODEL:
onmtManager.train_model(teacherModel, model_name=teacherModel_name,
train_function=tmm.train_model,
arguments_train_function={'options':teacherOptions},
train_loader=train_loader, test_loader=test_loader)
if onmtManager.get_num_training_runs(teacherModel_name) > 0:
teacherModel.load_state_dict(onmtManager.load_model_state_dict(teacherModel_name))
standardTranslateOptions = onmt.standard_options.standardTranslationOptions
# Smaller model with 1 LSTM layer (1 for the encoder, 1 for the decoder, so 2 in total)
# with rnn size 500 (just like the teacher)
#smaller model
smallerOptions = copy.deepcopy(onmt.standard_options.stdOptions)
# if not specified, it is trained with 2 layers (2 for the encoder and 2 for the decoder) and an rnn size of 200
smallerOptions['batch_size'] = batch_size
smallerOptions['rnn_size'] = 500
smallerOptions['layers'] = 1
smallerOptions['epochs'] = 5
smaller_model_name = 'WMT13_smallerModel_{}rnn_size1_layer_5epochs'.format(500)
smallerModelPath = os.path.join(WMT13_saved_models_folder, smaller_model_name)
smallerModel = tmm.create_model(transl_dataset.fields, options=smallerOptions)
if USE_CUDA: smallerModel = smallerModel.cuda()
if smaller_model_name not in onmtManager.saved_models:
onmtManager.add_new_model(smaller_model_name, smallerModelPath,
arguments_creator_function=smallerOptions)
if TRAIN_SMALLER_MODEL:
onmtManager.train_model(smallerModel, model_name=smaller_model_name,
train_function=tmm.train_model,
arguments_train_function={'options':smallerOptions},
train_loader=train_loader, test_loader=test_loader)
if onmtManager.get_num_training_runs(smaller_model_name) > 0:
smallerModel.load_state_dict(onmtManager.load_model_state_dict(smaller_model_name))
del smallerModel
#Just distilled
distilledOptions = copy.deepcopy(smallerOptions)
distilledOptions['rnn_size'] = 550
distilledOptions['layers'] = 1
distilledOptions['epochs'] = 5
distilled_model_name = 'WMT13_distilledModel_word_level_{}rnn_size1_layer'.format(550)
distilled_model_word_level = tmm.create_model(transl_dataset.fields, options=distilledOptions)
if USE_CUDA: distilled_model_word_level = distilled_model_word_level.cuda()
distilledModelPath = os.path.join(WMT13_saved_models_folder, distilled_model_name)
if distilled_model_name not in onmtManager.saved_models:
onmtManager.add_new_model(distilled_model_name, distilledModelPath,
arguments_creator_function=distilledOptions)
if TRAIN_WORD_DISTILLED:
onmtManager.train_model(distilled_model_word_level, model_name=distilled_model_name,
train_function=tmm.train_model,
arguments_train_function={'options':distilledOptions,
'teacher_model': teacherModel,
'use_distillation_loss':True},
train_loader=train_loader, test_loader=test_loader)
if onmtManager.get_num_training_runs(distilled_model_name) > 0:
distilled_model_word_level.load_state_dict(onmtManager.load_model_state_dict(distilled_model_name))
del distilled_model_word_level
# For the distilled quantized model we increase the rnn size; sort of like increasing filters
distilledOptions = copy.deepcopy(smallerOptions)
distilledOptions['rnn_size'] = 550
distilledOptions['epochs'] = 5
distilled_model_name_quantized = 'WMT13_distilledModel_word_level_quantized{}bits{}rnn_size1_layer'.format(
2, 550)
distilled_quantized_model_word_level = tmm.create_model(transl_dataset.fields, options=distilledOptions)
if USE_CUDA: distilled_quantized_model_word_level = distilled_quantized_model_word_level.cuda()
distilledModelPath = os.path.join(WMT13_saved_models_folder, distilled_model_name_quantized)
if distilled_model_name_quantized not in onmtManager.saved_models:
onmtManager.add_new_model(distilled_model_name_quantized, distilledModelPath,
arguments_creator_function=distilledOptions)
if TRAIN_QUANTIZED_DISTILLED:
onmtManager.train_model(distilled_quantized_model_word_level, model_name=distilled_model_name_quantized,
train_function=tmm.train_model,
arguments_train_function={'options':distilledOptions,
'teacher_model': teacherModel,
'use_distillation_loss':True,
'quantizeWeights':True,
'numBits': 2,
'bucket_size':256,
'quantize_first_and_last_layer':False},
train_loader=train_loader, test_loader=test_loader)
if onmtManager.get_num_training_runs(distilled_model_name_quantized) > 0:
distilled_quantized_model_word_level.load_state_dict(onmtManager.load_model_state_dict(distilled_model_name_quantized))
del distilled_quantized_model_word_level
#print bleu for the models
example_translations=False
file_results = 'results_file_BLEU_models_WMT13'
if COMPUTE_BLEU_MODELS:
with open(file_results, 'a') as fr:
fr.write('\n\n== New Testing Run == 29 Dec 2017 == \n\n')
for x in onmtManager.list_models():
if onmtManager.get_num_training_runs(x) == 0:
continue
modelOptions = onmtManager.load_metadata(x, 0)[0]
for key, val in modelOptions.items(): # remedying an old bug in the save_metadata function
if val == 'None':
modelOptions[key] = None
dataset = transl_dataset
model = tmm.create_model(dataset.fields, options=modelOptions)
if USE_CUDA: model = model.cuda()
model.load_state_dict(onmtManager.load_model_state_dict(x, 1))
if example_translations:
print('Example of translation for model: "{}"'.format(x))
num_examples = 5
linesToTranslate, translated_lines, referenceLines = transl_hf.get_translation_examples(model,
dataset,
num_examples,
modelOptions,
standardTranslateOptions,
shuffle_examples=False)
print('Original Sentences == Translation == Ref Translation')
print('\n'.join(' == '.join(x) for x in zip(linesToTranslate, translated_lines, referenceLines)))
if COMPUTE_BLEU_MODELS:
bleu = transl_hf.get_bleu_model(model, dataset, modelOptions, standardTranslateOptions)
else:
bleu = 'Not computed'
perplexity = onmtManager.load_metadata(x,1)[1]['perplexity'][-1]
str_to_save = 'Model "{}" ==> Perplexity: {}, BLEU: {}'.format(x, perplexity, bleu)
if COMPUTE_BLEU_MODELS:
with open(file_results, 'a') as fr:
fr.write(str_to_save + '\n')
print(str_to_save)
curr_num_bit = onmtManager.load_metadata(x)[0].get('numBits', None)
if curr_num_bit is not None:
quant_fun = functools.partial(quantization.uniformQuantization, s=2**curr_num_bit, bucket_size=256)
actual_bit_huffmman = qhf.get_huffman_encoding_mean_bit_length(model.parameters(), quant_fun,
'uniform', s=2**curr_num_bit)
print('Effective bit Huffman: {} - Size reduction: {}'.format(actual_bit_huffmman,
mhf.get_size_reduction(actual_bit_huffmman, bucket_size=256)))
print('Size MB: {}'.format(mhf.get_size_quantized_model(model, curr_num_bit, quant_fun, 256, quantizeFirstLastLayer=False)))
if CHECK_PM_QUANTIZATION:
QUANTIZE_FIRST_LAST_LAYER = True
if 'distilledModel_word_level' in x:
for numBit in [2]:
for bucket_size in (None, 256):
model.load_state_dict(onmtManager.load_model_state_dict(x, 1))
numParam = sum(1 for _ in model.parameters())
for idx, p in enumerate(model.parameters()):
if QUANTIZE_FIRST_LAST_LAYER is False:
if idx == 0 or idx == numParam - 1:
continue
p.data = quantization.uniformQuantization(p.data, s=2**numBit, type_of_scaling='linear',
bucket_size=bucket_size)[0]
perplexity = tmm.evaluate_model(model, test_loader).ppl()
if COMPUTE_BLEU_MODELS:
bleu = transl_hf.get_bleu_model(model, dataset, modelOptions, standardTranslateOptions)
else:
bleu = 'Not Computed'
str_to_save = 'PM quantization of model "{}" with "{}" bits and bucket size {}: Perplexity : {}, BLEU: {}'.format(
x, numBit, bucket_size, perplexity, bleu)
quant_fun = functools.partial(quantization.uniformQuantization, s=2**numBit, bucket_size=bucket_size)
actual_bit_huffmman = qhf.get_huffman_encoding_mean_bit_length(model.parameters(), quant_fun,
'uniform', s=2**numBit)
size_reduction = mhf.get_size_reduction(actual_bit_huffmman, bucket_size=bucket_size)
size_mb = mhf.get_size_quantized_model(model, numBit, quant_fun, bucket_size, quantizeFirstLastLayer=QUANTIZE_FIRST_LAST_LAYER)
str_to_save += '\n' + 'Effective bit Huffman: {} - Size reduction: {} - Size MB: {}'.format(actual_bit_huffmman,size_reduction, size_mb)
if COMPUTE_BLEU_MODELS:
with open(file_results, 'a') as fr:
fr.write(str_to_save + '\n')
print(str_to_save)
#now for the models trained with the differentiable quantization algorithm
# list_distilled_models = ['WMT13_distilledModel_word_level_{}rnn_size1_layer'.format(x)
# for x in rnn_sizes]
# optQuanPointOptions = copy.deepcopy(onmt.onmt.standard_options.stdOptions)
# for idx_model_distilled, distilled_model_name_to_quantize in enumerate(list_distilled_models):
# modelOptions = onmtManager.load_metadata(distilled_model_name_to_quantize, 0)[0]
# for key, val in modelOptions.items(): # remeding to an old bug in save_metadata function
# if val == 'None':
# modelOptions[key] = None
# dataset = transl_dataset #since we don't use sequence level distillation
# for numBit in numBits:
# if numBit == 8: continue
# save_path = onmtManager.get_model_base_path(distilled_model_name_to_quantize) + \
# 'quant_points_{}bit_bucket_size256'.format(numBit)
# with open(save_path, 'rb') as p:
# quantization_points, infoDict = pickle.load(p)
# distilledModel = tmm.create_model(dataset.fields, options=modelOptions)
# distilledModel.load_state_dict(onmtManager.load_model_state_dict(distilled_model_name_to_quantize))
# if USE_CUDA: distilledModel = distilledModel.cuda()
# for idx, p in enumerate(distilledModel.parameters()):
# p.data = quantization.nonUniformQuantization(p.data, quantization_points[idx], bucket_size=256)[0]
# reported_perplexity = infoDict['perplexity'][-1]
# perplexity = tmm.evaluate_model(distilledModel, test_loader).ppl()
# if COMPUTE_BLEU_MODELS:
# bleu = transl_hf.get_bleu_model(distilledModel, dataset, optQuanPointOptions, standardTranslateOptions)
# else:
# bleu = 'Not Computed'
# str_to_save = 'Model "{}" ==> Reported perplexity : {}, Actual perplexity: {}, BLEU: {}'.format(
# distilled_model_name_to_quantize + 'quant_points_{}bit_bucket_size256'.format(numBit),
# reported_perplexity, perplexity, bleu)
# if COMPUTE_BLEU_MODELS:
# with open(file_results, 'a') as fr:
# fr.write(str_to_save + '\n')
# print(str_to_save)
#
# quantization_functions = [functools.partial(quantization.nonUniformQuantization,
# listQuantizationPoints=qp,
# bucket_size=256) for qp in quantization_points]
# actual_bit_huffmman = qhf.get_huffman_encoding_mean_bit_length(distilledModel.parameters(),
# quantization_functions,
# 'nonUniform')
# print('Effective bit Huffman: {} - Size reduction: {}'.format(actual_bit_huffmman,
# mhf.get_size_reduction(
# actual_bit_huffmman,
# bucket_size=256)))
|
11583927
|
from django.http import HttpResponse
from django.core.exceptions import ValidationError
from django.db import transaction
from systems.models import System
from core.range.utils import range_usage
from core.search.compiler.django_compile import compile_to_q
from core.range.ip_choosing_utils import (
integrate_real_ranges, calc_template_ranges
)
from core.network.models import Network
from bulk_action.import_utils import loads, dumps, system_import, BadImportData
from MySQLdb import OperationalError
import MySQLdb
import pprint
import simplejson as json
def bulk_import(main_blob, load_json=True):
try:
if load_json:
json_blob = loads(main_blob)
else:
json_blob = main_blob
except ValueError, e: # Can't find JSONDecodeError
return None, {'errors': str(e)}
try:
systems = json_blob['systems']
except (KeyError, TypeError):
return None, {'errors': 'Main JSON needs to have a key "systems".'}
commit = json_blob.get('commit', False)
if not isinstance(systems, dict):
return None, {'errors': 'Main JSON blob must be a dict of systems'}
@transaction.commit_manually
def do_import():
try:
for i, s_blob in enumerate(systems.values()):
save_functions = sorted(
system_import(s_blob), key=lambda f: f[0]
)
for priority, fn in save_functions:
fn()
except BadImportData, e:
transaction.rollback()
return None, {
'errors': 'Found an issue while processing system #{0}: '
'{1}\nBad blob was:\n{2}'.format(
i, e.msg, e.bad_blob
)
}
except ValidationError, e:
transaction.rollback()
field_errors = ''
if hasattr(e, 'message_dict'):
for field, errors in e.message_dict.iteritems():
field_errors += "{0}: {1} ".format(field, ' '.join(errors))
else:
field_errors = ', '.join(e.messages)
return None, {
'errors': 'Found an issue while processing system #{0}: '
'{field_errors}'.format(i, field_errors=field_errors), # noqa
'blob': s_blob,
'blob_number': i
}
except MySQLdb.Warning, e:
transaction.rollback()
return None, {
'errors': (
'There was an error while processing system number #{0}: '
'{error}.'.format(i, error=e.message)
),
'blob': s_blob,
'blob_number': i
}
except Exception, e:
transaction.rollback()
return None, {
'errors': 'Please tell someone about this error: {0}'.format(e), # noqa
'blob': s_blob,
'blob_number': i
}
else:
if commit:
transaction.commit()
else:
transaction.rollback()
return {'systems': systems}, None
return do_import()
def bulk_action_import(request):
raw_data = request.raw_post_data
if not raw_data:
return HttpResponse(dumps({'errors': 'what do you want?'}))
systems, errors = bulk_import(raw_data)
return HttpResponse(json.dumps(systems or errors))
def bulk_action_export(request):
search = request.GET.get('q', '')
if not search:
return HttpResponse(dumps({'errors': 'what do you want?'}))
q_map, errors = compile_to_q(search)
if errors:
return HttpResponse(dumps({'errors': errors}))
try: # We might have to catch shitty regular expressions
bundles = System.get_bulk_action_list(q_map['SYS'])
except OperationalError as why:
return HttpResponse(dumps({'error_messages': str(why)}))
pprint.pprint(bundles)
return HttpResponse(dumps({'systems': bundles}))
def bulk_gather_vlan_pools(request):
vlan_name = request.GET.get('vlan_name', None)
vlan_number = request.GET.get('vlan_number', None)
site_name = request.GET.get('site_name', None)
ip_type = request.GET.get('ip_type', None)
if not site_name:
return HttpResponse(dumps({
'errors': 'Site name was not provided'
}))
if not ip_type:
return HttpResponse(dumps({
'errors': 'IP type is required here.'
}))
if vlan_name and vlan_number:
s = 'site=:{site_name} AND vlan=:{vlan_name},{vlan_number}'.format(
site_name=site_name, vlan_name=vlan_name, vlan_number=vlan_number
)
elif vlan_name:
s = 'site=:{site_name} AND vlan=:{vlan_name}'.format(
site_name=site_name, vlan_name=vlan_name
)
elif vlan_number:
s = 'site=:{site_name} AND vlan=:{vlan_number}'.format(
site_name=site_name, vlan_number=vlan_number
)
else:
return HttpResponse(dumps({
'errors': 'Not enough vlan information was provided'
}))
q_map, errors = compile_to_q(s)
if errors:
return None, errors
networks = Network.objects.filter(q_map['NET']).filter(ip_type=ip_type)
if networks.count() > 1:
return HttpResponse(dumps({
'errors': "Using the search '{s}', too many networks were "
"found. Please be more specific and specify a range.".format(s=s)
}))
if not networks.count():
return HttpResponse(dumps({
'errors': "Using the search '{s}', no networks were "
"found.".format(s=s)
}))
ranges = integrate_real_ranges(
networks[0], calc_template_ranges(networks[0])
)
free_ranges = []
for r in ranges:
if r['rtype'] == 'special purpose':
continue
free_ranges += range_usage(
r['start'], r['end'], ip_type
)['free_ranges']
return HttpResponse(dumps({
'free_ranges': free_ranges
}))
|
11583931
|
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import os
import pickle
import sys
import traceback
from io import open
from fabric.api import env, execute
from fabric.operations import sudo
from github import UnknownObjectException
from memoized import memoized_property
from .const import (
CACHED_DEPLOY_CHECKPOINT_FILENAME,
CACHED_DEPLOY_ENV_FILENAME,
PROJECT_ROOT,
)
from commcare_cloud.github import github_repo
def execute_with_timing(fn, *args, **kwargs):
start_time = datetime.datetime.utcnow()
execute(fn, *args, **kwargs)
if env.timing_log:
with open(env.timing_log, 'a', encoding='utf-8') as timing_log:
duration = datetime.datetime.utcnow() - start_time
timing_log.write('{}: {}\n'.format(fn.__name__, duration.seconds))
def get_pillow_env_config():
full_host = env.get('host_string')
pillows = {}
if full_host:
host = full_host.split('.')[0]  # short hostname; works even without a dot (original raised NameError then)
pillows.update(env.pillows.get(host, {}))
pillows.update(env.pillows.get(full_host, {}))
return pillows
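# e.g. (hypothetical env): with env.pillows = {'pillow0': {...}, 'pillow0.example.com': {...}}
# and host_string 'pillow0.example.com', both entries are merged, with the
# fully-qualified entry overriding the short-host one.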
def _get_checkpoint_filename():
return '{}_{}'.format(env.deploy_env, CACHED_DEPLOY_CHECKPOINT_FILENAME)
def _get_env_filename(env_name):
return '{}_{}'.format(env_name, CACHED_DEPLOY_ENV_FILENAME)
def cache_deploy_state(command_index):
with open(os.path.join(PROJECT_ROOT, _get_checkpoint_filename()), 'wb') as f:
pickle.dump(command_index, f)
with open(os.path.join(PROJECT_ROOT, _get_env_filename(env.deploy_env)), 'wb') as f:
pickle.dump(env, f)
def clear_cached_deploy():
os.remove(os.path.join(PROJECT_ROOT, _get_checkpoint_filename()))
os.remove(os.path.join(PROJECT_ROOT, _get_env_filename(env.deploy_env)))
def retrieve_cached_deploy_env(env_name):
filename = os.path.join(PROJECT_ROOT, _get_env_filename(env_name))
return _retrieve_cached(filename)
def retrieve_cached_deploy_checkpoint():
filename = os.path.join(PROJECT_ROOT, _get_checkpoint_filename())
return _retrieve_cached(filename)
def _retrieve_cached(filename):
with open(filename, 'rb') as f:
return pickle.load(f)
def traceback_string():
exc_type, exc, tb = sys.exc_info()
trace = "".join(traceback.format_tb(tb))
return "Traceback:\n{trace}{type}: {exc}".format(
trace=trace,
type=exc_type.__name__,
exc=exc,
)
def pip_install(cmd_prefix, requirements, timeout=None, quiet=False, proxy=None, no_index=False,
wheel_dir=None):
parts = [cmd_prefix, 'pip install']
if timeout is not None:
parts.append('--timeout {}'.format(timeout))
if quiet:
parts.append('--quiet')
for requirement in requirements:
parts.append('--requirement {}'.format(requirement))
if proxy is not None:
parts.append('--proxy {}'.format(proxy))
if no_index:
parts.append('--no-index')
if wheel_dir is not None:
parts.append('--find-links={}'.format(wheel_dir))
sudo(' '.join(parts))
def generate_bower_command(command, production=True, config=None):
parts = ['bower', command]
if production:
parts.append('--production')
if config:
for key, value in config.items():
parts.append('--config.{}={}'.format(key, value))
return ' '.join(parts)
def bower_command(command, production=True, config=None):
cmd = generate_bower_command(command, production, config)
sudo(cmd)
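# Usage sketch:
#   generate_bower_command('install', production=True, config={'interactive': 'false'})
#   -> 'bower install --production --config.interactive=false'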
|
11583935
|
from trading_ig.config import config
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# if you need to cache to DB your requests
from datetime import timedelta
import requests_cache
from getting_realtime_data.data_retrieval import Data_Retrieval
from sending_orders.order_management import Order_Management
from management_of_position.position_management import Position_Management
from predefined_functions.initialisation import Initialisation
from get_data.get_market_data import Get_Market_Data
import time
from datetime import datetime, timedelta, date
from predefined_functions.defined_functionality import Defined_Functionality
import traceback
import pandas as pd
import sys
import math
# the newest version: place market orders based on price movements of 5 or more and try to catch the trend
class Algo0:
def __init__(self):
logging.basicConfig(level=logging.INFO)
self.df = Defined_Functionality()
self.map_epic_data_minute={}
# this way we can record the time the data was taken for a particular instrument
self.first_timestamp = {}
self.high = None
self.low = None
self.time_interval = 5
self.dataframe = self.setup_dataframe()
self.number_orders = 40
def setup_dataframe(self):
# add this to this machine
path = "D:/Stock_Analysis/ig-markets-api-python-library-master/Data/SpreadBetting/All_instrument_data_added.csv"
dataframe = pd.read_csv(path)
dataframe.rename(columns={"Unnamed: 0": "Number"},inplace=True)
dataframe = dataframe.set_index("Number")
sorted_data = dataframe
sorted_data["average"] = (sorted_data["bid"] + sorted_data["offer"]) / 2.0
sorted_data["spread"] = sorted_data["offer"] - sorted_data["bid"]
sorted_data["diff_high-low"] = sorted_data["high"] - sorted_data["low"]
sorted_data["percent_spread-hl"] = (sorted_data["spread"] / sorted_data["diff_high-low"] * 100)
# the 1 at the end is the position size we will be taking
sorted_data["margin_required_simple"] = sorted_data["average"] * (sorted_data["marginFactor"]/100) * 1
# FTSE index instruments don't allow you to close your positions
sorted_data["percent_spread_offer"] = sorted_data["spread"] / sorted_data["offer"] * 100
# sorted_data = sorted_data[~((sorted_data["name"].str.contains("CALL")) | (sorted_data["name"].str.contains("PUT")) | (sorted_data["name"].str.contains("Leverage")) | (sorted_data["name"].str.contains("Boost")) | (sorted_data["name"].str.contains("FTSE")) )]
# could look into options when they are about to expire the price movements for them should be high
sorted_data = sorted_data[~( (sorted_data["name"].str.contains("FTSE")) )]
sorted_data = sorted_data[(sorted_data["percent_spread_offer"] != 0)]
sorted_data = sorted_data.sort_values(by="percent_spread_offer")
sorted_data = sorted_data[(sorted_data["percent_spread_offer"] < 0.05)]
# final_table = sorted_data[(sorted_data["margin_required_simple"] != 0)
# & (sorted_data["margin_required_simple"] < 10000)
# & (sorted_data["percent_spread-hl"] < 10)
# & (sorted_data["percent_spread-hl"] > 0 )
# & (sorted_data["percent_spread_offer"] > 0)
# & (sorted_data["percent_spread_offer"] < 0.05)]
final_table = sorted_data[(sorted_data["margin_required_simple"] < 1000)
& (sorted_data["percent_spread_offer"] < 0.3)]
return final_table
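# Worked example of the derived columns above (hypothetical numbers):
#   bid=99, offer=101, marginFactor=5  ->  average=100.0, spread=2.0,
#   margin_required_simple = 100.0 * (5/100) * 1 = 5.0,
#   percent_spread_offer = 2/101*100 ~= 1.98 (dropped by the < 0.05 filter)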
def setup(self):
self.df.update_stop_level_bands()
def run(self):
while True:
try:
# setup is running in another program
# self.setup()
start_time = datetime.now()
self.check_all_working_orders()
details_required_to_create_orders = self.signal_generation()
self.create_orders(required_order_details = details_required_to_create_orders)
# for epic in self.map_epic_data_minute.keys():
end_time = datetime.now() - start_time
print(end_time)
except Exception as e:
print(e, " error in the looping for the defined_functionality")
traceback.print_exc()
def check_all_working_orders(self):
# check if the market open has passed; if we have a position in it then close all other orders, or if only one of that pair remains
orders = self.df.get_working_orders()
for epic in orders["epic"].unique():
self.checking_working_order_is_present_under_epic_and_amend_orders(epic=epic)
# 'All Sessions' means the market opens at 9 am
def create_orders(self, required_order_details):
if required_order_details.index.size == 0:
return
map_of_orders = None
for index in range(required_order_details.index.size):
orders = self.df.get_working_orders()
if (orders.index.size) >= self.number_orders:
# print("order limit met")
return map_of_orders
single_detail = required_order_details.iloc[index]
epic = single_detail["epic"]
position = self.df.find_open_position_by_epic(epic=epic)
if len(position) != 0:
# make sure you do not have orders under a position
self.df.cancel_orders_by_epic(epic=epic)
print(position[0])
continue
# we place the buy order above the quotes, betting the price moves up
# force_open == True means a trade of the opposite type opens another position instead of closing this one when the price meets it
order_buy = self.df.create_working_order(epic=epic, direction="BUY", price_order_level=single_detail["ask"], size=single_detail["size"], force_open=True)
# we place sell orders below the quotes, betting the price goes down
order_sell = self.df.create_working_order(epic=epic, direction="SELL", price_order_level=single_detail["bid"], size=single_detail["size"],force_open=True)
map_of_orders = {
epic: [order_buy, order_sell]
}
#check there are two orders for each epic
orders_present = self.checking_working_order_is_present_under_epic_and_amend_orders(epic=epic)
# if you don't have enough funds then don't make more trades
if orders_present == False:
for single_order in map_of_orders[epic]:
if (single_order is None) or (single_order["reason"] == 'INSUFFICIENT_FUNDS'):
# double-check that all orders under that epic were deleted, as the IG API can miss it
print("delete orders")
self.df.cancel_orders_by_epic(epic=epic)
return map_of_orders
return map_of_orders
def checking_working_order_is_present_under_epic_and_amend_orders(self, epic):
# check there are two orders for each epic
created_order = True
while True:
orders = self.df.get_working_orders_by_epic(epic=epic)
if orders.index.size == 2:
directions = orders["direction"].to_list()
if ("BUY" in directions and "SELL" in directions):
data = self.df.get_market_data(epic=epic)
if data is None:
continue
# market is still open
map_of_time = data["instrument"]["openingHours"]
if map_of_time is None:
break
single_time = map_of_time["marketTimes"][0]["openTime"]
opening_market_time = datetime.strptime(single_time, '%H:%M').time()
diff_in_time = datetime.combine(date.today(), opening_market_time) - datetime.now()
# if the time is bigger than x seconds then we can still put orders in place
total_seconds = diff_in_time.total_seconds()
# -1 seconds
if total_seconds < -(0.2):
# delete the orders if any are pending
break
position = self.df.find_open_position_by_epic(epic=epic)
if len(position) != 0:
break
print("market is not open yet and orders are in place nothing needs to be done yet")
return created_order
break
# print("delete orders")
self.df.cancel_orders_by_epic(epic=epic)
created_order = False
return created_order
def signal_generation(self):
data = self.dataframe
final_df = pd.DataFrame()
# filtering the dataframe
times = data["openingHours"].unique()
times_list = times.tolist()
for index in range(len(times_list)):
# removing nan values
if not isinstance(times_list[index], str):
del times_list[index]
break
times_list.sort()
nearest_time_interval = None
previous_time = sys.maxsize
details_to_create_orders_for = []
for interval in times_list:
map_of_time = eval(interval)
single_time = map_of_time["marketTimes"][0]["openTime"]
opening_market_time = datetime.strptime(single_time, '%H:%M').time()
diff_in_time = datetime.combine(date.today(), opening_market_time) - datetime.now()
# if the time is bigger than x seconds then we can still put orders in place
total_seconds = diff_in_time.total_seconds()
if previous_time < total_seconds:
break
# 5 minutes before the market opens, orders can be placed
if total_seconds > 300:
nearest_time_interval = single_time
previous_time = diff_in_time.total_seconds()
if nearest_time_interval is not None:
sub_data = data[data["openingHours"].str.contains("{'openTime': '" + nearest_time_interval, na=False)]
# """ for some reason Ig think these markets open again """
single_time = "09:00"
time_interval_limit = datetime.strptime(single_time, '%H:%M').time()
diff_in_time = datetime.combine(date.today(), time_interval_limit) - datetime.now()
if diff_in_time.total_seconds() < 0:
sub_data = sub_data[~((sub_data["name"].str.contains("All Sessions")) )]
sub_data = sub_data.sort_values(by="percent_spread_offer", ascending=True)
# also maybe remove NON DFB instruments
# sort the dataframe by percentage spread and margin required, that way larger moving instruments with low prices are chosen first
for index in range(sub_data["epic"].size):
epic = sub_data.iloc[index]["epic"]
orders_present = self.checking_working_order_is_present_under_epic_and_amend_orders(epic=epic)
if orders_present == True:
continue
try:
price_data = self.df.get_market_data(epic=epic)
except Exception as e:
print(e)
continue
if price_data is None: continue
ask = price_data["snapshot"]["offer"]
bid = price_data["snapshot"]["bid"]
delay_time = price_data["snapshot"]["delayTime"]
# min dealing size
if price_data["dealingRules"]["minDealSize"]["unit"] == "POINTS":
smallest_size = price_data["dealingRules"]["minDealSize"]["value"]
else:
smallest_size = 1
# getting the margin factor
if price_data["instrument"]["marginFactorUnit"] == 'PERCENTAGE':
if price_data["instrument"]["marginFactor"] == 0:
factor = 20
else:
factor = price_data["instrument"]["marginFactor"]
margin_factor = factor/100.0
else:
margin_factor = price_data["instrument"]["marginFactorUnit"]
spread = ask - bid
if delay_time > 0:
continue
if spread == 0:
data = self.df.get_historical_data_via_num_points(epic=epic,resolution="1Min", num_points=1)
if not isinstance(data, pd.core.frame.DataFrame):
continue
if not ("last" in data and "bid" in data):
continue
ask = data["last"]["Close"].max()
bid = data["bid"]["Close"].max()
if (math.isnan(ask) or math.isnan(bid)):
continue
# ideal margin for each order - original was 200 - stay with 200; 400 doesn't have the same effect
ideal_margin_for_each_order = 200
spread = ask - bid
spread_to_quotes = (spread / ask)*100
average = (ask+bid) /2.0
margin_required = smallest_size * average * margin_factor
if margin_factor == 0 or average == 0 or smallest_size == 0:
# degenerate pricing data; skip to avoid a ZeroDivisionError below
continue
size_required = (ideal_margin_for_each_order/margin_factor)/average
size_required = round(size_required,2)
if (size_required < smallest_size):
continue
# we are increasing the size to see if the profit increases if the position size increases whilst keeping the instruments the same
size_required *= 2
# was 0.1
if spread_to_quotes < 0.3:
# this info might not be useful at all -- needs more work
# data = self.df.get_historical_data_via_num_points(epic=epic, resolution="1D", num_points=20)
#
# if not isinstance(data, pd.core.frame.DataFrame):
# continue
#
# if not ("last" in data and "bid" in data):
# continue
# data["bid", "previous_day_close"] = data["bid"]["Close"].shift(1)
# data["bid", "diff_of_price_change"] = (data["bid", "Open"] - data["bid", "previous_day_close"])
# # 1 used to get rid of nan value at the front
# positive_diff = abs(data["bid", "diff_of_price_change"][1:])
# does_price_move_more_than_spread = (positive_diff > (spread)).all()
#
# if does_price_move_more_than_spread == False:
# continue
# - might not be useful at all -------------------------------------------------------------------
#instrument is good to trade
# 4 weeks of data (num_points=20 daily bars)
data = self.df.get_historical_data_via_num_points(epic=epic, resolution="1D", num_points=20)
if not isinstance(data, pd.core.frame.DataFrame):
continue
if not ("last" in data and "bid" in data):
continue
data["bid", "previous_day_close"] = data["bid"]["Close"].shift(1)
data["bid", "diff_of_price_change"] = (data["bid", "Open"] - data["bid", "previous_day_close"])
# 1 used to get rid of nan value at the front
positive_diff = abs(data["bid", "diff_of_price_change"][1:])
# percentage_opening_moving_larger_than_spread = (positive_diff[positive_diff > (spread)].index.size / positive_diff.index.size) * 100
"""we are timings spread by 2 here """
percentage_opening_moving_larger_than_spread = (positive_diff[positive_diff > (spread * 1.1)].index.size / positive_diff.index.size) * 100
print(percentage_opening_moving_larger_than_spread)
# if the percentage is bigger than 80% then we can say this happens frequently and so we can place working orders here
# if percentage_opening_moving_larger_than_spread < 90:
# continue
# widening the spread (divisor used to be 10; a smaller divisor widens it more)
bid = bid - (spread/5)
ask = ask + (spread/5)
object_map ={
"epic": epic,
"bid": bid,
"ask": ask,
# to make the sizes for this larger but still keep these small instruments
"size": size_required,
"percent_opening_close": percentage_opening_moving_larger_than_spread
}
details_to_create_orders_for.append(object_map)
# this part takes far too long when going through too many instruments ---------------------------------------------------
diff_in_time = datetime.combine(date.today(), opening_market_time) - datetime.now()
# only place orders if more than 2 minutes remain until the open, in case the section above takes too long to process
if diff_in_time.total_seconds() > 120:
final_df = pd.DataFrame(details_to_create_orders_for)
if final_df.index.size == 0:
return final_df
final_df = final_df.sort_values(by="percent_opening_close", ascending=False)
final_df = final_df[:self.number_orders]
# temp_df = temp_df[temp_df["percent_opening_close"] > 60]
return final_df
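# Hedged sketch of the position-sizing arithmetic used above (illustrative
# numbers, not part of the strategy): margin scales linearly with size, so the
# size that hits the target margin is target / (margin_factor * mid price).
#   mid = (102.0 + 98.0) / 2        # -> 100.0
#   margin_factor = 20 / 100.0      # a 20% margin requirement
#   size = round(200 / (margin_factor * mid), 2)   # -> 10.0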
|
11583938
|
import contextlib
import itertools
import numpy as np
import os
import sys
try:
import pycocotools.coco
import pycocotools.cocoeval
import pycocotools.mask as mask_tools
_available = True
except ImportError:
_available = False
import fcis
def eval_instance_segmentation_coco(generator):
"""Evaluate instance segmentation based on evaluation code of MS COCO.
Args:
generator: An iterable that yields, per image, the tuple
``(i, size, pred_bbox, pred_mask, pred_label, pred_score,
gt_bbox, gt_mask, gt_label, gt_crowded, gt_area)``, whose
components are described below.
sizes (iterable of tuple of ints): [(H_1, W_1), ..., (H_N, W_N)]
pred_bboxes (iterable of numpy.ndarray): An iterable of :math:`N`
sets of bounding boxes.
Its index corresponds to an index for the base dataset.
Each element of :obj:`pred_bboxes` is a set of coordinates
of bounding boxes. This is an array whose shape is :math:`(R, 4)`,
where :math:`R` corresponds
to the number of bounding boxes, which may vary among boxes.
The second axis corresponds to :obj:`y_min, x_min, y_max, x_max`
of a bounding box.
pred_masks (iterable of list of numpy.ndarray)
pred_labels (iterable of numpy.ndarray): An iterable of labels.
Similar to :obj:`pred_bboxes`, its index corresponds to an
index for the base dataset. Its length is :math:`N`.
pred_scores (iterable of numpy.ndarray): An iterable of confidence
scores for predicted bounding boxes. Similar to :obj:`pred_bboxes`,
its index corresponds to an index for the base dataset.
Its length is :math:`N`.
gt_bboxes (iterable of numpy.ndarray): An iterable of ground truth
bounding boxes
whose length is :math:`N`. An element of :obj:`gt_bboxes` is a
bounding box whose shape is :math:`(R, 4)`. Note that the number of
bounding boxes in each image does not need to be same as the number
of corresponding predicted boxes.
gt_masks (iterable of list of numpy.ndarray)
gt_labels (iterable of numpy.ndarray): An iterable of ground truth
labels which are organized similarly to :obj:`gt_bboxes`.
gt_crowdeds (iterable of numpy.ndarray): An iterable of boolean
arrays which is organized similarly to :obj:`gt_bboxes`.
This tells whether the "crowded" label is assigned to the
corresponding bounding boxes.
By default, this is :obj:`None`. In that case, this function
considers all bounding boxes to be not crowded.
gt_area (iterable of numpy.ndarray): An iterable of float
arrays which is organized similarly to :obj:`gt_bboxes`.
This contains the area of the instance mask of an object
for each bounding box. By default, this is :obj:`None`.
In that case, this function uses the area of the
bounding box (i.e. width multiplied by height).
"""
if not _available:
raise ValueError(
'Please install pycocotools \n'
'pip install -e \'git+https://github.com/pdollar/coco.git'
'#egg=pycocotools&subdirectory=PythonAPI\'')
gt_coco = pycocotools.coco.COCO()
pred_coco = pycocotools.coco.COCO()
images = list()
pred_anns = list()
gt_anns = list()
unique_labels = dict()
for i, size, pred_bbox, pred_mask, pred_label, pred_score, \
gt_bbox, gt_mask, gt_label, gt_crowded, gt_area in generator:
pred_whole_mask = fcis.utils.mask2whole_mask(
pred_mask, pred_bbox, size)
gt_whole_mask = fcis.utils.mask2whole_mask(gt_mask, gt_bbox, size)
if gt_area is None:
gt_area = itertools.repeat(None)
if gt_crowded is None:
gt_crowded = itertools.repeat(None)
# Starting ids from 1 is important when using COCO.
img_id = i + 1
for pred_whole_m, pred_lbl, pred_sc in zip(
pred_whole_mask, pred_label, pred_score):
pred_anns.append(
_create_ann(pred_whole_m, pred_lbl, pred_sc,
img_id=img_id, ann_id=len(pred_anns) + 1,
crw=0))
unique_labels[pred_lbl] = True
for gt_whole_m, gt_lbl, gt_crw, gt_ar in zip(
gt_whole_mask, gt_label, gt_crowded, gt_area):
gt_anns.append(
_create_ann(gt_whole_m, gt_lbl, None,
img_id=img_id, ann_id=len(gt_anns) + 1,
crw=gt_crw, ar=gt_ar))
unique_labels[gt_lbl] = True
images.append({'id': img_id, 'height': size[0], 'width': size[1]})
pred_coco.dataset['categories'] = [{'id': i} for i in unique_labels.keys()]
gt_coco.dataset['categories'] = [{'id': i} for i in unique_labels.keys()]
pred_coco.dataset['annotations'] = pred_anns
gt_coco.dataset['annotations'] = gt_anns
pred_coco.dataset['images'] = images
gt_coco.dataset['images'] = images
with _redirect_stdout(open(os.devnull, 'w')):
pred_coco.createIndex()
gt_coco.createIndex()
ev = pycocotools.cocoeval.COCOeval(gt_coco, pred_coco, 'segm')
ev.evaluate()
ev.accumulate()
results = {'coco_eval': ev}
p = ev.params
common_kwargs = {
'prec': ev.eval['precision'],
'rec': ev.eval['recall'],
'iou_threshs': p.iouThrs,
'area_ranges': p.areaRngLbl,
'max_detection_list': p.maxDets}
all_kwargs = {
'ap/iou=0.50:0.95/area=all/maxDets=100': {
'ap': True, 'iou_thresh': None, 'area_range': 'all',
'max_detection': 100},
'ap/iou=0.50/area=all/maxDets=100': {
'ap': True, 'iou_thresh': 0.5, 'area_range': 'all',
'max_detection': 100},
'ap/iou=0.75/area=all/maxDets=100': {
'ap': True, 'iou_thresh': 0.75, 'area_range': 'all',
'max_detection': 100},
'ap/iou=0.50:0.95/area=small/maxDets=100': {
'ap': True, 'iou_thresh': None, 'area_range': 'small',
'max_detection': 100},
'ap/iou=0.50:0.95/area=medium/maxDets=100': {
'ap': True, 'iou_thresh': None, 'area_range': 'medium',
'max_detection': 100},
'ap/iou=0.50:0.95/area=large/maxDets=100': {
'ap': True, 'iou_thresh': None, 'area_range': 'large',
'max_detection': 100},
'ar/iou=0.50:0.95/area=all/maxDets=1': {
'ap': False, 'iou_thresh': None, 'area_range': 'all',
'max_detection': 1},
'ar/iou=0.50:0.95/area=all/maxDets=10': {
'ap': False, 'iou_thresh': None, 'area_range': 'all',
'max_detection': 10},
'ar/iou=0.50:0.95/area=all/maxDets=100': {
'ap': False, 'iou_thresh': None, 'area_range': 'all',
'max_detection': 100},
'ar/iou=0.50:0.95/area=small/maxDets=100': {
'ap': False, 'iou_thresh': None, 'area_range': 'small',
'max_detection': 100},
'ar/iou=0.50:0.95/area=medium/maxDets=100': {
'ap': False, 'iou_thresh': None, 'area_range': 'medium',
'max_detection': 100},
'ar/iou=0.50:0.95/area=large/maxDets=100': {
'ap': False, 'iou_thresh': None, 'area_range': 'large',
'max_detection': 100},
}
for key, kwargs in all_kwargs.items():
kwargs.update(common_kwargs)
metrics, mean_metric = _summarize(**kwargs)
results[key] = metrics
results['m' + key] = mean_metric
return results
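# Hedged usage sketch (shapes follow the docstring; `samples` is hypothetical):
# each yielded tuple describes one image.
#   results = eval_instance_segmentation_coco(
#       (i, (480, 640), pb, pm, pl, ps, gb, gm, gl, None, None)
#       for i, (pb, pm, pl, ps, gb, gm, gl) in enumerate(samples))
#   print(results['map/iou=0.50:0.95/area=all/maxDets=100'])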
def _create_ann(whole_m, lbl, sc, img_id, ann_id, crw=None, ar=None):
H, W = whole_m.shape
if crw is None:
crw = False
whole_m = np.asfortranarray(whole_m.astype(np.uint8))
rle = mask_tools.encode(whole_m)
# Surprisingly, ground truth ar can be different from area(rle)
if ar is None:
ar = mask_tools.area(rle)
ann = {
'image_id': img_id, 'category_id': lbl,
'segmentation': rle,
'area': ar,
'id': ann_id,
'iscrowd': crw}
if sc is not None:
ann.update({'score': sc})
return ann
def _summarize(
prec, rec, iou_threshs, area_ranges,
max_detection_list,
ap=True, iou_thresh=None, area_range='all',
max_detection=100):
a_idx = area_ranges.index(area_range)
m_idx = max_detection_list.index(max_detection)
if ap:
s = prec.copy() # (T, R, K, A, M)
if iou_thresh is not None:
s = s[iou_thresh == iou_threshs]
s = s[:, :, :, a_idx, m_idx]
else:
s = rec.copy() # (T, K, A, M)
if iou_thresh is not None:
s = s[iou_thresh == iou_threshs]
s = s[:, :, a_idx, m_idx]
s[s == -1] = np.nan
s = s.reshape((-1, s.shape[-1]))
valid_classes = np.any(np.logical_not(np.isnan(s)), axis=0)
class_s = np.nan * np.ones(len(valid_classes), dtype=np.float32)
class_s[valid_classes] = np.nanmean(s[:, valid_classes], axis=0)
if not np.any(valid_classes):
mean_s = np.nan
else:
mean_s = np.nanmean(class_s)
return class_s, mean_s
@contextlib.contextmanager
def _redirect_stdout(target):
original = sys.stdout
sys.stdout = target
yield
sys.stdout = original
|
11583971
|
from .expr import Expr
class LeafExpr(Expr):
"""Leaf expression base class."""
def has_return(self):
return False
LeafExpr.__module__ = "pyteal"
|
11584012
|
class AddAuthTokenMiddleware(object):
"""
Adds auth_token cookie to response
"""
def process_response(self, request, response):
if hasattr(request, 'user') and request.user and request.user.is_authenticated():
auth_token = request.user.auth_token
if auth_token:
response.set_cookie('auth_token', auth_token)
return response
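# Hedged usage note (module path illustrative): register the middleware in
# settings.py so every response carries the cookie, e.g.
#   MIDDLEWARE_CLASSES = [..., 'myapp.middleware.AddAuthTokenMiddleware']
# On Django >= 1.10, request.user.is_authenticated is a property, so the call
# parentheses above would have to be dropped there.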
|
11584013
|
import msvcrt
# detect keyboard input
def kb_hitchk():
Kb_hit = msvcrt.kbhit()
if Kb_hit:
Kb_return = ord(msvcrt.getch())
else:
Kb_return = 0
return Kb_return
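# Hedged usage sketch (Windows only, since msvcrt is used):
#   while True:
#       key = kb_hitchk()
#       if key == 27:  # ESC pressed
#           break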
# display name shown on the front end
config_webtitle = ''
# footer info shown at the bottom of the front-end page
config_webinfo = ''
# CSV file listing the front-end devices to monitor; row format: "device name,device address,device port"
config_ipfile = './static/ip.csv'
# timeout in seconds; the default of 1 second is recommended
config_timeout = 1
# path of the generated JSON file
config_jsfile = './static/ip.json'
# username and password
config_user = 'test'
config_passwd = '<PASSWORD>'
|
11584018
|
import argparse
import torch
import torch.nn.functional as F
from torch.nn import Linear as Lin
from torch_geometric.nn import SplineConv, radius_graph, fps, global_mean_pool
from points.datasets import get_dataset
from points.train_eval import run
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=200)
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--lr_decay_factor', type=float, default=0.5)
parser.add_argument('--lr_decay_step_size', type=int, default=50)
parser.add_argument('--weight_decay', type=float, default=0)
args = parser.parse_args()
class Net(torch.nn.Module):
def __init__(self, num_classes):
super(Net, self).__init__()
self.conv1 = SplineConv(1, 64, dim=3, kernel_size=5)
self.conv2 = SplineConv(64, 64, dim=3, kernel_size=5)
self.conv3 = SplineConv(64, 128, dim=3, kernel_size=5)
self.lin1 = Lin(128, 256)
self.lin2 = Lin(256, 256)
self.lin3 = Lin(256, num_classes)
def forward(self, pos, batch):
x = pos.new_ones((pos.size(0), 1))
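# Build a radius graph, then spline pseudo-coordinates: relative offsets in
# [-r, r] are rescaled into the [0, 1] domain that SplineConv expects.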
radius = 0.2
edge_index = radius_graph(pos, r=radius, batch=batch)
pseudo = (pos[edge_index[1]] - pos[edge_index[0]]) / (2 * radius) + 0.5
pseudo = pseudo.clamp(min=0, max=1)
x = F.elu(self.conv1(x, edge_index, pseudo))
idx = fps(pos, batch, ratio=0.5)
x, pos, batch = x[idx], pos[idx], batch[idx]
radius = 0.4
edge_index = radius_graph(pos, r=radius, batch=batch)
pseudo = (pos[edge_index[1]] - pos[edge_index[0]]) / (2 * radius) + 0.5
pseudo = pseudo.clamp(min=0, max=1)
x = F.elu(self.conv2(x, edge_index, pseudo))
idx = fps(pos, batch, ratio=0.25)
x, pos, batch = x[idx], pos[idx], batch[idx]
radius = 1
edge_index = radius_graph(pos, r=radius, batch=batch)
pseudo = (pos[edge_index[1]] - pos[edge_index[0]]) / (2 * radius) + 0.5
pseudo = pseudo.clamp(min=0, max=1)
x = F.elu(self.conv3(x, edge_index, pseudo))
x = global_mean_pool(x, batch)
x = F.elu(self.lin1(x))
x = F.elu(self.lin2(x))
x = F.dropout(x, p=0.5, training=self.training)
x = self.lin3(x)
return F.log_softmax(x, dim=-1)
train_dataset, test_dataset = get_dataset(num_points=1024)
model = Net(train_dataset.num_classes)
run(train_dataset, test_dataset, model, args.epochs, args.batch_size, args.lr,
args.lr_decay_factor, args.lr_decay_step_size, args.weight_decay)
|
11584025
|
import io
import cv2
import matplotlib
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# * syn where to set this
# must use 'Agg' to plot out onto image
matplotlib.use("Agg")
####
def fig2data(fig, dpi=180):
"""Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it.
Args:
fig: a matplotlib figure
Return: a numpy 3D array of RGBA values
"""
buf = io.BytesIO()
fig.savefig(buf, format="png", dpi=dpi)
buf.seek(0)
img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
buf.close()
img = cv2.imdecode(img_arr, 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
####
class _Scalar(object):
@staticmethod
def to_console(value):
return "%0.5f" % value
@staticmethod
def to_json(value):
return value
@staticmethod
def to_tensorboard(value):
return "scalar", value
####
class _ConfusionMatrix(object):
@staticmethod
def to_console(value):
value = pd.DataFrame(value)
value.index.name = "True"
value.columns.name = "Pred"
formatted_value = value.to_string()
return "\n" + formatted_value
@staticmethod
def to_json(value):
value = pd.DataFrame(value)
value.index.name = "True"
value.columns.name = "Pred"
value = value.unstack().rename("value").reset_index()
value = pd.Series({"conf_mat": value})
formatted_value = value.to_json(orient="records")
return formatted_value
@staticmethod
def to_tensorboard(value):
def plot_confusion_matrix(
cm, target_names, title="Confusion matrix", cmap=None, normalize=False
):
"""given a sklearn confusion matrix (cm), make a nice plot.
Args:
cm: confusion matrix from sklearn.metrics.confusion_matrix
target_names: given classification classes such as [0, 1, 2]
the class names, for example: ['high', 'medium', 'low']
title: the text to display at the top of the matrix
cmap: the gradient of the values displayed from matplotlib.pyplot.cm
see http://matplotlib.org/examples/color/colormaps_reference.html
plt.get_cmap('jet') or plt.cm.Blues
normalize: If False, plot the raw numbers
If True, plot the proportions
Usage
-----
plot_confusion_matrix(cm = cm, # confusion matrix created by
# sklearn.metrics.confusion_matrix
normalize = True, # show proportions
target_names = y_labels_vals, # list of names of the classes
title = best_estimator_name) # title of graph
Citation
---------
http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
"""
import matplotlib.pyplot as plt
import numpy as np
import itertools
accuracy = np.trace(cm) / np.sum(cm).astype("float")
misclass = 1 - accuracy
if cmap is None:
cmap = plt.get_cmap("Blues")
plt.figure(figsize=(8, 6))
plt.imshow(cm, interpolation="nearest", cmap=cmap)
plt.title(title)
plt.colorbar()
if target_names is not None:
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
if normalize:
cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 1.5 if normalize else cm.max() / 2
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if normalize:
plt.text(
j,
i,
"{:0.4f}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black",
)
else:
plt.text(
j,
i,
"{:,}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black",
)
plt.tight_layout()
plt.ylabel("True label")
plt.xlabel(
"Predicted label\naccuracy={:0.4f}; misclass={:0.4f}".format(
accuracy, misclass
)
)
plot_confusion_matrix(value, ["0", "1"])
img = fig2data(plt.gcf())
plt.close()
return "image", img
####
class _Image(object):
@staticmethod
def to_console(value):
# TODO: warn that images cannot be rendered to the console
return None
@staticmethod
def to_json(value):
# TODO: warn that images cannot be serialized to JSON
return None
@staticmethod
def to_tensorboard(value):
# TODO: add method
return "image", value
__converter_dict = {"scalar": _Scalar, "conf_mat": _ConfusionMatrix, "image": _Image}
####
def serialize(value, input_format, output_format):
converter = __converter_dict[input_format]
if output_format == "console":
return converter.to_console(value)
elif output_format == "json":
return converter.to_json(value)
elif output_format == "tensorboard":
return converter.to_tensorboard(value)
else:
raise ValueError("Unknown output format: {}".format(output_format))
|
11584040
|
from typing import Optional, Tuple
import aiohttp.web
from kopf._cogs.clients.auth import APIContext, authenticated
from kopf._cogs.structs.credentials import ConnectionInfo
@authenticated
async def fn(
x: int,
*,
context: Optional[APIContext],
) -> Tuple[APIContext, int]:
return context, x + 100
async def test_session_is_injected(
fake_vault, resp_mocker, aresponses, hostname, resource, namespace):
result = {}
get_mock = resp_mocker(return_value=aiohttp.web.json_response(result))
aresponses.add(hostname, resource.get_url(namespace=namespace, name='xyz'), 'get', get_mock)
context, result = await fn(1)
async with context.session:
assert context is not None
assert result == 101
async def test_session_is_passed_through(
fake_vault, resp_mocker, aresponses, hostname, resource, namespace):
result = {}
get_mock = resp_mocker(return_value=aiohttp.web.json_response(result))
aresponses.add(hostname, resource.get_url(namespace=namespace, name='xyz'), 'get', get_mock)
explicit_context = APIContext(ConnectionInfo(server='http://irrelevant/'))
context, result = await fn(1, context=explicit_context)
async with context.session:
assert context is explicit_context
assert result == 101
|
11584063
|
import pulsar
client = pulsar.Client('pulsar://localhost:6650')
msg_id = pulsar.MessageId.earliest
reader = client.create_reader('test', msg_id)
try:
while True:
msg = reader.read_next()
print("Received message '{}' id='{}'".format(msg.data(), msg.message_id()))
finally:
# read_next() blocks forever, so ensure the client closes even on interrupt
client.close()
|
11584076
|
from securityheaders.checkers import Finding, FindingType, FindingSeverity
from securityheaders.models import ReferrerPolicy
from securityheaders.checkers.referrerpolicy import ReferrerPolicyChecker
class ReferrerPolicyInsecureChecker(ReferrerPolicyChecker):
def check(self, headers, opt_options=dict()):
findings = []
policy = self.getreferrerpolicy(headers)
if not policy:
return findings
if policy.unsafe_url():
findings.append(Finding(ReferrerPolicy.headerkey, FindingType.UNSAFE_URL, 'If this policy is set, it should not use unsafe-url, as it can transfer sensitive information (via the Referer header) from HTTPS environments to HTTP environments.', FindingSeverity.LOW, ReferrerPolicy.directive.UNSAFE_URL))
if policy.origin_when_cross_origin():
findings.append(Finding(ReferrerPolicy.headerkey, FindingType.ORIGIN_WHEN_CROSS_ORIGIN, 'If this policy is set, it should not use origin-when-cross-origin, as it can transfer sensitive information (via the Referer header) from HTTPS environments to HTTP environments.', FindingSeverity.LOW, ReferrerPolicy.directive.ORIGIN_WHEN_CROSS_ORIGIN))
return findings
|
11584094
|
import numpy as np
import pandas as pd
from DeepTCR.DeepTCR import DeepTCR_SS
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rc('font', family='Arial')
DTCRS = DeepTCR_SS('reg_mart1',device=2)
alpha = 'CAVNFGGGKLIF'
beta = 'CASSWSFGTEAFF'
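# the single alpha/beta pair is duplicated so Sequence_Inference receives an array batch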
input_alpha = np.array([alpha,alpha])
input_beta = np.array([beta,beta])
pred = DTCRS.Sequence_Inference(input_alpha,input_beta)
fig_rsl,ax_rsl = DTCRS.Residue_Sensitivity_Logo(input_alpha,input_beta,background_color='black',Load_Prev_Data=False)
fig_rsl.savefig('mart1_rsl.png',dpi=1200,facecolor='black')
fig,ax = plt.subplots(1,2,figsize=(10,5))
sns.swarmplot(data=DTCRS.df_alpha_list[0],x='pos',y='high',ax=ax[0])
i = 0
ax[i].set_xlabel('')
ax[i].set_ylabel('')
ax[i].set_xticklabels(list(alpha),size=24)
ax[i].tick_params(axis='y',labelsize=18)
ax[i].spines['right'].set_visible(False)
ax[i].spines['top'].set_visible(False)
ax[i].spines['bottom'].set_visible(False)
ax[i].tick_params(axis='x',length=0)
ylim_alpha = ax[i].get_ylim()
sns.swarmplot(data=DTCRS.df_beta_list[0],x='pos',y='high',ax=ax[1])
i = 1
ax[i].set_xticklabels(list(beta),size=24)
ax[i].tick_params(axis='y',labelsize=18)
ax[i].set_xlabel('')
ax[i].set_ylabel('')
ax[i].spines['right'].set_visible(False)
ax[i].spines['top'].set_visible(False)
ax[i].spines['bottom'].set_visible(False)
ax[i].tick_params(axis='x',length=0)
ylim_beta = ax[i].get_ylim()
ylim = np.vstack([ylim_alpha,ylim_beta])
ylim_min = np.min(ylim)
ylim_max = np.max(ylim)
ax[0].set_ylim([ylim_min,ylim_max])
ax[1].set_ylim([ylim_min,ylim_max])
ax[0].axhline(pred[0],color='black')
ax[1].axhline(pred[0],color='black')
fig.savefig('mart1_rsl_dist.eps')
|
11584188
|
import logging
import coloredlogs
import argparse
import functools
log = logging.getLogger("main")
exception = log.exception
info = log.info
debug = log.debug
error = log.error
warn = log.warning
def get_arg(arg):
try:
parser = argparse.ArgumentParser()
parser.add_argument("--log", help="log help", default="INFO")
args = parser.parse_args()
return vars(args)[arg]
except (KeyError, SystemExit):
# argparse exits on unknown/invalid flags; fall back to the default level
return "INFO"
def setup(level="INFO"):
print("setting logging with level: ", level)
log.setLevel(level)
logging.getLogger().setLevel(logging.ERROR)
coloredlogs.install(
level=level,
fmt="%(asctime)s %(threadName)s %(levelname)s %(message)s",
logger=log,
)
setup(get_arg("log"))
|
11584247
|
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.framework.python.ops import add_arg_scope
import numpy as np
from functools import partial
from net.ops import random_sqaure, Margin, fixed_bbox_withMargin, bbox2mask
from net.ops import confidence_driven_mask, relative_spatial_variant_mask, deconv_frac_strided
from net.ops import flatten, gan_wgan_loss, gradients_penalty, random_interpolates
from net.ops import subpixel_conv, bilinear_conv, context_normalization, max_downsampling, unfold_conv
from net.ops import id_mrf_reg
from util.util import f2uint
class SemanticRegenerationNet:
def __init__(self):
self.name = 'SemanticRegenerationNet'
self.conv5 = partial(tf.layers.conv2d, kernel_size=5, activation=tf.nn.elu, padding='SAME')
self.conv3 = partial(tf.layers.conv2d, kernel_size=3, activation=tf.nn.elu, padding='SAME')
self.d_unit = partial(tf.layers.conv2d, kernel_size=5, strides=2, activation=tf.nn.leaky_relu, padding='SAME')
@add_arg_scope
def _deconv(self, x, filters, name='deconv', reuse=False):
h, w = x.get_shape().as_list()[1:3]
x = tf.image.resize_nearest_neighbor(x, [h * 2, w * 2], align_corners=True)
with tf.variable_scope(name, reuse=reuse):
x = self.conv3(inputs=x, filters=filters, strides=1, name=name+'_conv')
return x
@add_arg_scope
def FEN(self, x, cnum):
conv3, conv5, deconv = self.conv3, self.conv5, self._deconv
x = conv5(inputs=x, filters=cnum, strides=1, name='conv1')
x = conv3(inputs=x, filters=cnum * 2, strides=2, name='conv2_downsample')
x = conv3(inputs=x, filters=cnum * 2, strides=1, name='conv3')
x = conv3(inputs=x, filters=cnum * 4, strides=2, name='conv4_downsample')
x = conv3(inputs=x, filters=cnum * 4, strides=1, name='conv5')
x = conv3(inputs=x, filters=cnum * 4, strides=1, name='conv6')
x = conv3(inputs=x, filters=cnum * 4, strides=1, dilation_rate=2, name='conv7_atrous')
x = conv3(inputs=x, filters=cnum * 4, strides=1, dilation_rate=4, name='conv8_atrous')
x = conv3(inputs=x, filters=cnum * 4, strides=1, dilation_rate=8, name='conv9_atrous')
x = conv3(inputs=x, filters=cnum * 4, strides=1, dilation_rate=16, name='conv10_atrous')
x = conv3(inputs=x, filters=cnum * 4, strides=1, name='conv11')
x = conv3(inputs=x, filters=cnum * 4, strides=1, name='conv12')
x = deconv(x, filters=cnum * 2, name='conv13_upsample')
x = conv3(inputs=x, filters=cnum * 2, strides=1, name='conv14')
x = deconv(x, filters=cnum, name='conv15_upsample')
return x
@add_arg_scope
def CPN(self, x_fe, x_in, mask, cnum, use_cn=True, alpha=0.5):
conv3, conv5, deconv = self.conv3, self.conv5, self._deconv
ones_x = tf.ones_like(x_in)[:, :, :, 0:1]
xnow = tf.concat([x_fe, x_in, mask * ones_x], axis=3)
x = conv5(inputs=xnow, filters=cnum, strides=1, name='xconv1')
x = conv3(inputs=x, filters=cnum, strides=2, name='xconv2_downsample')
x = conv3(inputs=x, filters=cnum * 2, strides=1, name='xconv3')
x = conv3(inputs=x, filters=cnum * 2, strides=2, name='xconv4_downsample')
x = conv3(inputs=x, filters=cnum * 4, strides=1, name='xconv5')
x = conv3(inputs=x, filters=cnum * 4, strides=1, name='xconv6')
x = conv3(inputs=x, filters=cnum * 4, strides=1, dilation_rate=2, name='xconv7_atrous')
x = conv3(inputs=x, filters=cnum * 4, strides=1, dilation_rate=4, name='xconv8_atrous')
x = conv3(inputs=x, filters=cnum * 4, strides=1, dilation_rate=8, name='xconv9_atrous')
x = conv3(inputs=x, filters=cnum * 4, strides=1, dilation_rate=16, name='xconv10_atrous')
x = conv3(inputs=x, filters=cnum * 4, strides=1, name='allconv11')
if use_cn:
x = context_normalization(x, mask, alpha=alpha)
x = conv3(inputs=x, filters=cnum * 4, strides=1, name='allconv12')
x = deconv(x, filters=cnum * 2, name='allconv13_upsample')
x = conv3(inputs=x, filters=cnum * 2, strides=1, name='allconv14')
x = deconv(x, filters=cnum, name='allconv15_upsample')
x = conv3(inputs=x, filters=cnum // 2, strides=1, name='allconv16')
x = tf.layers.conv2d(inputs=x, kernel_size=3, filters=3, strides=1, activation=None, padding='SAME',
name='allconv17')
x = tf.clip_by_value(x, -1, 1)
return x
def build_generator(self, x, mask, margin, config=None, reuse=False, name='inpaint_net'):
feature_expansion_op = None
if config is not None:
use_cn = config.use_cn
assert config.feat_expansion_op in ['subpixel', 'deconv', 'bilinear-conv', 'unfold']
if config.feat_expansion_op == 'subpixel':
feature_expansion_op = subpixel_conv
elif config.feat_expansion_op == 'deconv':
feature_expansion_op = deconv_frac_strided
elif config.feat_expansion_op == 'unfold':
feature_expansion_op = unfold_conv
else:
feature_expansion_op = bilinear_conv
else:
use_cn = True
feature_expansion_op = subpixel_conv
target_shape = mask.get_shape().as_list()[1:3]
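# Pad the known sub-image out to the full target canvas so the context
# prediction branch receives it at the final spatial resolution.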
xin_expanded = tf.pad(x, [[0, 0], [margin.top, margin.bottom], [margin.left, margin.right], [0, 0]])
xin_expanded.set_shape((x.get_shape().as_list()[0], target_shape[0], target_shape[1], 3))
expand_scale_ratio = int(np.prod(mask.get_shape().as_list()[1:3])/np.prod(x.get_shape().as_list()[1:3]))
# two stage network
cnum = config.g_cnum
with tf.variable_scope(name, reuse=reuse):
x = self.FEN(x, cnum)
# subpixel module; make the output channel count match the input
if config.feat_expansion_op == 'subpixel':
x_fe = feature_expansion_op(x, cnum * expand_scale_ratio, 3, target_shape,
name='feat_expansion_'+config.feat_expansion_op)
elif config.feat_expansion_op == 'unfold':
x_fe = feature_expansion_op(x, cnum, 3, margin, target_shape,
name='feat_expansion_'+config.feat_expansion_op)
else:
x_fe = feature_expansion_op(x, cnum, 3, target_shape,
name='feat_expansion_'+config.feat_expansion_op)
x = self.CPN(x_fe, xin_expanded, mask, cnum, use_cn, config.fa_alpha)
return x, x_fe
def build_wgan_contextual_discriminator(self, x, mask, config, reuse=False):
cnum = config.d_cnum
dis_conv = self.d_unit
with tf.variable_scope('D_context', reuse=reuse):
h, w = x.get_shape().as_list()[1:3]
x = dis_conv(x, cnum, name='dconv1')
x = dis_conv(x, cnum*2, name='dconv2')
x = dis_conv(x, cnum*4, name='dconv3')
x = tf.layers.conv2d(inputs=x, kernel_size=3, filters=1, strides=1, activation=None, padding='SAME',
name='dconv4')
mask = max_downsampling(mask, ratio=8)
x = x * mask
x = tf.reduce_sum(x, axis=[1, 2, 3]) / tf.reduce_sum(mask, axis=[1, 2, 3])
mask_local = tf.image.resize_nearest_neighbor(mask, [h, w], align_corners=True)
return x, mask_local
def build_wgan_global_discriminator(self, x, config, reuse=False):
cnum = config.d_cnum
dis_conv = self.d_unit
with tf.variable_scope('D_global', reuse=reuse):
x = dis_conv(x, cnum, name='conv1')
x = dis_conv(x, cnum*2, name='conv2')
x = dis_conv(x, cnum*4, name='conv3')
x = dis_conv(x, cnum*2, name='conv4')
x = flatten(x, name='flatten')
return x
def build_wgan_discriminator(self, batch_global, config, reuse=False):
with tf.variable_scope('discriminator', reuse=reuse):
dglobal = self.build_wgan_global_discriminator(
batch_global, config=config, reuse=reuse)
dout_global = tf.layers.dense(dglobal, 1, name='dout_global_fc')
return dout_global
def build_contextual_wgan_discriminator(self, batch_global, mask, config, reuse=False):
with tf.variable_scope('discriminator', reuse=reuse):
dglobal = self.build_wgan_global_discriminator(batch_global, config=config, reuse=reuse)
dout_global = tf.layers.dense(dglobal, 1, name='dout_global_fc')
dout_local, mask_local = self.build_wgan_contextual_discriminator(batch_global, mask,
config=config, reuse=reuse)
return dout_local, dout_global, mask_local
def build_net(self, batch_data, config, summary=True, reuse=False):
batch_pos = batch_data / 127.5 - 1.
if config.random_mask is True:
self.bbox_gen = random_sqaure
else:
self.bbox_gen = fixed_bbox_withMargin
bbox, margin = self.bbox_gen(config)
mask = bbox2mask(bbox, config)
mask = 1. - mask # we need to predict context
h, w = batch_pos.get_shape().as_list()[1:3]
if config.random_mask is False:
if config.mask_shapes[0] == 64: # for clothes dataset
batch_incomplete = tf.image.crop_to_bounding_box(batch_pos,
margin.top, margin.left,
config.mask_shapes[0], config.mask_shapes[1])
else:
batch_incomplete = tf.image.crop_to_bounding_box(batch_pos, margin.top, margin.left, h, w//2)
else:
batch_incomplete = tf.image.crop_to_bounding_box(batch_pos, margin.top,
margin.left, config.mask_shapes[0], config.mask_shapes[1])
if config.l1_type == 0:
mask_priority = relative_spatial_variant_mask(mask)
elif config.l1_type == 1:
mask_priority = confidence_driven_mask(mask)
else:
mask_priority = mask
x, x_fe = self.build_generator(batch_incomplete, mask, margin, config=config, reuse=reuse)
batch_predicted = x
losses = {}
# apply mask and complete image
batch_complete = batch_predicted*mask + batch_pos*(1.-mask)
if not config.pretrain_network:
config.feat_style_layers = {'conv3_2': 1.0, 'conv4_2': 1.0}
config.feat_content_layers = {'conv4_2': 1.0}
config.mrf_style_w = 1.0
config.mrf_content_w = 1.0
losses['id_mrf_loss'] = id_mrf_reg(batch_predicted, batch_pos, config)
tf.summary.scalar('losses/id_mrf_loss', losses['id_mrf_loss'])
losses['l1_loss'] = config.pretrain_l1_alpha * tf.reduce_mean(tf.abs(batch_pos - x) * mask_priority)
losses['ae_loss'] = config.pretrain_l1_alpha * tf.reduce_mean(tf.abs(batch_pos - x) * (1.-mask))
losses['ae_loss'] /= tf.reduce_mean(1.-mask)
if summary:
tf.summary.scalar('losses/l1_loss', losses['l1_loss'])
tf.summary.scalar('losses/ae_loss', losses['ae_loss'])
if config.random_mask is True:
batch_incomplete_pad = tf.pad(batch_incomplete,
[[0, 0], [margin.top, margin.bottom], [margin.left, margin.right], [0, 0]])
else:
if config.mask_shapes[0] == 64:
batch_incomplete_pad = tf.pad(batch_incomplete,
[[0, 0], [margin.top, margin.bottom], [margin.left, margin.right],
[0, 0]])
else:
batch_incomplete_pad = batch_incomplete
viz_img = tf.concat([batch_pos, batch_incomplete_pad, batch_complete], axis=2)[:, :, :, ::-1]
tf.summary.image('gt__input w padding__prediction', f2uint(viz_img))
# gan
batch_pos_neg = tf.concat([batch_pos, batch_complete], axis=0)
# wgan with gradient penalty
build_critics = self.build_contextual_wgan_discriminator
# separate gan
global_wgan_loss_alpha = 1.0
pos_neg_local, pos_neg_global, mask_local = build_critics(batch_pos_neg, mask, config=config, reuse=reuse)
pos_local, neg_local = tf.split(pos_neg_local, 2)
pos_global, neg_global = tf.split(pos_neg_global, 2)
# wgan loss
g_loss_local, d_loss_local = gan_wgan_loss(pos_local, neg_local, name='gan/local_gan')
g_loss_global, d_loss_global = gan_wgan_loss(pos_global, neg_global, name='gan/global_gan')
losses['g_loss'] = global_wgan_loss_alpha * g_loss_global + g_loss_local
losses['d_loss'] = d_loss_global + d_loss_local
# gp
interpolates_global = random_interpolates(batch_pos, batch_complete)
interpolates_local = interpolates_global
dout_local, dout_global, _ = build_critics(interpolates_global, mask, config=config, reuse=True)
# apply penalty
penalty_local = gradients_penalty(interpolates_local, dout_local, mask=mask_local)
penalty_global = gradients_penalty(interpolates_global, dout_global, mask=mask)
losses['gp_loss'] = config.wgan_gp_lambda * (penalty_local + penalty_global)
losses['d_loss'] = losses['d_loss'] + losses['gp_loss']
if summary and not config.pretrain_network:
tf.summary.scalar('discriminator/d_loss', losses['d_loss'])
tf.summary.scalar('wgan_loss/gp_loss', losses['gp_loss'])
if config.pretrain_network:
losses['g_loss'] = 0
else:
losses['g_loss'] = config.gan_loss_alpha * losses['g_loss']
losses['g_loss'] += config.mrf_alpha * losses['id_mrf_loss']
losses['g_loss'] += config.l1_loss_alpha * losses['l1_loss']
losses['g_loss'] += config.ae_loss_alpha * losses['ae_loss']
g_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, 'inpaint_net')
d_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, 'discriminator')
return g_vars, d_vars, losses
def evaluate(self, images, masks, margin, config, reuse=False):
masks = 1 - masks
batch_pos = images / 127.5 - 1.
h, w = batch_pos.get_shape().as_list()[1:3]
if config.random_mask is False:
if w == 128:
batch_incomplete = tf.image.crop_to_bounding_box(batch_pos, 0, 0, 64, 128)
margin = Margin(0, 0, 256-64, 0)
elif w == 512 or w == 1024:
batch_incomplete = tf.image.crop_to_bounding_box(batch_pos, 0, w // 4, h, w // 2)
margin = Margin(0, w//4, 0, w//4)
else:
batch_incomplete = tf.image.crop_to_bounding_box(batch_pos, h // 4, w // 4, h // 2, w // 2)
margin = Margin(h // 4, w // 4, h // 4, w // 4)
else:
batch_incomplete = tf.image.crop_to_bounding_box(batch_pos, margin.top, margin.left,
config.mask_shapes[0], config.mask_shapes[1])
x, x_fe = self.build_generator(batch_incomplete, masks, margin, config=config, reuse=reuse)
batch_predict = x
# apply mask and reconstruct
batch_complete = batch_predict*masks + batch_pos*(1-masks)
return batch_complete, x_fe
class HRSemanticRegenerationNet(SemanticRegenerationNet):
def __init__(self):
super(HRSemanticRegenerationNet, self).__init__()
self.name = 'HRSemanticRegenerationNet'
def build_generator(self, x, mask, config=None, reuse=False, name='inpaint_net'):
xin = x
if config is not None:
use_cn = config.use_cn
else:
use_cn = True
# two stage network
cnum = config.g_cnum
with tf.variable_scope(name, reuse=reuse):
x_fe = self.FEN(x, cnum)
x = self.CPN(x_fe, xin, mask, cnum, use_cn, config.fa_alpha)
return x, x_fe
def build_net(self, batch_data, config, summary=True, reuse=False):
batch_pos = batch_data / 127.5 - 1.
if config.random_mask is True:
self.bbox_gen = random_sqaure
else:
self.bbox_gen = fixed_bbox_withMargin
bbox, _ = self.bbox_gen(config)
mask = bbox2mask(bbox, config)
mask = 1. - mask # we need to predict context
h, w = batch_pos.get_shape().as_list()[1:3]
batch_incomplete = batch_pos * (1-mask)
if config.l1_type == 0:
mask_priority = relative_spatial_variant_mask(mask)
elif config.l1_type == 1:
mask_priority = confidence_driven_mask(mask)
else:
mask_priority = mask
x, x_fe = self.build_generator(batch_incomplete, mask, config=config, reuse=reuse)
batch_predicted = x
losses = {}
# apply mask and complete image
batch_complete = batch_predicted*mask + batch_pos*(1.-mask)
if not config.pretrain_network:
config.feat_style_layers = {'conv3_2': 1.0, 'conv4_2': 1.0}
config.feat_content_layers = {'conv4_2': 1.0}
config.mrf_style_w = 1.0
config.mrf_content_w = 1.0
losses['id_mrf_loss'] = id_mrf_reg(batch_predicted, batch_pos, config)
tf.summary.scalar('losses/id_mrf_loss', losses['id_mrf_loss'])
losses['l1_loss'] = config.pretrain_l1_alpha * tf.reduce_mean(tf.abs(batch_pos - x) * mask_priority)
losses['ae_loss'] = config.pretrain_l1_alpha * tf.reduce_mean(tf.abs(batch_pos - x) * (1.-mask))
losses['ae_loss'] /= tf.reduce_mean(1.-mask)
if summary:
tf.summary.scalar('losses/l1_loss', losses['l1_loss'])
tf.summary.scalar('losses/ae_loss', losses['ae_loss'])
viz_img = tf.concat([batch_pos, batch_incomplete, batch_complete], axis=2)[:, :, :, ::-1]
tf.summary.image('gt__input w padding__prediction', f2uint(viz_img))
# gan
batch_pos_neg = tf.concat([batch_pos, batch_complete], axis=0)
# wgan with gradient penalty
build_critics = self.build_contextual_wgan_discriminator
# separate gan
global_wgan_loss_alpha = 1.0
pos_neg_local, pos_neg_global, mask_local = build_critics(batch_pos_neg, mask, config=config, reuse=reuse)
pos_local, neg_local = tf.split(pos_neg_local, 2)
pos_global, neg_global = tf.split(pos_neg_global, 2)
# wgan loss
g_loss_local, d_loss_local = gan_wgan_loss(pos_local, neg_local, name='gan/local_gan')
g_loss_global, d_loss_global = gan_wgan_loss(pos_global, neg_global, name='gan/global_gan')
losses['g_loss'] = global_wgan_loss_alpha * g_loss_global + g_loss_local
losses['d_loss'] = d_loss_global + d_loss_local
# gp
interpolates_global = random_interpolates(batch_pos, batch_complete)
interpolates_local = interpolates_global
dout_local, dout_global, _ = build_critics(interpolates_global, mask, config=config, reuse=True)
# apply penalty
penalty_local = gradients_penalty(interpolates_local, dout_local, mask=mask_local)
penalty_global = gradients_penalty(interpolates_global, dout_global, mask=mask)
losses['gp_loss'] = config.wgan_gp_lambda * (penalty_local + penalty_global)
losses['d_loss'] = losses['d_loss'] + losses['gp_loss']
if summary and not config.pretrain_network:
tf.summary.scalar('discriminator/d_loss', losses['d_loss'])
tf.summary.scalar('wgan_loss/gp_loss', losses['gp_loss'])
if config.pretrain_network:
losses['g_loss'] = 0
else:
losses['g_loss'] = config.gan_loss_alpha * losses['g_loss']
losses['g_loss'] += config.mrf_alpha * losses['id_mrf_loss']
losses['g_loss'] += config.l1_loss_alpha * losses['l1_loss']
losses['g_loss'] += config.ae_loss_alpha * losses['ae_loss']
g_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, 'inpaint_net')
d_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, 'discriminator')
return g_vars, d_vars, losses
def evaluate(self, images, masks, margin, config, reuse=False):
masks = 1 - masks
batch_pos = images / 127.5 - 1.
batch_incomplete = batch_pos * (1 - masks)
x, x_fe = self.build_generator(batch_incomplete, masks, config=config, reuse=reuse)
batch_predict = x
# apply mask and reconstruct
batch_complete = batch_predict*masks + batch_pos*(1-masks)
return batch_complete, x_fe
|
11584320
|
from __future__ import print_function
import os
import subprocess
import glob
import time
import py_compile
import sys
from unittest import TestCase
class TestExamples(TestCase):
def setUp(self):
os.chdir("examples")
def tearDown(self):
if os.path.basename(os.getcwd()) == "examples":
os.chdir("..")
else:
os.chdir("../..")
def test_examples(self):
if (os.getenv("TRAVIS") == "TRUE") and (
os.getenv("TASK") != "EXAMPLES"):
return
PY3 = sys.version_info[0] == 3
noTests = False
skipTravis = [
"biopython",
"bidict",
"redis",
"rsa",
"simplejson",
"solc",
"vyper",
"newsortedcontainers",
"sortedcontainers",
"tensorflow",
"pystan",
"z3"]
skipPY3 = [
"XML",
"microjson",
"rsa",
"turtle"]
justCompile = [
"AVL",
"arcpy",
"arrow",
"c",
"datarray_inference",
"dateutil",
"gmpy2",
"hypothesis_heaps",
"lopsided_grammar",
"maze",
"nondet_talk_examples",
"numpy",
"osquery",
"parallelsorts",
"pyfakefs",
"stringh",
"sympy",
"tictactoe",
"tstl",
"turtle"]
problemsFree = []
testSmallcheck = True
noSmallcheck = [
"danluuexample",
"eval",
"rsa",
"tensorflow"]
shouldFail = [
"newxml.tstl",
"nutshell.tstl",
"onestep.tstl",
"statechanginginvar.tstl",
"turtle.tstl",
"water.tstl"]
if PY3:
justCompile.extend(skipPY3)
expectedBytecode = ['arcpy/arcpy1.tstl', 'arcpy/arcpy5.tstl']
for f in os.listdir("."):
if os.path.isdir(f):
if ((os.getenv("TRAVIS") == "TRUE") and
(os.getenv("SUBTASK") != "JUSTCOMPILE") and (f in justCompile)):
continue
if ((os.getenv("TRAVIS") == "TRUE") and
(os.getenv("SUBTASK") == "JUSTCOMPILE") and (f not in justCompile)):
continue
if ((os.getenv("TRAVIS") == "TRUE") and
(os.getenv("SUBTASK") == "SMALL") and (f in noSmallcheck)):
continue
os.chdir(f)
print("=" * 80)
for t in glob.glob("*.tstl"):
print(f + "/" + t, end=": ")
print("COMPILING", end="...")
sys.stdout.flush()
tstlCmd = ["tstl", t]
if "tensorflow" in f:
tstlCmd += ["--noReload"]
r = subprocess.call(tstlCmd)
self.assertEqual(r, 0)
if (os.getenv("TRAVIS") == "TRUE") and (f in skipTravis):
print("OK")
continue
print("COMPILING TO BYTECODE", end="...")
sys.stdout.flush()
try:
start = time.time()
py_compile.compile("sut.py", doraise=True)
print("NEEDED", round(time.time() - start, 2), "SECONDS", end="...")
except py_compile.PyCompileError as e:
print("BYTECODE COMPILATION FAILED!")
print(e)
self.assertTrue((f + "/" + t) in expectedBytecode)
continue
if (noTests or (f in justCompile) or
((os.getenv("TRAVIS") == "TRUE") and
(os.getenv("SUBTASK") == "JUSTCOMPILE"))):
print("OK!")
os.remove("sut.py")
try:
os.remove("sut.pyc")
except OSError:
pass
continue
print("RUNNING", end="...")
sys.stdout.flush()
if (os.getenv("TRAVIS") != "TRUE") or (os.getenv("SUBTASK") == "NOBUGS"):
rtCmd = [
"tstl_rt",
"--timeout",
"16",
"--timedProgress",
"5",
"--noCheck",
"--uncaught",
"--silentSUT"]
start = time.time()
p = subprocess.Popen(rtCmd)
# Big timeout is for huge coverage dumps like sympy
while (
p.poll() is None) and (
(time.time() - start) < 300):
time.sleep(1)
self.assertNotEqual(p.poll(), None)
r = p.returncode
self.assertEqual(r, 0)
print("OK!")
sys.stdout.flush()
if (os.getenv("TRAVIS") != "TRUE") or (os.getenv("SUBTASK") == "FREE"):
rtCmd = [
"tstl_rt",
"--timeout",
"16",
"--timedProgress",
"5",
"--noCover",
"--output",
".freefail.test",
"--keepLast",
"--silentSUT"]
start = time.time()
if f not in problemsFree:
with open(os.devnull, 'w') as dnull:
p = subprocess.Popen(rtCmd, stdout=dnull)
else:
p = subprocess.Popen(rtCmd)
# Big timeout is for huge coverage dumps like sympy
while (
p.poll() is None) and (
(time.time() - start) < 300):
time.sleep(1)
self.assertNotEqual(p.poll(), None)
r = p.returncode
if t in shouldFail:
self.assertEqual(r, 255)
self.assertTrue(r in [0, 255])
if r == 255:
rr0 = subprocess.call(["tstl_replay",
".freefail.test"])
self.assertEqual(rr0, 255)
if t == "onestep.tstl":
with open(".freefail.test", 'r') as ff:
self.assertEqual(len(ff.readlines()), 1)
rr1 = subprocess.call(["tstl_reduce",
".freefail.full.test",
".freesmall.test",
"--verbose",
"True"])
self.assertEqual(rr1, 0)
if t == "onestep.tstl":
with open(".freesmall.test", 'r') as ff:
self.assertEqual(len(ff.readlines()), 1)
rr2 = subprocess.call(["tstl_replay",
".freesmall.test"])
self.assertEqual(rr2, 255)
if (testSmallcheck and (f not in noSmallcheck) and
((os.getenv("TRAVIS") != "TRUE") or (os.getenv("SUBTASK") == "SMALL"))):
scCmd = [
"tstl_smallcheck",
"--depth",
"2",
"--recursive",
"1",
"--multiple"]
start = time.time()
with open(os.devnull, 'w') as dnull:
p = subprocess.Popen(scCmd, stdout=dnull)
while (
p.poll() is None) and (
(time.time() - start) < 300):
time.sleep(1)
self.assertNotEqual(p.poll(), None)
r = p.returncode
self.assertTrue(r in [0, 255])
scCmd = [
"tstl_smallcheck",
"--depth",
"2",
"--recursive",
"1",
"--visited",
"--multiple"]
start = time.time()
with open(os.devnull, 'w') as dnull:
p = subprocess.Popen(scCmd, stdout=dnull)
while (
p.poll() is None) and (
(time.time() - start) < 300):
time.sleep(1)
self.assertNotEqual(p.poll(), None)
r = p.returncode
self.assertTrue(r in [0, 255])
scCmd = [
"tstl_smallcheck",
"--depth",
"2",
"--recursive",
"1",
"--visitedList",
"--reverse",
"--multiple"]
start = time.time()
with open(os.devnull, 'w') as dnull:
p = subprocess.Popen(scCmd, stdout=dnull)
while (
p.poll() is None) and (
(time.time() - start) < 300):
time.sleep(1)
self.assertNotEqual(p.poll(), None)
r = p.returncode
self.assertTrue(r in [0, 255])
os.remove("sut.py")
try:
os.remove("sut.pyc")
except OSError:
pass
os.chdir("..")
sys.stdout.flush()
|
11584323
|
import re
__product__ = "HTML5"
__description__ = (
"HTML5 is a markup language used for structuring and presenting "
"content on the World Wide Web. It is the fifth and current major "
"version of the HTML standard."
)
def search(html, **kwargs):
html = str(html)
plugin_detection_schema = (
re.compile(r".html5.", re.I),
re.compile(r"\bhtml\d+", re.I)
)
for plugin in plugin_detection_schema:
if plugin.search(html) is not None:
return True
return False
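# Hedged usage sketch:
#   search("<script src='app.html5.js'></script>")  # -> True
#   search("no markup here")                        # -> False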
|
11584332
|
from copy import deepcopy
from typing import Dict, List, Any, Union
from ..config import fill_default, is_algorithm_distributed
from ..pl_logger import LocalMediaLogger
from ..dataset import DatasetResult, RLDataset, log_video, determine_precision
from ..launcher import Launcher, DistributedLauncher
from machin.frame.algorithms import *
from machin.env.utils.openai_gym import disable_view_window
from machin.utils.conf import Config
from machin.utils.save_env import SaveEnv
from pytorch_lightning.callbacks import Callback, ModelCheckpoint, EarlyStopping
from pytorch_lightning.loggers import TensorBoardLogger
from gym.spaces import Box, Discrete, MultiBinary, MultiDiscrete
import gym
import torch as t
import pytorch_lightning as pl
disable_view_window()
def _is_simple_space(space):
return type(space) in (Box, Discrete, MultiBinary, MultiDiscrete)
def _is_discrete_space(space):
return type(space) in (Discrete, MultiDiscrete)
def _is_continuous_space(space):
return type(space) in (Box, MultiBinary)
class RLGymDiscActDataset(RLDataset):
"""
This dataset uses a discrete-action OpenAI Gym environment.
Notes:
The forward method of Q networks, actor networks must accept arguments
of default names like "action", "state", and not custom names like
"some_action", "some_state".
All your networks should not have custom outputs after default ones
like action, action_log_prob, etc.
Notes:
The environment should accept a Python int in each step.
The environment should output a simple observation space; dict and
tuple spaces are not supported. The first dimension should be the batch
dimension of size 1; if it is missing, it is added automatically.
The environment should have a finite number of acting steps.
Args:
frame: Algorithm framework.
env: Gym environment instance.
act_kwargs: Additional keyword arguments passed to act functions
of different frameworks.
"""
early_stopping_monitor = "total_reward"
def __init__(
self,
frame,
env,
render_every_episode: int = 100,
act_kwargs: Dict[str, Any] = None,
):
super().__init__()
self.frame = frame
self.env = env
self.render_every_episode = render_every_episode
self.act_kwargs = act_kwargs or {}
self._precision = determine_precision(
[getattr(frame, m) for m in frame._is_top]
)
self.counter = 0
assert type(env.action_space) == Discrete
assert _is_simple_space(env.observation_space)
def __next__(self):
result = DatasetResult()
terminal = False
total_reward = 0
state = t.tensor(self.env.reset(), dtype=self._precision)
state = state.flatten().unsqueeze(0)
# manual sync and then disable syncing if framework is distributed.
getattr(self.frame, "manual_sync", lambda: None)()
getattr(self.frame, "set_sync", lambda x: None)(False)
rendering = []
while not terminal:
if self.counter % self.render_every_episode == 0:
rendering.append(self.env.render(mode="rgb_array"))
with t.no_grad():
old_state = state
# agent model inference
if type(self.frame) in (A2C, PPO, SAC, GAIL, A3C, IMPALA):
action = self.frame.act({"state": old_state}, **self.act_kwargs)[0]
elif type(self.frame) in (DQN, DQNPer, DQNApex, RAINBOW):
action = self.frame.act_discrete_with_noise(
{"state": old_state}, **self.act_kwargs
)
elif type(self.frame) in (DDPG, DDPGPer, HDDPG, TD3, DDPGApex):
action, probs = self.frame.act_discrete_with_noise(
{"state": old_state}, **self.act_kwargs
)
elif type(self.frame) in (ARS,):
action = self.frame.act({"state": old_state}, **self.act_kwargs)
else:
raise RuntimeError(f"Unsupported framework: {type(self.frame)}")
state, reward, terminal, info = self.env.step(action.item())
state = t.tensor(state, dtype=self._precision)
state = state.flatten().unsqueeze(0)
reward = float(reward)
total_reward += reward
if type(self.frame) in (DDPG, DDPGPer, HDDPG, TD3, DDPGApex):
action = probs
result.add_observation(
{
"state": {"state": old_state},
"action": {"action": action},
"next_state": {"state": state},
"reward": reward,
"terminal": terminal,
}
)
result.add_log(info)
if len(rendering) > 0:
result.add_log({"video": (rendering, log_video)})
result.add_log({"total_reward": total_reward})
getattr(self.frame, "set_sync", lambda x: None)(True)
self.counter += 1
return result
class RLGymContActDataset(RLDataset):
"""
This dataset uses a continuous-action OpenAI Gym environment.
Notes:
The forward method of Q networks, actor networks must accept arguments
of default names like "action", "state", and not custom names like
"some_action", "some_state".
All your networks should not have custom outputs after default ones
like action, action_log_prob, etc.
Notes:
The environment should accept a numpy float array in each step.
The environment should output a simple observation space; dict and
tuple spaces are not supported. The first dimension should be the batch
dimension of size 1; if it is missing, it is added automatically.
The environment should have a finite number of acting steps.
Args:
frame: Algorithm framework.
env: Gym environment instance.
act_kwargs: Additional keyword arguments passed to act functions
of different frameworks.
"""
early_stopping_monitor = "total_reward"
def __init__(
self,
frame,
env,
render_every_episode: int = 100,
act_kwargs: Dict[str, Any] = None,
):
super().__init__()
self.frame = frame
self.env = env
self.render_every_episode = render_every_episode
self.act_kwargs = act_kwargs or {}
self._precision = determine_precision(
[getattr(frame, m) for m in frame._is_top]
)
self.counter = 0
assert type(env.action_space) == Box
assert _is_simple_space(env.observation_space)
def __next__(self):
result = DatasetResult()
terminal = False
total_reward = 0
state = t.tensor(self.env.reset(), dtype=self._precision)
state = state.flatten().unsqueeze(0)
# manual sync and then disable syncing if framework is distributed.
getattr(self.frame, "manual_sync", lambda: None)()
getattr(self.frame, "set_sync", lambda x: None)(False)
rendering = []
while not terminal:
if self.counter % self.render_every_episode == 0:
rendering.append(self.env.render(mode="rgb_array"))
with t.no_grad():
old_state = state
# agent model inference
if type(self.frame) in (A2C, PPO, SAC, GAIL, A3C, IMPALA):
action = self.frame.act({"state": old_state}, **self.act_kwargs)[0]
elif type(self.frame) in (DDPG, DDPGPer, HDDPG, TD3, DDPGApex):
action = self.frame.act_with_noise(
{"state": old_state}, **self.act_kwargs
)[0]
elif type(self.frame) in (ARS,):
action = self.frame.act({"state": old_state}, **self.act_kwargs)
else:
raise RuntimeError(f"Unsupported framework: {type(self.frame)}")
state, reward, terminal, info = self.env.step(
action.detach().cpu().numpy()
)
state = t.tensor(state, dtype=self._precision)
state = state.flatten().unsqueeze(0)
reward = float(reward)
total_reward += reward
result.add_observation(
{
"state": {"state": old_state},
"action": {"action": action},
"next_state": {"state": state},
"reward": reward,
"terminal": terminal,
}
)
result.add_log(info)
if len(rendering) > 0:
result.add_log({"video": (rendering, log_video)})
result.add_log({"total_reward": total_reward})
getattr(self.frame, "set_sync", lambda x: None)(True)
self.counter += 1
return result
def gym_env_dataset_creator(frame, env_config):
env = gym.make(env_config["env_name"])
if _is_discrete_space(env.action_space):
return RLGymDiscActDataset(
frame,
env,
render_every_episode=env_config["render_every_episode"],
act_kwargs=env_config["act_kwargs"],
)
elif _is_continuous_space(env.action_space):
return RLGymContActDataset(
frame,
env,
render_every_episode=env_config["render_every_episode"],
act_kwargs=env_config["act_kwargs"],
)
else:
raise ValueError(
f"Gym environment {env_config['env_name']} has action space "
f"of type {env.action_space}, which is not supported."
)
def generate_env_config(config: Union[Dict[str, Any], Config] = None):
"""
Generate example OpenAI gym config.
"""
config = deepcopy(config) or {}
return fill_default(
{
"env": "openai_gym",
"train_env_config": {
"env_name": "CartPole-v1",
"render_every_episode": 100,
"act_kwargs": {},
},
"test_env_config": {
"env_name": "CartPole-v1",
"render_every_episode": 100,
"act_kwargs": {},
},
},
config,
)
def launch(config: Union[Dict[str, Any], Config], pl_callbacks: List[Callback] = None):
"""
Args:
config: All configs needed to launch a gym environment and initialize
the algorithm framework.
pl_callbacks: Additional callbacks used to modify training behavior.
"""
pl_callbacks = pl_callbacks or []
# disable time formatting so all processes will use the same directory
# delay directory creation
s_env = SaveEnv(config.get("root_dir", None) or "./trial", time_format="")
checkpoint_callback = ModelCheckpoint(
dirpath=s_env.get_trial_model_dir(),
filename="{epoch:02d}-{total_reward:.2f}",
save_top_k=1,
monitor="total_reward",
mode="max",
period=1,
verbose=True,
)
early_stopping = EarlyStopping(
monitor="total_reward", mode="max", patience=config["early_stopping_patience"],
)
t_logger = TensorBoardLogger(s_env.get_trial_train_log_dir())
lm_logger = LocalMediaLogger(
s_env.get_trial_image_dir(), s_env.get_trial_image_dir()
)
is_distributed = is_algorithm_distributed(config)
trainer = pl.Trainer(
gpus=config.get("gpus", None),
num_processes=config.get("num_processes", 1),
num_nodes=config.get("num_nodes", 1),
callbacks=[checkpoint_callback, early_stopping] + pl_callbacks,
logger=[t_logger, lm_logger],
limit_train_batches=config["episode_per_epoch"],
max_steps=config["max_episodes"],
accelerator="ddp" if is_distributed else None,
)
if is_distributed:
model = DistributedLauncher(config, gym_env_dataset_creator)
else:
model = Launcher(config, gym_env_dataset_creator)
trainer.fit(model)
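# Hedged usage sketch (values illustrative; launch() reads these keys):
#   config = generate_env_config({})
#   config.update({"episode_per_epoch": 10, "max_episodes": 1000,
#                  "early_stopping_patience": 5})
#   launch(config)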
|
11584349
|
import torch
from torch import nn
class BitShift(nn.Module):
def __init__(self, direction):
if direction not in ("LEFT", "RIGHT"):
raise ValueError("invalid BitShift direction {}".format(direction))
self.direction = direction
super().__init__()
def forward(self, X, Y):
if self.direction == "LEFT":
return X << Y
elif self.direction == "RIGHT":
return X >> Y
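# Hedged usage sketch:
#   shift = BitShift("LEFT")
#   shift(torch.tensor([1, 2]), torch.tensor([3, 1]))  # -> tensor([8, 4])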
|
11584350
|
import numpy
import util
from chainer import cuda
from chainer import function
from chainer.utils import type_check
def _as_mat(x):
if x.ndim == 2:
return x
return x.reshape(len(x), -1)
class QuantizedLinearFunction(function.Function):
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 <= n_in, n_in <= 3)
x_type, w_type = in_types[:2]
type_check.expect(
x_type.dtype == numpy.float32,
w_type.dtype == numpy.float32,
x_type.ndim >= 2,
w_type.ndim == 2,
type_check.prod(x_type.shape[1:]) == w_type.shape[1],
)
if n_in.eval() == 3:
b_type = in_types[2]
type_check.expect(
b_type.dtype == numpy.float32,
b_type.ndim == 1,
b_type.shape[0] == w_type.shape[0],
)
def forward_cpu(self, inputs):
x = _as_mat(inputs[0])
W = inputs[1]
# quantized x, w
xq = util._log_quant_cpu(x * 64, 64)
Wq = util._log_quant_cpu(W * 64, 64)
y = xq.dot(Wq.T) / float(64 ** 2)
if len(inputs) == 3:
b = inputs[2]
y += b
return y,
    def forward_gpu(self, inputs):
        x = _as_mat(inputs[0])
        W = inputs[1]
        # quantized x, w
        xq = util._log_quant_gpu(x * 64, 64)
        Wq = util._log_quant_gpu(W * 64, 64)
        y = xq.dot(Wq.T) / float(64 ** 2)
        if len(inputs) == 3:
            b = inputs[2]
            y += b
        return y,
def backward_cpu(self, inputs, grad_outputs):
x = _as_mat(inputs[0])
W = inputs[1]
gy = grad_outputs[0]
# quantized x, w
xq = util._log_quant_cpu(x * 64, 64)
Wq = util._log_quant_cpu(W * 64, 64)
gyq = util._log_quant_cpu(gy * 64, 64)
gx = gyq.dot(Wq).reshape(inputs[0].shape) / float(64 ** 2)
gW = gyq.T.dot(xq) / float(64 ** 2)
if len(inputs) == 3:
gb = gy.sum(0)
return gx, gW, gb
else:
return gx, gW
def backward_gpu(self, inputs, grad_outputs):
x = _as_mat(inputs[0])
W = inputs[1]
gy = grad_outputs[0]
# quantized x, w
xq = util._log_quant_gpu(x * 64, 64)
Wq = util._log_quant_gpu(W * 64, 64)
gyq = util._log_quant_gpu(gy * 64, 64)
gx = gyq.dot(Wq).reshape(inputs[0].shape) / float(64 ** 2)
gW = gyq.T.dot(xq) / float(64 ** 2)
if len(inputs) == 3:
gb = gy.sum(0)
return gx, gW, gb
else:
return gx, gW
def quantized_linear(x, W, b=None):
"""Quantized Linear function, or affine transformation.
It accepts two or three arguments: an input minibatch ``x``, a weight
matrix ``W``, and optionally a bias vector ``b``. It computes
:math:`Y = xW^\\top + b`.
Args:
x (~chainer.Variable): Input variable. Its first dimension is assumed
to be the *minibatch dimension*. The other dimensions are treated
as concatenated one dimension whose size must be ``N``.
W (~chainer.Variable): Weight variable of shape ``(M, N)``.
        b (~chainer.Variable): Bias variable (optional) of shape ``(M,)``.
Returns:
~chainer.Variable: Output variable.
.. seealso:: :class:`~chainer.links.Linear`
"""
if b is None:
return QuantizedLinearFunction()(x, W)
else:
return QuantizedLinearFunction()(x, W, b)
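# Usage sketch (hypothetical data; requires the project-local `util` module
# that provides `_log_quant_cpu` / `_log_quant_gpu`):
#
#   import numpy as np
#   from chainer import Variable
#   x = Variable(np.random.randn(8, 16).astype(np.float32))
#   W = Variable(np.random.randn(4, 16).astype(np.float32))
#   y = quantized_linear(x, W)  # y.shape == (8, 4)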
|
11584359
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Group, AnonymousUser
from django.db import models
from guardian.testapp.tests.conf import skipUnlessTestApp
from guardian.testapp.tests.test_core import ObjectPermissionTestCase
from guardian.testapp.models import Project
from guardian.testapp.models import ProjectUserObjectPermission
from guardian.testapp.models import ProjectGroupObjectPermission
from guardian.models import UserObjectPermission
from guardian.models import UserObjectPermissionBase
from guardian.models import GroupObjectPermission
from guardian.utils import get_anonymous_user
from guardian.utils import get_identity
from guardian.utils import get_user_obj_perms_model
from guardian.utils import get_group_obj_perms_model
from guardian.utils import get_obj_perms_model
from guardian.exceptions import NotUserNorGroup
User = get_user_model()
class GetAnonymousUserTest(TestCase):
def test(self):
anon = get_anonymous_user()
self.assertTrue(isinstance(anon, User))
class GetIdentityTest(ObjectPermissionTestCase):
def test_user(self):
user, group = get_identity(self.user)
self.assertTrue(isinstance(user, User))
self.assertEqual(group, None)
def test_anonymous_user(self):
anon = AnonymousUser()
user, group = get_identity(anon)
self.assertTrue(isinstance(user, User))
self.assertEqual(group, None)
def test_group(self):
user, group = get_identity(self.group)
self.assertTrue(isinstance(group, Group))
self.assertEqual(user, None)
def test_not_user_nor_group(self):
self.assertRaises(NotUserNorGroup, get_identity, 1)
self.assertRaises(NotUserNorGroup, get_identity, "User")
self.assertRaises(NotUserNorGroup, get_identity, User)
def test_multiple_user_qs(self):
user, group = get_identity(User.objects.all())
self.assertIsInstance(user, models.QuerySet)
self.assertIsNone(group)
def test_multiple_user_list(self):
user, group = get_identity([self.user])
self.assertIsInstance(user, list)
self.assertIsNone(group)
def test_multiple_group_qs(self):
user, group = get_identity(Group.objects.all())
self.assertIsInstance(group, models.QuerySet)
self.assertIsNone(user)
def test_multiple_group_list(self):
user, group = get_identity([self.group])
self.assertIsInstance(group, list)
self.assertIsNone(user)
@skipUnlessTestApp
class GetUserObjPermsModelTest(TestCase):
def test_for_instance(self):
project = Project(name='Foobar')
self.assertEqual(get_user_obj_perms_model(project),
ProjectUserObjectPermission)
def test_for_class(self):
self.assertEqual(get_user_obj_perms_model(Project),
ProjectUserObjectPermission)
def test_default(self):
self.assertEqual(get_user_obj_perms_model(ContentType),
UserObjectPermission)
def test_user_model(self):
# this test assumes that there were no direct obj perms model to User
# model defined (i.e. while testing guardian app in some custom
# project)
self.assertEqual(get_user_obj_perms_model(User),
UserObjectPermission)
@skipUnlessTestApp
class GetGroupObjPermsModelTest(TestCase):
def test_for_instance(self):
project = Project(name='Foobar')
self.assertEqual(get_group_obj_perms_model(project),
ProjectGroupObjectPermission)
def test_for_class(self):
self.assertEqual(get_group_obj_perms_model(Project),
ProjectGroupObjectPermission)
def test_default(self):
self.assertEqual(get_group_obj_perms_model(ContentType),
GroupObjectPermission)
def test_group_model(self):
# this test assumes that there were no direct obj perms model to Group
# model defined (i.e. while testing guardian app in some custom
# project)
self.assertEqual(get_group_obj_perms_model(Group),
GroupObjectPermission)
class GetObjPermsModelTest(TestCase):
def test_image_field(self):
class SomeModel(models.Model):
image = models.FileField(upload_to='images/')
obj = SomeModel()
perm_model = get_obj_perms_model(obj, UserObjectPermissionBase,
UserObjectPermission)
self.assertEqual(perm_model, UserObjectPermission)
def test_file_field(self):
class SomeModel2(models.Model):
file = models.FileField(upload_to='images/')
obj = SomeModel2()
perm_model = get_obj_perms_model(obj, UserObjectPermissionBase,
UserObjectPermission)
self.assertEqual(perm_model, UserObjectPermission)
|
11584382
|
def get_valid_actions(env, roll):
a = env.game.get_valid_plays(env.colors[env.agent_selection], roll)
return a
def to_bar(action, roll):
if action == 25: # bar
if roll < 0: # white
return ('bar', 24 - abs(roll))
else: # black
return ('bar', abs(roll) - 1)
else:
if action + roll - 1 > 23:
return (action - 1, 24)
elif action + roll - 1 < 0:
return (action - 1, -1)
else:
return (action - 1, action + roll - 1)
def from_bar(action):
bears_off = False
if action[1] == -1 or action[1] == 24:
bears_off = True
if action[0] == 'bar':
if action[1] > 12: # white, top
return (25, -(24 - action[1]), bears_off)
else: # black, bottom
return (25, (action[1] + 1), bears_off)
else:
return (action[0] + 1, action[1] - action[0], bears_off)
# action goes from single number to a tuple
def to_bg_format(action, roll):
base = 26
low_roll = min(roll)
high_roll = max(roll)
if action == base**2 * 2:
        return ()
if action < base**2: # Low roll first
dig1 = action % base
dig2 = action // base
a = to_bar(dig1, low_roll)
b = to_bar(dig2, high_roll)
if b[0] != 'bar' and b[0] > -1:
return (a, b)
else:
return (a,)
else: # High roll first
action = action - base**2
dig1 = action % base
dig2 = action // base
a = to_bar(dig1, high_roll)
b = to_bar(dig2, low_roll)
if b[0] != 'bar' and b[0] > -1:
return (a, b)
else:
return (a,)
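# Encoding note for to_bg_format above (inferred from the code, not from an
# external spec): a composite action is packed as two base-26 digits,
# dig1 + 26 * dig2. Values below 26**2 play the low die on dig1 and the high
# die on dig2, values in [26**2, 2*26**2) play the high die first, and
# 2 * 26**2 encodes "no legal move" (the empty tuple).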
# takes list of tuples and converts to a discrete value
def to_gym_format(actions, roll):
high_roll = max(roll)
low_roll = min(roll)
nums = []
base = 26
for act in actions:
if len(act) == 1:
a, diff1, bears_off = from_bar(act[0])
if bears_off:
diff1 = high_roll if abs(diff1) > abs(low_roll) else low_roll
if abs(diff1) == abs(high_roll): # high first
a += base**2
nums.append(a)
elif isinstance(act[0], int) or act[0] == 'bar':
a, diff1, bears_off = from_bar(act)
if bears_off:
diff1 = high_roll if abs(diff1) > abs(low_roll) else low_roll
if abs(diff1) == abs(high_roll): # high first
a += base**2
nums.append(a)
elif len(act) == 2:
a, diff1, bears_off1 = from_bar(act[0])
b, diff2, bears_off2 = from_bar(act[1])
if bears_off1 or bears_off2:
if bears_off1 and not bears_off2:
if abs(diff2) == abs(high_roll):
diff1 = low_roll
else:
diff1 = high_roll
elif not bears_off1 and bears_off2:
if abs(diff1) == abs(high_roll):
diff2 = low_roll
else:
diff2 = high_roll
num = a + base * b
if diff1 > diff2: # high first
num += base**2
nums.append(num)
return nums
def double_roll(moves):
out = []
for move in moves:
if len(move) > 1:
out.append((move[0], move[1]))
else:
out.append(move[0])
return out
def opp_agent(env, agent):
return env.agents[0] if agent == env.agents[1] else env.agents[1]
def valid_action(env, action):
return env.action_spaces[env.agent_selection].contains(action)
|
11584438
|
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='mkdocs-add-number-plugin',
version='1.2.1',
description='MkDocs Plugin to automatically number the headings (h1-h6) '
'in each markdown page and number the nav.',
long_description=long_description,
long_description_content_type="text/markdown",
keywords='mkdocs index add-number plugin',
url='https://github.com/shihr/mkdocs-add-number-plugin.git',
author='ignorantshr',
author_email='<EMAIL>',
license='MIT',
python_requires='>=3.5',
install_requires=[
'mkdocs>=1.1'
],
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
packages=find_packages(),
entry_points={
'mkdocs.plugins': [
'mkdocs-add-number-plugin=mkdocs_add_number_plugin.plugin:AddNumberPlugin',
'add-number=mkdocs_add_number_plugin.plugin:AddNumberPlugin'
]
}
)
|
11584453
|
import tensorflow as tf
from tf_rbdl.liegroup import *
from tf_rbdl.kinematics import *
@tf.function
def id(theta,dtheta,ddtheta,g,pidlist,Mlist,Glist,Slist):
"""
Inverse dynamics.
Note that nbody == nq, since body only contains movable link.
Parameters
----------
theta (tf.Tensor):
Joint angles
(N,nq)
dtheta (tf.Tensor):
Joint velocities
(N,nq)
ddtheta (tf.Tensor):
Joint accelerations
(N,nq)
g (tf.Tensor):
Gravity vector
(3,)
pidlist (tf.Tensor):
Parent body index.
(nbody,)
Mlist (tf.Tensor):
Link frame i relative to p(i) at the home position
        (nbody,4,4)
Glist (tf.Tensor):
Spatial inertia matrices Gi of the links.
(nbody,6,6)
Slist (tf.Tensor):
Screw axes Si of the joints in a space frame.
(nq,6)
Returns
-------
tau (tf.Tensor):
Joint torques
(N,nq)
"""
N, nq = theta.shape
nbody = Mlist.shape[0]
    Mi = tf.TensorArray(tf.float32, size=nbody, clear_after_read=False, colocate_with_first_write_call=True) # T_world_to_link_i : len([(4,4), (4,4), ..., (4,4)]) = nbody
    Mi = Mi.write(0, Mlist[0])
Ai = tf.TensorArray(tf.float32, size=nq, clear_after_read=False) # Twist of joint i to the link : len([(1,6), (1,6), ..., (1,6)]) = nq
AdTi = tf.TensorArray(tf.float32, size=nbody, clear_after_read=False) # Adjoint_i_p(i) : len([(N,6,6), (N,6,6), ..., (N,6,6)]) = nbody
Vi = tf.TensorArray(tf.float32, size=nbody+1, clear_after_read=False, colocate_with_first_write_call=True) # len([(N,6), (N,6), ..., (N,6)]) = nbody+1, Vi[0] <-- World, Vi[1] <-- Link 0
Vi = Vi.write(0, tf.zeros((N, 6)))
Vdi = tf.TensorArray(tf.float32, size=nbody+1, clear_after_read=False) # len([(N,6), (N,6), ..., (N,6)]) = nbody+1, This starts from the world.
Vdi = Vdi.write(0, tf.tile(tf.expand_dims(tf.concat([tf.zeros(3), -g], axis=0), axis=0), tf.constant([N, 1])))
Fi = tf.TensorArray(tf.float32, size=nbody, clear_after_read=False, colocate_with_first_write_call=True)
Fi = Fi.write(0, tf.zeros((N,6))) # len([(N,6), (N,6), ..., (N,6)]) = nbody
tau = tf.TensorArray(tf.float32, size=nq, clear_after_read=False) # len([(N), (N), ..., (N)]) = nq
for i in tf.range(1,nbody):
Mi = Mi.write(i, tf.matmul(Mi.read(pidlist[i]),Mlist[i])) # (1,4,4)
for i in tf.range(nbody):
Ai = Ai.write(i, tf.squeeze(tf.matmul(adjoint(SE3_inv(tf.expand_dims(Mi.read(i),0))), tf.expand_dims(Slist[i], axis=1)), axis=2)) # (1, 6)
AdTi = AdTi.write(i, adjoint(tf.matmul(se3_to_SE3(vec_to_se3(Ai.read(i) * -tf.expand_dims(theta[:,i], axis=1))), SE3_inv(tf.expand_dims(Mlist[i],axis=0))))) # (N, 6, 6)
Vi = Vi.write(i+1, tf.squeeze(tf.matmul(AdTi.read(i),tf.expand_dims(Vi.read(pidlist[i]+1),axis=2)),axis=2) + Ai.read(i) * tf.expand_dims(dtheta[:,i],axis=1)) # (N, 6)
Vdi = Vdi.write(i+1, tf.squeeze(tf.matmul(AdTi.read(i),tf.expand_dims(Vdi.read(pidlist[i]+1),axis=2)),axis=2) + Ai.read(i) * tf.expand_dims(ddtheta[:,i],axis=1) + tf.squeeze(tf.matmul(ad(Vi.read(i+1)),tf.expand_dims(Ai.read(i),axis=2)),axis=2) * tf.expand_dims(dtheta[:,i],axis=1)) # (N, 6)
Fi = Fi.write(i, tf.squeeze(tf.matmul(tf.expand_dims(Glist[i],axis=0),tf.expand_dims(Vdi.read(i+1),axis=2)),axis=2) - tf.squeeze(tf.matmul(tf.transpose(ad(Vi.read(i+1)),perm=[0,2,1]), tf.matmul(tf.expand_dims(Glist[i],axis=0),tf.expand_dims(Vi.read(i+1),axis=2))),axis=2)) # (N,6)
for i in tf.range(nbody-1,0,-1):
Fi = Fi.write(pidlist[i], Fi.read(pidlist[i]) + tf.squeeze(tf.matmul(tf.transpose(AdTi.read(i), perm=[0,2,1]),tf.expand_dims(Fi.read(i),axis=2)),axis=2))
for i in tf.range(nq):
tau = tau.write(i, tf.squeeze(tf.matmul(tf.transpose(tf.expand_dims(Fi.read(i),axis=2),perm=[0,2,1]),tf.expand_dims(Ai.read(i),axis=2)),axis=[1,2])) # (N,)
return tf.transpose(tau.stack())
@tf.function
def mass_matrix(theta,pidlist,Mlist,Glist,Slist):
"""
Mass matrix
Parameters
----------
theta (tf.Tensor):
Joint angles
        (N,nq)
pidlist (tf.Tensor):
Parent body index.
(nbody,)
Mlist (tf.Tensor):
Link frame i relative to p(i) at the home position
        (nbody,4,4)
Glist (tf.Tensor):
Spatial inertia matrices Gi of the links.
(nbody,6,6)
Slist (tf.Tensor):
Screw axes Si of the joints in a space frame.
(nq,6)
Returns
-------
M (tf.Tensor):
Mass matrix
(N,nq,nq)
"""
N, nq = theta.shape
M = tf.TensorArray(tf.float32, size=nq, clear_after_read=False, colocate_with_first_write_call=True)
M = M.write(0,tf.zeros((N,nq)))
for i in tf.range(nq):
ddtheta = tf.TensorArray(tf.float32, size=nq, clear_after_read=False, colocate_with_first_write_call=True)
ddtheta = ddtheta.write(0,tf.zeros(N))
ddtheta = ddtheta.write(i,tf.ones(N))
M = M.write(i, id(theta,tf.zeros((N,nq)),tf.transpose(ddtheta.stack()),tf.zeros(3),pidlist,Mlist,Glist,Slist))
return tf.transpose(M.stack(), perm=[1,0,2])
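# Note on mass_matrix above: it builds M column by column via inverse
# dynamics. With zero velocity and zero gravity, id(theta, 0, e_i, 0, ...)
# returns exactly the torques required for a unit acceleration of joint i,
# i.e. the i-th column of the mass matrix.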
@tf.function
def coriolis_forces(theta,dtheta,pidlist,Mlist,Glist,Slist):
"""
Coriolis
Parameters
----------
theta (tf.Tensor):
Joint angles
(N,nq)
dtheta (tf.Tensor):
Joint velocities
(N,nq)
pidlist (tf.Tensor):
Parent body index.
(nbody,)
Mlist (tf.Tensor):
Link frame i relative to p(i) at the home position
        (nbody,4,4)
Glist (tf.Tensor):
Spatial inertia matrices Gi of the links.
(nbody,6,6)
Slist (tf.Tensor):
Screw axes Si of the joints in a space frame.
(nq,6)
Returns
-------
b (tf.Tensor):
Coriolis vector
(N,nq)
"""
N, nq = theta.shape
return id(theta,dtheta,tf.zeros((N,nq)),tf.zeros(3),pidlist,Mlist,Glist,Slist)
@tf.function
def gravity_forces(theta,g,pidlist,Mlist,Glist,Slist):
"""
Gravity
Parameters
----------
    theta (tf.Tensor):
        Joint angles
        (N,nq)
    g (tf.Tensor):
        Gravity vector
        (3,)
pidlist (tf.Tensor):
Parent body index.
(nbody,)
Mlist (tf.Tensor):
Link frame i relative to p(i) at the home position
        (nbody,4,4)
Glist (tf.Tensor):
Spatial inertia matrices Gi of the links.
(nbody,6,6)
Slist (tf.Tensor):
Screw axes Si of the joints in a space frame.
(nq,6)
Returns
-------
    g (tf.Tensor):
        Gravity torques
        (N,nq)
"""
N, nq = theta.shape
return id(theta,tf.zeros((N,nq)),tf.zeros((N,nq)),g,pidlist,Mlist,Glist,Slist)
@tf.function
def fd(theta,dtheta,tau,g,pidlist,Mlist,Glist,Slist):
"""
Forward dynamics
Parameters
----------
theta (tf.Tensor):
Joint angles
(N,nq)
    dtheta (tf.Tensor):
        Joint velocities
        (N,nq)
    tau (tf.Tensor):
        Joint torques
        (N,nq)
    g (tf.Tensor):
        Gravity vector
        (3,)
pidlist (tf.Tensor):
Parent body index.
(nbody,)
Mlist (tf.Tensor):
Link frame i relative to p(i) at the home position
        (nbody,4,4)
Glist (tf.Tensor):
Spatial inertia matrices Gi of the links.
(nbody,6,6)
Slist (tf.Tensor):
Screw axes Si of the joints in a space frame.
(nq,6)
Returns
-------
    ddtheta (tf.Tensor):
        Joint accelerations
        (N,nq)
"""
N, nq = theta.shape
return tf.squeeze(tf.matmul(tf.linalg.inv(mass_matrix(theta,pidlist,Mlist,Glist,Slist)), tf.expand_dims(tau - coriolis_forces(theta,dtheta,pidlist,Mlist,Glist,Slist) - gravity_forces(theta,g,pidlist,Mlist,Glist,Slist),axis=2)), axis=2)
@tf.function
def euler_step(theta,dtheta,ddtheta,dt):
"""
Euler Step
Parameters
----------
theta (tf.Tensor):
Joint angles
(N,nq)
dtheta (tf.Tensor):
Joint velocities
(N,nq)
ddtheta (tf.Tensor):
Joint accelerations
(N,nq)
dt (float):
Delta t
Returns
-------
    (thetalistNext, dthetalistNext) (tuple of tf.Tensor):
Next joint angles and velocities
(N,nq), (N,nq)
"""
return theta + dt * dtheta, dtheta + dt * ddtheta
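# Simulation sketch (hypothetical values; pidlist, Mlist, Glist, Slist must
# come from a concrete robot description):
#
#   for _ in range(n_steps):
#       ddtheta = fd(theta, dtheta, tau, g, pidlist, Mlist, Glist, Slist)
#       theta, dtheta = euler_step(theta, dtheta, ddtheta, dt)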
|
11584484
|
try:
from logging import NullHandler
except ImportError: # Python 2.6
from logging import Handler
class NullHandler(Handler):
def emit(self, record):
pass
from six import indexbytes
try:
from ssl import SSLError
except ImportError:
class SSLError(Exception):
pass
try:
memoryview = memoryview
except NameError:
memoryview = buffer
def get_byte(x, index):
return indexbytes(x, index)
def get_character(x, index):
return chr(get_byte(x, index))
def decode_string(x):
return str(x.decode('unicode_escape'))
def encode_string(x):
return x.encode('utf-8')
|
11584493
|
import os
from onsset import SettlementProcessor
from pandas import DataFrame,Series,cut
from pandas.testing import assert_frame_equal, assert_series_equal
from pytest import fixture
class TestSettlementProcessor:
@fixture
def setup_settlementprocessor(self) -> SettlementProcessor:
csv_path = os.path.join('test', 'test_data', 'dj-test.csv')
settlementprocessor = SettlementProcessor(csv_path)
return settlementprocessor
def test_classify_road_distance(self, setup_settlementprocessor):
sp = setup_settlementprocessor
df = DataFrame({'RoadDist':[0,11.954,14.282,10.472]})
        actual = sp.classify_road_distance(df['RoadDist'])
        expected = Series([5, 3, 3, 3], name='RoadDist').astype(float)
        assert_series_equal(actual, expected)
|
11584541
|
import faceutils as futils
from smart_path import smart_path
from pathlib import Path
from PIL import Image
import fire
import numpy as np
import tqdm
from collections import defaultdict
import pickle
from config import config
import dlib
from multiprocessing import Pool
from functools import partial
import os.path as osp
detector = dlib.get_frontal_face_detector()
def fast_detect(image: Image.Image) -> list:
    # rescale the image, since detecting faces on a smaller image is faster
size = 600
width, height = image.size
ratio = size / max(width, height)
resize_image = image.resize((int(width * ratio), int(height * ratio)))
resize_faces = detector(np.asarray(resize_image), 1)
# no face detected
if len(resize_faces) < 1:
return []
# rescale face box back
left = int(resize_faces[0].left() / ratio)
right = int(resize_faces[0].right() / ratio)
top = int(resize_faces[0].top() / ratio)
bottom = int(resize_faces[0].bottom() / ratio)
face = dlib.rectangle(left, top, right, bottom)
return [face]
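# Usage sketch (requires dlib; the path is hypothetical):
#
#   img = Image.open("example.jpg")
#   faces = fast_detect(img)
#   if faces:
#       box = faces[0]
#       print(box.left(), box.top(), box.right(), box.bottom())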
def worker(image_path, out_dir):
try:
image = Image.open(image_path.open("rb"))
sub_dir = image_path.parent.name
file_name = image_path.name
out_file = out_dir.joinpath(sub_dir, file_name)
if not out_file.parent.exists():
out_file.parent.mkdir(parents=True, exist_ok=True)
with out_file.open("wb") as writer:
face = fast_detect(image)
if len(face) < 1:
return
face_on_image = face[0]
face_image, *_ = futils.dlib.crop(image, face_on_image, config.up_ratio, config.down_ratio, config.width_ratio)
face_image.save(writer, "PNG")
    except Exception:
        print('Exception while processing: {}'.format(image_path.name))
def main(
image_dir="/data/makeup-transfer/face-style/",
out_dir="/data/datasets/beauty/crop/makeup-transfer-oss/face-style-focused",
show=False):
"""
dirs can also be S3 path such as s3://a/bc/
"""
image_dir = smart_path(image_dir)
out_dir = smart_path(out_dir)
    partial_worker = partial(worker, out_dir=out_dir)  # worker accepts two parameters: image_path and out_dir
# multiprocessing
pool = Pool(8)
# TODO: tqdm bar, at now, you can look at out_dir to see progress
pool.map(partial_worker, image_dir.rglob("*"))
pool.close()
pool.join()
if __name__ == "__main__":
fire.Fire(main)
|
11584552
|
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import math
from torch.autograd import Variable
from torchvision.ops import box_iou
class GraphConvolution(nn.Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True, skip=True):
super(GraphConvolution, self).__init__()
self.skip = skip
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
# TODO make fc more efficient via "pack_padded_sequence"
# import ipdb; ipdb.set_trace()
support = torch.bmm(input, self.weight.unsqueeze(
0).expand(input.shape[0], -1, -1))
output = torch.bmm(adj, support)
#output = SparseMM(adj)(support)
if self.bias is not None:
output += self.bias.unsqueeze(0).expand(input.shape[0], -1, -1)
if self.skip:
output += support
return output
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
class GCN_sim(nn.Module):
"""
Adapt from https://github.com/SunDoge/L-GCN/
"""
def __init__(self, dim_in, dim_hidden, dim_out, dropout, num_layers):
super(GCN_sim, self).__init__()
assert num_layers >= 1
self.fc_k = nn.Linear(dim_in, dim_hidden)
self.fc_q = nn.Linear(dim_in, dim_hidden)
dim_hidden = dim_out if num_layers == 1 else dim_hidden
self.gcs = nn.ModuleList([
GraphConvolution(dim_in, dim_hidden)
])
for i in range(num_layers - 1):
dim_tmp = dim_out if i == num_layers-2 else dim_hidden
self.gcs.append(GraphConvolution(dim_hidden, dim_tmp))
self.dropout = dropout
def construct_graph(self, x, length):
# TODO make fc more efficient via "pack_padded_sequence"
emb_k = self.fc_k(x)
emb_q = self.fc_q(x)
s = torch.bmm(emb_k, emb_q.transpose(1, 2))
s_mask = s.data.new(*s.size()).fill_(1).bool() # [B, T1, T2]
# Init similarity mask using lengths
for i, (l_1, l_2) in enumerate(zip(length, length)):
s_mask[i][:l_1, :l_2] = 0
s_mask = Variable(s_mask)
s.data.masked_fill_(s_mask.data, -float("inf"))
a_weight = F.softmax(s, dim=2) # [B, t1, t2]
# remove nan from softmax on -inf
a_weight.data.masked_fill_(a_weight.data != a_weight.data, 0)
return a_weight
def forward(self, x, length):
adj_sim = self.construct_graph(x, length)
for gc in self.gcs:
x = F.relu(gc(x, adj_sim))
x = F.dropout(x, self.dropout, training=self.training)
return x, adj_sim
class GCN(nn.Module):
def __init__(self, dim_in, dim_hidden, dim_out, dropout, skip, num_layers):
super(GCN, self).__init__()
self.skip = skip
self.GCN_sim = GCN_sim(dim_in, dim_hidden, dim_out, dropout, num_layers)
def forward(self, x, length, bboxes=None):
out, adj_sim = self.GCN_sim(x, length)
if self.skip:
out += x
return out, adj_sim
if __name__ == '__main__':
model = GCN(512, 128, 512, 0.5, skip=True, num_layers=2)
bs, T, N = 1, 5, 5
n_node = T*N
input = torch.rand(bs, n_node, 512)
length = torch.LongTensor([n_node] * bs)
output = model(input, length)
|
11584584
|
import os
def find_egg_info_dir(dir):
    while True:
try:
filenames = os.listdir(dir)
except OSError:
# Probably permission denied or something
return None
for fn in filenames:
if (fn.endswith('.egg-info')
and os.path.isdir(os.path.join(dir, fn))):
return os.path.join(dir, fn)
parent = os.path.dirname(dir)
if parent == dir:
# Top-most directory
return None
dir = parent
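# Usage sketch: walks upwards from a starting directory, e.g.
#   find_egg_info_dir(os.getcwd())  # -> '/path/to/pkg.egg-info' or None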
|
11584588
|
import os
import sys
from setuptools import setup
if sys.version_info[0] != 3:
raise RuntimeError('Unsupported python version "{0}"'.format(
sys.version_info[0]))
def _get_file_content(file_name):
with open(file_name, 'r') as file_handler:
return str(file_handler.read())
def get_long_description():
return _get_file_content('README.md')
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if not on_rtd:
INSTALL_REQUIRES = [
'pandas',
'requests',
]
else:
INSTALL_REQUIRES = [
'requests',
]
setup(
name="youtube-data-api",
version='0.0.21',
author="<NAME>, <NAME>",
description="youtube-data-api is a Python wrapper for the YouTube Data API.",
long_description=get_long_description(),
long_description_content_type="text/markdown",
keywords='youtube-data-api youtube-data youtube-api wrapper youtube tweepy social-media',
url="https://github.com/mabrownnyu/youtube-data-api",
packages=['youtube_api'],
py_modules=['youtube_api'],
license="MIT",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
install_requires=INSTALL_REQUIRES
)
|
11584693
|
from fractal import IFS
code = [
[0.195, -0.488, 0.344, 0.443, 0.4431, 0.2452, 0.2],
[0.462, 0.414, -0.252, 0.361, 0.2511, 0.5692, 0.2],
[-0.637, 0, 0, 0.501, 0.8562, 0.2512, 0.2],
[-0.035, 0.07, -0.469, 0.022, 0.4884, 0.5069, 0.2],
[-0.058, -0.07, -0.453, -0.111, 0.5976, 0.0969, 0.2]
]
ifs = IFS([500,500])
ifs.setCoordinate()
ifs.setPx(500, 0, 0)
ifs.setIfsCode(code)
ifs.doIFS(200000)
ifs.wait()
|
11584738
|
import numpy as np
import argparse
from tqdm import tqdm
import yaml
from attrdict import AttrMap
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
from data import TestDataset
from utils import gpu_manage, save_image
from models.gen.unet import UNet
def predict(config, args):
gpu_manage(args)
dataset = TestDataset(args.test_dir)
data_loader = DataLoader(dataset=dataset, num_workers=config.threads, batch_size=1, shuffle=False)
### MODELS LOAD ###
print('===> Loading models')
    if config.gen_model == 'unet':
        gen = UNet(in_ch=config.in_ch, out_ch=config.out_ch, gpu_ids=args.gpu_ids)
    else:
        raise ValueError('unknown generator model: {}'.format(config.gen_model))
    param = torch.load(args.pretrained)
    gen.load_state_dict(param)
if args.cuda:
gen = gen.cuda(0)
with torch.no_grad():
for i, batch in enumerate(tqdm(data_loader)):
x = Variable(batch[0])
filename = batch[1][0]
if args.cuda:
x = x.cuda()
out = gen(x)
h = 1
w = 4
c = 3
p = config.size
allim = np.zeros((h, w, c, p, p))
x_ = x.cpu().numpy()[0]
out_ = out.cpu().numpy()[0]
in_rgb = x_[:3]
in_nir = x_[3]
out_rgb = np.clip(out_[:3], -1, 1)
out_cloud = np.clip(out_[3], -1, 1)
allim[0, 0, :] = np.repeat(in_nir[None, :, :], repeats=3, axis=0) * 127.5 + 127.5
allim[0, 1, :] = in_rgb * 127.5 + 127.5
allim[0, 2, :] = out_rgb * 127.5 + 127.5
allim[0, 3, :] = np.repeat(out_cloud[None, :, :], repeats=3, axis=0) * 127.5 + 127.5
allim = allim.transpose(0, 3, 1, 4, 2)
allim = allim.reshape((h*p, w*p, c))
save_image(args.out_dir, allim, i, 1, filename=filename)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, required=True)
parser.add_argument('--test_dir', type=str, required=True)
parser.add_argument('--out_dir', type=str, required=True)
parser.add_argument('--pretrained', type=str, required=True)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--gpu_ids', type=int, default=[0])
parser.add_argument('--manualSeed', type=int, default=0)
args = parser.parse_args()
with open(args.config, 'r') as f:
        config = yaml.safe_load(f)
config = AttrMap(config)
predict(config, args)
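# Example invocation (file names and paths are hypothetical):
#   python predict.py --config config.yml --test_dir ./test_images \
#       --out_dir ./results --pretrained gen_model.pth --cuda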
|
11584803
|
import os
import unittest
from unittest.mock import patch
from configue.yaml_loader import YamlLoader
class TestYamlLoader(unittest.TestCase):
def setUp(self):
os.environ["my_var"] = "my_value"
os.environ["my_int_var"] = "10"
os.environ["my_bool_var"] = "false"
os.environ["my_list_var"] = "my_value,my_list_value"
os.environ["file_name"] = "list_config"
os.environ["my_home"] = "~"
yaml_file_path = os.path.join(os.path.dirname(__file__), "yaml_loader_config.yaml")
self.yaml_loader = YamlLoader(yaml_file_path)
def test_load_path_from_yaml_file(self):
my_object_keys = self.yaml_loader.load()
self.assertEqual(os.path.join(os.path.dirname(__file__), "my_path.txt"), my_object_keys["test_path1"])
self.assertEqual(os.path.join(os.path.dirname(__file__), "my_path/my_value.txt"), my_object_keys["test_path2"])
self.assertEqual(os.path.expanduser("~/my_value.txt"), my_object_keys["test_path3"])
self.assertEqual(os.path.expanduser("~/my_value.txt"), my_object_keys["test_path4"])
self.assertEqual(os.path.join(os.path.dirname(__file__), "my_path/~.txt"), my_object_keys["test_path5"])
@patch("logging.Logger.warning")
def test_load_env_from_yaml_file(self, mock_warning):
my_object_keys = self.yaml_loader.load()
self.assertEqual("my_value", my_object_keys["test_env1"])
self.assertEqual("my_value", my_object_keys["test_env2"])
self.assertIsNone(my_object_keys["test_env3"])
self.assertEqual("default_value", my_object_keys["test_env4"])
self.assertEqual("premy_valuepost", my_object_keys["test_env5"])
self.assertEqual("10", my_object_keys["test_env6"])
self.assertEqual(10, my_object_keys["test_env7"])
self.assertFalse(my_object_keys["test_env8"])
self.assertEqual("pre my_value and 10 post", my_object_keys["test_env9"])
mock_warning.assert_called_once_with(
"Missing environment var: 'my_unknown_var_without_default', no default is set")
def test_load_import_from_yaml_file(self):
my_object_keys = self.yaml_loader.load()
self.assertEqual(["my_str_value", "my_value"], my_object_keys["test_import_1"])
self.assertEqual(["my_str_value", "my_value"], my_object_keys["test_import_2"])
self.assertEqual(os.path.join(os.path.dirname(__file__), "sub_folder/my_file.txt"),
my_object_keys["test_import_3"])
self.assertEqual(os.path.join(os.path.dirname(__file__), "my_file.txt"), my_object_keys["test_import_4"])
def test_load_list_from_yaml_file(self):
my_object_keys = self.yaml_loader.load()
self.assertEqual(["my_value", "my_other_value"], my_object_keys["test_list_1"])
self.assertEqual(["my_value", "my_other_value"], my_object_keys["test_list_2"])
self.assertEqual(["my_value", "my_list_value"], my_object_keys["test_list_3"])
def test_load_unicode_from_yaml_file(self):
my_object_keys = self.yaml_loader.load()
self.assertEqual("🤖‼️", my_object_keys["test_unicode"])
|
11584805
|
import os
import random
import time
import warnings
import json
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from warmup_scheduler import GradualWarmupScheduler
from models import resnet_generic
from models.eb_resnet import EBBasicBlock, EBDeepBasicBlock
from bnn import BConfig, prepare_binary_model
from bnn.ops import BasicInputBinarizer, BasicScaleBinarizer, XNORWeightBinarizer
from utils.mixup import mixup_criterion, mixup_data
from utils.distillation_losses import LogitMatch, AttentionMatching
from utils.misc import *
from models.ebconv import EBConv2d
from opts import parser
best_acc1 = 0
def main():
args = parser.parse_args()
args_dict = vars(args)
print(args_dict)
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if not os.path.isdir(args.output_dir):
os.makedirs(args.output_dir)
with open(f'{args.output_dir}/args.txt', 'w') as fd:
json.dump(args_dict, fd, indent=4)
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(
main_worker,
nprocs=ngpus_per_node,
args=(
ngpus_per_node,
args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank)
num_classes = 1000
    ignore_layers_name = [
        'conv1',
        'fc',
        r'^layer[0-9]+\.0\.downsample\.[0-9]+$']
# create model
print('=> creating model ...')
model = resnet_generic(
block_type=EBDeepBasicBlock if args.add_g_layer else EBBasicBlock,
structure=args.structure,
groups=args.num_groups,
expansion=args.expansion,
stem_type=args.stem_type,
num_classes=num_classes,
activation=nn.PReLU,
num_experts=args.num_experts,
use_only_first=args.use_only_first,
use_se=args.use_se,
downsample_ratio=args.downsample_ratio
)
bconfig = BConfig(
activation_pre_process=BasicInputBinarizer if args.binary_activations else nn.Identity,
activation_post_process=BasicScaleBinarizer,
weight_pre_process=XNORWeightBinarizer.with_args(
compute_alpha=False) if args.binary_weights else nn.Identity)
model = prepare_binary_model(
model,
bconfig=bconfig,
modules_mapping={
EBConv2d: EBConv2d},
ignore_layers_name=ignore_layers_name)
print(model)
    print(f'Num parameters: {count_parameters(model)}')
# Load teacher config if needed
if args.teacher_config != '':
with open(args.teacher_config, 'r') as fd:
teacher_args = json.load(fd)
teacher = None
if args.teacher != '':
print('=> creating teacher model ')
teacher = resnet_generic(
block_type=EBDeepBasicBlock if teacher_args['add_g_layer'] else EBBasicBlock,
structure=teacher_args['structure'],
groups=teacher_args['num_groups'],
expansion=teacher_args['expansion'],
stem_type=teacher_args['stem_type'],
num_classes=num_classes,
activation=nn.PReLU,
num_experts=teacher_args['num_experts'],
use_only_first=teacher_args['use_only_first'],
use_se=teacher_args['use_se'],
downsample_ratio=teacher_args['downsample_ratio'])
bconfig = BConfig(
activation_pre_process=BasicInputBinarizer if teacher_args['binary_activations'] else nn.Identity,
activation_post_process=BasicScaleBinarizer,
weight_pre_process=XNORWeightBinarizer.with_args(
compute_alpha=False) if teacher_args['binary_weights'] else nn.Identity)
teacher = prepare_binary_model(
teacher,
bconfig=bconfig,
modules_mapping={
EBConv2d: EBConv2d},
ignore_layers_name=ignore_layers_name)
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
if teacher is not None:
teacher.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int(
(args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.gpu])
if teacher is not None:
teacher = torch.nn.parallel.DistributedDataParallel(
teacher, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
if teacher is not None:
teacher.cuda()
teacher = torch.nn.parallel.DistributedDataParallel(teacher)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
if teacher is not None:
teacher = teacher.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available
# GPUs
model = torch.nn.DataParallel(model).cuda()
if teacher is not None:
teacher = torch.nn.DataParallel(teacher).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
criterion_kd = LogitMatch(
T=args.lab_match_T,
weight=args.lab_match_w) if args.lab_match else None
criterion_att = AttentionMatching(
args.att_transfer_weighting,
args.att_transfer_indicator) if args.att_transfer else None
parameters = model.parameters()
if args.optimizer == 'adamw':
wd = 0 if args.binary_weights else args.weight_decay
optimizer = torch.optim.AdamW(parameters, args.lr, weight_decay=wd)
elif args.optimizer == 'adam':
optimizer = torch.optim.Adam(parameters, args.lr)
elif args.optimizer == 'sgd':
wd = 0 if args.binary_weights else args.weight_decay
optimizer = torch.optim.SGD(parameters, args.lr,
momentum=args.momentum,
weight_decay=wd)
else:
raise ValueError(f'Unknown optimizer selected: {args.optimizer}')
if args.scheduler == 'multistep':
milestone = [40, 70, 80, 100, 110]
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=[x - args.warmup for x in milestone], gamma=0.1)
elif args.scheduler == 'cosine':
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, float(args.epochs - args.warmup), eta_min=0)
else:
        raise ValueError(f'Unknown scheduler selected: {args.scheduler}')
if args.warmup > 0:
print(f'=> Applying warmup ({args.warmup} epochs)')
lr_scheduler = GradualWarmupScheduler(
optimizer,
multiplier=1,
total_epoch=args.warmup,
after_scheduler=lr_scheduler)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
if args.resume_epoch:
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
pass
# best_acc1 may be from a checkpoint from a different GPU
#best_acc1 = best_acc1.to(args.gpu)
try:
model.load_state_dict(checkpoint['state_dict'])
if not ('adam' in args.optimizer and 'sgd' in args.resume):
print('=> Loading optimizer...')
# optimizer.load_state_dict(checkpoint['optimizer'])
except BaseException:
print('=> Warning: dict model mismatch, loading with strict = False')
model.load_state_dict(checkpoint['state_dict'], strict=False)
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
# Reset learning rate
for g in optimizer.param_groups:
g['lr'] = args.lr
if args.expansion_stage:
print('Expanding the weights...')
for module in model.modules():
if isinstance(module, EBConv2d):
if not isinstance(
module.activation_pre_process,
nn.Identity):
print(
f'Init module with w shape = {module.weight.size()}')
for i in range(1, args.num_experts):
module.weight.data[i, ...].copy_(
module.weight.data[0, ...])
if args.start_epoch > 0:
print(f'Advancing the scheduler to epoch {args.start_epoch}')
for i in range(args.start_epoch):
lr_scheduler.step()
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'valid')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transforms_train = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
transforms_val = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
train_dataset = datasets.ImageFolder(
traindir,
transforms_train)
val_dataset = datasets.ImageFolder(valdir, transforms_val)
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=(
train_sampler is None),
num_workers=args.workers,
pin_memory=True,
sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
show_logs = (
not args.multiprocessing_distributed) or (
args.multiprocessing_distributed and args.rank %
ngpus_per_node == 0)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
if args.scheduler == 'cosine':
lr_scheduler.step(epoch)
else:
lr_scheduler.step()
if show_logs:
print(f'New lr: {lr_scheduler.get_last_lr()}')
# train for one epoch
train(
train_loader,
model,
teacher,
criterion,
optimizer,
epoch,
args,
criterion_kd=criterion_kd,
criterion_att=criterion_att,
show_logs=show_logs)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args, show_logs)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
print(f'Current best: {best_acc1}')
if show_logs:
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer': optimizer.state_dict(),
}, is_best, args.output_dir)
def train(
train_loader,
model,
teacher,
criterion,
optimizer,
epoch,
args,
criterion_kd=None,
criterion_att=None,
show_logs=True):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
all_meters = [batch_time, data_time, losses, top1, top5]
if criterion_kd is not None:
losses_kd = AverageMeter('Loss KD', ':.4e')
all_meters.append(losses_kd)
if criterion_att is not None:
losses_att = AverageMeter('Loss Att', ':.4e')
all_meters.append(losses_att)
progress = ProgressMeter(
len(train_loader),
all_meters,
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
if args.use_mixup:
images, target_a, target_b, lam = mixup_data(
images, target, args.alpha)
if teacher is not None:
with torch.no_grad():
output_teacher, teacher_interim = teacher(images)
# compute output
output, interim = model(images)
loss = mixup_criterion(
criterion,
output,
target_a,
target_b,
lam) if args.use_mixup else criterion(
output,
target)
loss_att = criterion_att(
interim, teacher_interim) if criterion_att is not None else 0
loss += loss_att
loss_kd = criterion_kd(
output_s=output,
output_t=output_teacher) if criterion_kd is not None else 0
loss += loss_kd
# measure accuracy and record loss
if args.use_mixup:
acc1a, acc5a = accuracy(output, target_a, topk=(1, 5))
acc1b, acc5b = accuracy(output, target_b, topk=(1, 5))
acc1 = lam * acc1a + (1 - lam) * acc1b
acc5 = lam * acc5a + (1 - lam) * acc5b
else:
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
if criterion_kd is not None:
losses_kd.update(loss_kd.item(), images.size(0))
if criterion_att is not None:
losses_att.update(loss_att.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 and show_logs:
progress.display(i)
def validate(val_loader, model, criterion, args, show_logs=True):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
output, _ = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 and show_logs:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
if show_logs:
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
if __name__ == '__main__':
main()
|
11584806
|
import os
import numpy as np
import cv2
import torch.nn as nn
import torch.nn.functional as F
import torch
import math
import time
from typing import List
from visualDet3D.networks.utils import DETECTOR_DICT
from visualDet3D.networks.backbones import resnet
from visualDet3D.networks.lib.coordconv import CoordinateConv
from visualDet3D.networks.lib.blocks import ConvBnReLU
from visualDet3D.networks.detectors.unet.u_net import UNet_Core
from visualDet3D.networks.heads.monodepth_loss import MonodepthLoss
def preprocess_sum_avg(sum_pred: np.ndarray, num_pred: np.ndarray) -> float:
    # global mean of the precomputed log-depth statistics
    return np.sum(sum_pred) / np.sum(num_pred)
def reshape_depth(gt_depth, shape):
"""shape[H, W]
"""
mask = gt_depth < 0.1
inverse_gt = 1.0 / (gt_depth + 1e-9)
inverse_gt[mask] = 1e-9
inverse_gt_reshape = F.adaptive_max_pool2d(inverse_gt, shape)
reshaped_gt = 1.0 / (inverse_gt_reshape + 1e-9)
reshaped_gt[inverse_gt_reshape < 1e-8] = 0
return reshaped_gt
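# Note on reshape_depth above: max-pooling the *inverse* depth selects the
# smallest (nearest) valid depth in each output cell. Invalid pixels
# (gt < 0.1) are set to an inverse of 1e-9, so any valid pixel wins the
# pooling; cells with no valid pixel stay below 1e-8 and are zeroed out
# after re-inversion.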
@DETECTOR_DICT.register_module
class MonoDepth(nn.Module):
""" MonoDepthDorn modified from
https://arxiv.org/pdf/1806.02446.pdf
"""
def __init__(self, network_cfg):
super(MonoDepth, self).__init__()
self.max_depth = getattr(network_cfg, 'max_depth', 50)
self.output_channel = getattr(network_cfg, 'output_channel', 1)
self.backbone_arguments = getattr(network_cfg, 'backbone')
feature_size = getattr(network_cfg, 'feature_size', 256)
self.SI_loss_lambda = getattr(network_cfg, 'SI_loss_lambda', 0.3)
self.smooth_weight = getattr(network_cfg, 'smooth_loss_weight', 0.003)
self.minor_weight = getattr(network_cfg, 'minor_weight', 0.000)
sum_file = os.path.join(network_cfg.preprocessed_path, 'training', 'log_depth_sum.npy')
num_file = os.path.join(network_cfg.preprocessed_path, 'training', 'log_depth_solid.npy')
sum_precompute = np.load(sum_file) #[H]
num_precompute = np.load(num_file) #[H]
self.register_buffer("prior_mean", torch.tensor(preprocess_sum_avg(sum_precompute, num_precompute), dtype=torch.float32))
self.core = UNet_Core(3, self.output_channel, backbone_arguments=self.backbone_arguments)
self.semi_loss = MonodepthLoss()
def training_forward(self, img_batch:torch.FloatTensor, K:torch.FloatTensor, gts:torch.FloatTensor):
"""Forward methods in training.
Args:
img_batch (torch.FloatTensor): [B, C, H, W] tensor
K (torch.FloatTensor): calibration matrix [B, 3, 3]
gts (torch.FloatTensor): [B, H, W] unnormalized depth map tensor
Returns:
loss (torch.Tensor)
loss_dict (Dict[str, float])
"""
N, C, H, W = img_batch.shape
feat = self.core(img_batch, K)
loss = 0
for key in feat:
#base = F.adaptive_avg_pool1d(self.prior_mean.reshape([1, 1, -1]), feat[key].shape[2])
depth_prediction = torch.exp(self.prior_mean + feat[key]).squeeze(1)
shape = [depth_prediction.shape[1], depth_prediction.shape[2]]
reshaped_gt = reshape_depth(gts, shape)
diff = torch.log(depth_prediction) - torch.log(reshaped_gt)
num_pixels = torch.sum((reshaped_gt > 0.1) * (reshaped_gt < self.max_depth))
diff = torch.where(
(reshaped_gt > 0.1) * (reshaped_gt < self.max_depth) * (torch.abs(diff) > 0.001),
diff,
torch.zeros_like(diff)
)
lamda = self.SI_loss_lambda
loss1 = torch.sum(diff ** 2) / num_pixels - lamda * ((torch.sum(diff) / num_pixels) ** 2)
smooth_loss = self.semi_loss.smooth_loss(feat[key], F.adaptive_avg_pool2d(img_batch, shape))
if key == 'scale_1':
loss += (loss1 + self.smooth_weight * smooth_loss)
else:
loss += self.minor_weight * (loss1 + self.smooth_weight * smooth_loss)
loss_dict = dict(total_loss=loss)
return loss, loss_dict
def test_forward(self, img_batch:torch.Tensor, P2:torch.Tensor):
"""Forward methods for testing
Args:
img_batch (torch.Tensor): image inputs [B, C, H ,W]
P2 (torch.Tensor): camera calibration [B, 3, 3]
Returns:
Dict[str, torch.Tensor]: predicted unnormalized depth map.
"""
N, C, H, W = img_batch.shape
feat = self.core(img_batch, P2)
# depth_prediction = 1/torch.sigmoid(feat) - 1
# softmax_feat = torch.softmax(feat, dim=1)
depth_prediction = torch.exp(self.prior_mean + feat['scale_1'])
# depth_prediction = (self.depths * softmax_feat).sum(dim=1, keepdim=True)
assert(torch.all(depth_prediction > 0))
return {"target": depth_prediction} #, "prob": [prob], "label": [label]}
def forward(self, inputs):
if isinstance(inputs, list) and len(inputs) == 3:
img_batch, K, gts = inputs
return self.training_forward(img_batch, img_batch.new(K), gts)
else:
img_batch, K = inputs
return self.test_forward(img_batch, K)
|
11584821
|
from lxml import html
from pprint import pprint # noqa
API_URL = "https://www.eccourts.org/api/get_posts/"
def clean_text(text):
try:
return html.fromstring(text).text
except Exception:
return text
def emit_attachment(context, post, attachment):
meta = {
"title": clean_text(attachment.get("title")),
"summary": clean_text(attachment.get("description")),
"languages": ["en"],
"author": post.get("author", {}).get("name"),
"published_at": post.get("date"),
"modified_at": post.get("modified"),
"mime_type": attachment.get("mime_type"),
"foreign_id": attachment.get("url"),
"url": attachment.get("url"),
"keywords": [],
}
for cat in post.get("categories", []):
meta["keywords"].append(cat.get("title"))
context.emit(data=meta)
def posts(context, data):
page = data.get("page", 0)
result = context.http.get(API_URL, params={"page": page})
if not result.ok:
context.emit_warning("Response failure: %r" % result)
return
for post in result.json.get("posts"):
for attachment in post.get("attachments"):
if "image/" in attachment.get("mime_type"):
continue
emit_attachment(context, post, attachment)
pages = result.json.get("pages")
if pages > page:
context.recurse(data={"page": page + 1})
|
11584841
|
import fnmatch
import os
from typing import List
def is_dir(path: str) -> bool:
return isinstance(path, str) and os.path.exists(path) and os.path.isdir(path)
def is_file(path: str) -> bool:
return isinstance(path, str) and os.path.exists(path) and os.path.isfile(path)
def get_file_extension(path: str) -> str:
return os.path.splitext(path)[1] if is_file(path) else ''
def get_file_name(path: str) -> str:
return os.path.splitext(os.path.basename(path))[0] if is_file(path) else ''
def make_dir_if_not_exists(path: str):
if not is_dir(path): os.makedirs(path)
def get_files_in_dir(path: str, pattern: str = '*'):
for root, _, files in os.walk(path):
for f in files:
if fnmatch.fnmatch(f, pattern):
yield os.path.realpath(os.path.join(root, f))
def get_immediate_subdirs(path: str) -> List[str]:
return [os.path.join(path, s) for s in next(os.walk(path))[1]] if is_dir(path) else []
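# Usage sketch (paths are hypothetical):
#
#   make_dir_if_not_exists('/tmp/out')
#   for path in get_files_in_dir('/tmp/out', '*.txt'):
#       print(get_file_name(path), get_file_extension(path))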
|
11584896
|
import pytest
from django_performance_testing.queries import classify_query
@pytest.mark.parametrize('sql', [
'QUERY = u\'SELECT "auth_group"."id", "auth_group"."name" FROM "auth_group"\' - PARAMS = ()', # noqa
'QUERY = \'SELECT "auth_group"."id", "auth_group"."name" FROM "auth_group"\' - PARAMS = ()', # noqa
], ids=['py2', 'py3'])
def test_can_classify_select(sql):
assert 'read' == classify_query(sql)
@pytest.mark.parametrize('sql', [
'QUERY = \'INSERT INTO "auth_group" ("name") VALUES (%s)\' - PARAMS = (\'foo\',)', # noqa
'QUERY = u\'INSERT INTO "auth_group" ("name") VALUES (%s)\' - PARAMS = (\'foo\',)', # noqa
], ids=['py2', 'py3'])
def test_can_classify_insert(sql):
assert 'write' == classify_query(sql)
@pytest.mark.parametrize('sql', [
'QUERY = \'UPDATE "auth_group" SET "name" = %s\' - PARAMS = (\'bar\',)', # noqa
'QUERY = u\'UPDATE "auth_group" SET "name" = %s\' - PARAMS = (\'bar\',)', # noqa
], ids=['py2', 'py3'])
def test_can_classify_update(sql):
assert 'write' == classify_query(sql)
@pytest.mark.parametrize('sql', [
'QUERY = \'DELETE FROM "auth_group" WHERE "auth_group"."id" IN (%s)\' - PARAMS = (1,)', # noqa
'QUERY = u\'DELETE FROM "auth_group" WHERE "auth_group"."id" IN (%s)\' - PARAMS = (1,)', # noqa
], ids=['py2', 'py3'])
def test_can_classify_delete(sql):
assert 'write' == classify_query(sql)
def test_can_classify_even_if_it_doesnt_have_the_query_prefix():
sql = 'SELECT "auth_group"."id", "auth_group"."name" FROM "auth_group"'
assert 'read' == classify_query(sql)
def test_can_classify_even_if_it_has_quotes_inside():
sql = 'SELECT \'auth_group\'.\'id\', "auth_group"."name" FROM "auth_group"'
assert 'read' == classify_query(sql)
def test_unrecognizable_statement_is_classified_as_other():
sql = 'unrecognizable sql statement'
assert 'other' == classify_query(sql)
|
11584898
|
from airdraw.config import BaseConfig
class AppConfig(BaseConfig):
NAME = 'airdraw'
VERSION = (0, 1, 0)
WINDOW_WIDTH = 1024
WINDOW_HEIGHT = 768
CANVAS_BACKGROUND_COLOR = '#000000'
|
11584899
|
import bottle
class Plugin(object):
'''Bottle plugin to handle SSL client certificates.'''
name = 'one.infinit.ssl-client-certificate'
api = 2
key_dn = 'SSL_CLIENT_DN'
key_ok = 'SSL_CLIENT_VERIFIED'
def __init__(self):
pass
def apply(self, callback, route):
def wrapper(*args, **kwargs):
environ = bottle.request.environ
bottle.request.certificate = None
if Plugin.key_ok in environ and Plugin.key_dn in environ:
if environ[Plugin.key_ok] == 'SUCCESS':
try:
dn = environ[Plugin.key_dn]
field = dn.split('/')[-1]
email = field.split('=')[-1]
                    except Exception:
                        pass
else:
bottle.request.certificate = email
return callback(*args, **kwargs)
return wrapper
|
11584919
|
import argparse
import os
import sys
from .mapping import AttackMapping
def parse_args():
args = argparse.ArgumentParser(prog="attack-lookup", description="MITRE ATT&CK Lookup Tool", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
args.add_argument("-v", "--version", default="v10.1", help="ATT&CK matrix version to use")
args.add_argument("-m", "--matrix", choices=["enterprise", "ics", "mobile"], default="enterprise", help="ATT&CK matrix to use")
args.add_argument("-O", "--offline", action="store_true", help="Run in offline mode")
args.add_argument("-i", "--input", help="Path to input file (one lookup value per line)")
args.add_argument("-o", "--output", default="-", help="Path to output file")
args.add_argument("--output-mode", choices=["results", "csv"], default="results", help="Mode for output file (\"result\" only has the lookup results, \"csv\" outputs a CSV with the lookup and result values")
return args.parse_args()
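# Example invocations (values are illustrative):
#   attack-lookup                                      # interactive mode
#   attack-lookup -m ics -i ids.txt -o out.csv --output-mode csv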
def do_interactive(mapping: AttackMapping):
print("Running attack-lookup in interactive mode, exit with (q)uit. Enter one or more values to lookup, separated by a comma.")
while True:
# get input
try:
in_str = input("ATT&CK> ")
except (EOFError, KeyboardInterrupt):
print("") # this adds a newline and makes the CLI prompt cleaner when exiting
break
# check if we are quitting
if in_str.lower() in ("q", "quit"):
break
# not quitting, do the lookups
for x in in_str.split(","):
print(mapping.lookup(x))
def do_batch(mapping: AttackMapping, input_file: str, output_file: str, output_mode: str) -> bool:
# read in the input file
try:
with open(input_file, "r") as f:
input_data = [x.strip() for x in f.readlines()]
except FileNotFoundError:
print(f"Failed to open {input_file}, is the path/permissions correct?")
return False
# do the item lookups
output_items = []
for i in input_data:
output_items.append(mapping.lookup(i))
# prepare the output data
output_data = ""
if output_mode == "results":
# no input data needed in the output
output_data = os.linesep.join(output_items)
elif output_mode == "csv":
# output data should be a CSV, each line with the input and output values
data = []
# make sure input and output data is the same length (this should always pass)
assert len(input_data) == len(output_items)
# build the csv lines
for i in range(len(input_data)):
data.append(",".join([input_data[i], output_items[i]]))
# make the file contents
output_data = os.linesep.join(data)
if output_file == "-":
# output should just go to stdout
print(output_data)
else:
# output to file path
try:
with open(output_file, "w") as f:
f.write(output_data)
f.write(os.linesep)
print(f"Wrote output data to {output_file}")
except PermissionError:
print(f"Failed to write to {output_file}, bad path/permissions?")
return False
return True
def main():
args = parse_args()
# load the proper att&ck mapping
mapping = AttackMapping(args.matrix, args.version, args.offline)
if not mapping.load_data():
return
# do interactive mode if no input file was specified
if not args.input:
do_interactive(mapping)
return
# input file present, run in batch mode
if not do_batch(mapping, args.input, args.output, args.output_mode):
sys.exit(1)
if __name__ == "__main__":
main()
|
11584975
|
import torch
import torch.nn as nn
import rdkit.Chem as Chem
import torch.nn.functional as F
from fuseprop.nnutils import *
from fuseprop.mol_graph import MolGraph
from fuseprop.rnn import GRU, LSTM
class MPNEncoder(nn.Module):
def __init__(self, rnn_type, input_size, node_fdim, hidden_size, depth):
super(MPNEncoder, self).__init__()
self.hidden_size = hidden_size
self.input_size = input_size
self.depth = depth
self.W_o = nn.Sequential(
nn.Linear(node_fdim + hidden_size, hidden_size),
nn.ReLU()
)
if rnn_type == 'GRU':
self.rnn = GRU(input_size, hidden_size, depth)
elif rnn_type == 'LSTM':
self.rnn = LSTM(input_size, hidden_size, depth)
else:
raise ValueError('unsupported rnn cell type ' + rnn_type)
def forward(self, fnode, fmess, agraph, bgraph, mask):
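        # Run message passing over the bond graph, then sum the incoming
        # edge messages for each atom and mix them with the raw atom
        # features through the output layer W_o.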
h = self.rnn(fmess, bgraph)
h = self.rnn.get_hidden_state(h)
nei_message = index_select_ND(h, 0, agraph)
nei_message = nei_message.sum(dim=1)
node_hiddens = torch.cat([fnode, nei_message], dim=1)
node_hiddens = self.W_o(node_hiddens)
if mask is None:
mask = torch.ones(node_hiddens.size(0), 1, device=fnode.device)
mask[0, 0] = 0 #first node is padding
return node_hiddens * mask, h
class GraphEncoder(nn.Module):
def __init__(self, avocab, rnn_type, embed_size, hidden_size, depth):
super(GraphEncoder, self).__init__()
self.avocab = avocab
self.hidden_size = hidden_size
self.atom_size = atom_size = avocab.size() + MolGraph.MAX_POS
self.bond_size = bond_size = len(MolGraph.BOND_LIST)
self.E_a = torch.eye( avocab.size() ).cuda()
self.E_b = torch.eye( len(MolGraph.BOND_LIST) ).cuda()
self.E_pos = torch.eye( MolGraph.MAX_POS ).cuda()
self.encoder = MPNEncoder(rnn_type, atom_size + bond_size, atom_size, hidden_size, depth)
def embed_graph(self, graph_tensors):
fnode, fmess, agraph, bgraph, _ = graph_tensors
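        # Node features: one-hot atom type (E_a) concatenated with a one-hot
        # position (E_pos). Message features: the source node's features
        # concatenated with a one-hot bond type (E_b).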
fnode1 = self.E_a.index_select(index=fnode[:, 0], dim=0)
fnode2 = self.E_pos.index_select(index=fnode[:, 1], dim=0)
hnode = torch.cat([fnode1, fnode2], dim=-1)
fmess1 = hnode.index_select(index=fmess[:, 0], dim=0)
fmess2 = self.E_b.index_select(index=fmess[:, 2], dim=0)
hmess = torch.cat([fmess1, fmess2], dim=-1)
return hnode, hmess, agraph, bgraph
def forward(self, graph_tensors):
tensors = self.embed_graph(graph_tensors)
hatom,_ = self.encoder(*tensors, mask=None)
return hatom
|
11585008
|
import contextlib
import shutil
import tempfile
@contextlib.contextmanager
def tempdir():
    temp = tempfile.mkdtemp()
    try:
        yield temp
    finally:
        # Clean up even if the body of the with-block raises.
        shutil.rmtree(temp)
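# Minimal usage sketch: the directory exists inside the block and is removed
# afterwards, even if the block raises (assumes `os` is imported):
#
#     with tempdir() as path:
#         with open(os.path.join(path, 'scratch.txt'), 'w') as f:
#             f.write('data')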
|
11585033
|
import numpy as np
import codecs
import cv2
import matplotlib.pylab as plt
import read_data
import config
train_images_dir = config.train_images_dir
train_labels_dir = config.train_labels_dir
save_images_dir = "train_images/resize_images/"
save_labels_dir = "train_images/resize_ground_truth/"
def test_main():
image = cv2.imread(train_images_dir + "img_1.jpg")
shape = image.shape
image_resized = cv2.resize(image, (512, 512))
cv2.imwrite(save_images_dir + "img_1.jpg", image_resized)
import IPython
labels = read_data.read_ground_truth(train_labels_dir, 1)
j = 0
    IPython.embed()  # debug leftover: drops into an interactive shell here
for box in labels[0]:
img_crop = image[box[1]: box[5], box[0]:box[4]]
cv2.imwrite(save_images_dir + str(j) + "img.jpg", img_crop)
j += 1
for box in labels[0]:
        for i in range(0, 8, 2):
            box[i] = int(box[i] * 512 / shape[1])
        for i in range(1, 8, 2):
            box[i] = int(box[i] * 512 / shape[0])
img_crop = image_resized[box[1]: box[5], box[0]:box[4]]
cv2.imwrite(save_images_dir + str(j) + "img.jpg", img_crop)
j += 1
def resize_images():
all_images = read_data.read_datasets(train_images_dir, config.all_trains, order="hwc")
i = 1
for image in all_images:
image = cv2.resize(image, (512, 512))
cv2.imwrite(save_images_dir + "img_" + str(i) + ".jpg", image)
if i % 100 == 0:
print(i)
i += 1
def resize_labels():
all_labels = read_data.read_ground_truth(train_labels_dir, config.all_trains)
image_width = config.image_width
    image_height = config.image_height
j = 1
for boxes in all_labels:
with open(save_labels_dir + "gt_img_" + str(j) + ".txt", "w") as f:
for box in boxes:
                for i in range(0, 8, 2):
                    box[i] = int(box[i] * 512 / image_width)
                for i in range(1, 8, 2):
                    box[i] = int(box[i] * 512 / image_height)
for num in box:
f.write(str(num) + ", ")
f.write("\n")
j += 1
def main():
resize_labels()
if __name__ == "__main__":
main()
|
11585051
|
import os
import time
def map_maybe(f, lst):
return [f(x) if x is not None else None for x in lst]
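# e.g. map_maybe(str.upper, ['a', None, 'b']) -> ['A', None, 'B']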
def measure(f):
t0 = time.time()
result = f()
duration = time.time() - t0
return duration, result
def one_based_range(n):
return range(1, 1 + n)
def show_duration(duration):
if duration < 1:
return '%.2fms' % (duration * 1e3)
if duration < 60:
return '%.2fs' % duration
    sec = int(duration)
    mm, ss = sec // 60, sec % 60
    if duration < 3600:
        return '%dm%ds' % (mm, ss)
    return '%dh%dm%ds' % (mm // 60, mm % 60, ss)
def _since_job_start():
t0 = os.getenv('KUNGFU_JOB_START_TIMESTAMP') or '0'
t0 = int(t0)
return time.time() - t0
def _since_proc_start():
t0 = os.getenv('KUNGFU_PROC_START_TIMESTAMP') or '0'
t0 = int(t0)
return time.time() - t0
def _log_event(name):
    t1 = time.time()
    d = _since_proc_start()
    print('TS=%f %s :: %s since proc started' % (t1, name, show_duration(d)))
|
11585100
|
from pathlib import Path
import torch
import numpy as np
from torchvision import transforms as trans
import json
class Config(object):
data_path = Path('data')
coco_path = data_path/'coco2014'
anno_path = coco_path/'annotations'
train_path = coco_path/'train2017'
val_path = coco_path/'val2017'
test_path = coco_path/'test2017'
train_anno_path = anno_path/'instances_train2017.json'
val_anno_path = anno_path/'instances_val2017.json'
pretrained_model_path = 'models/lighthead-rcnn-extractor-pretrained.pth'
work_space = Path('work_space')
log_path = work_space/'log'
min_sizes = [600, 700, 800, 900, 1000]
# min_sizes = [1000] # delete this when finish debug
max_size = 1400
class_num = 80
roi_size = 7
font_size = 10
spatial_scale = 1/16.
with open(data_path/'coco_maps.json', 'r') as f:
maps = json.load(f)
correct_id_2_class = maps[2]
board_loss_interval = 100
eval_interval = 10
eval_coco_interval = 4
board_pred_image_interval = 5
save_interval = 12
eva_num_during_training = 500
coco_eva_num_during_training = 600
# test only
# eva_num_during_training = 10
# coco_eva_num_during_training = 12
    mean = np.array([[[122.7717]], [[115.9465]], [[102.9801]]], dtype=np.float64)
# std = [1., 1., 1.]
# transform = trans.Compose([
# trans.Normalize(mean, std),
# lambda img : img * 255.
# ])
# std_tensor = torch.Tensor(std).view(3,1,1)
mean_tensor = torch.Tensor(mean)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
batch_size = 1
lr = 0.0003
momentum = 0.9
weight_decay = 1e-4
rpn_sigma = 3.
roi_sigma = 1.
n_ohem_sample = 256
loc_normalize_mean = (0., 0., 0., 0.)
loc_normalize_std = (0.1, 0.1, 0.2, 0.2)
softnms_Nt = 0.3
softnms_method = 2
softnms_sigma = 0.5
softnms_min_score = 0.001
loc_std_tensor = torch.tensor(loc_normalize_std, dtype=torch.float).to(device)
loc_mean_tensor = torch.tensor(loc_normalize_mean, dtype=torch.float).to(device)
|
11585112
|
from lightbus import Api, Event, Parameter
from lightbus.creation import create
bus = create()
class AuthApi(Api):
user_registered = Event(parameters=[Parameter("username", str)])
class Meta:
name = "auth"
def check_password(self, username: str, password: str):
return username == "admin" and password == "<PASSWORD>"
|
11585135
|
from mkreports import md
def test_list():
basic_text = (
md.H1("First header") + md.H2("Second header") + md.P("This is a paragraph")
)
assert basic_text.body.format_text(" ", "a") == (
"\n\n# First header\n\n## Second header\n\nThis is a paragraph\n\n"
)
|
11585143
|
from credmark.cmf.engine.model_unittest import ModelTestCase, model_context
class ExampleEchoTest(ModelTestCase):
@model_context(chain_id=1, block_number=12345)
def test_echo(self):
# sanity check that the context is as expected
self.assertEqual(self.context.block_number, 12345)
# run the model
message = 'echo-test'
output = self.context.models.example.model(message=message)
self.logger.debug(output)
self.assertEqual(output['title'], '1. Example - Model')
self.assertEqual(output['echo'],
f'{message} from block: {self.context.block_number}')
self.assertIsNotNone(output.get('logs'))
|
11585186
|
import datetime
import binsync.data
from . import ui_version
if ui_version == "PySide2":
from PySide2.QtWidgets import QVBoxLayout, QGroupBox, QWidget, QLabel, QTabWidget, QTableWidget, QStatusBar
from PySide2.QtCore import Signal
elif ui_version == "PySide6":
from PySide6.QtWidgets import QVBoxLayout, QGroupBox, QWidget, QLabel, QTabWidget, QTableWidget, QStatusBar
from PySide6.QtCore import Signal
else:
from PyQt5.QtWidgets import QVBoxLayout, QGroupBox, QWidget, QLabel, QTabWidget, QTableWidget, QStatusBar
from PyQt5.QtCore import pyqtSignal as Signal
from .tables.functions_table import QFunctionTable
from .tables.activity_table import QActivityTable
from .tables.ctx_table import QCTXTable
from .tables.globals_table import QGlobalsTable
class ControlPanel(QWidget):
update_ready = Signal()
ctx_change = Signal()
def __init__(self, controller, parent=None):
super(ControlPanel, self).__init__(parent)
self.controller = controller
self.tables = {}
self._init_widgets()
# register controller callback
self.update_ready.connect(self.reload)
self.controller.ui_callback = self.update_callback
self.ctx_change.connect(self._reload_ctx)
self.controller.ctx_change_callback = self.ctx_callback
def update_callback(self):
"""
This function will be called in another thread, so the work
done here is guaranteed to be thread safe.
@return:
"""
self._update_table_data()
self.update_ready.emit()
def ctx_callback(self):
if isinstance(self.controller.last_ctx, binsync.data.Function):
self._ctx_table.update_table(new_ctx=self.controller.last_ctx.addr)
self.ctx_change.emit()
def reload(self):
# check if connected
if self.controller and self.controller.check_client():
self._reload_tables()
# update status
status = self.controller.status_string() if self.controller else "Disconnected"
self._status_label.setText(status)
def closeEvent(self, event):
if self.controller is not None:
self.controller.client_init_callback = None
def _init_widgets(self):
# status bar
self._status_label = QLabel(self)
self._status_label.setText(self.controller.status_string())
self._status_bar = QStatusBar(self)
self._status_bar.addPermanentWidget(self._status_label)
# control box
control_layout = QVBoxLayout()
# tabs for tables
self.tabView = QTabWidget()
# add tables to tabs
self._ctx_table = QCTXTable(self.controller)
self._func_table = QFunctionTable(self.controller)
self._global_table = QGlobalsTable(self.controller)
self._activity_table = QActivityTable(self.controller)
self.tabView.addTab(self._ctx_table, "Context")
self.tabView.addTab(self._func_table, "Functions")
self.tabView.addTab(self._global_table, "Globals")
self.tabView.addTab(self._activity_table, "Activity")
self.tables.update({
"context": self._ctx_table,
"functions": self._func_table,
"globals": self._global_table,
"activity": self._activity_table
})
main_layout = QVBoxLayout()
main_layout.addWidget(self.tabView)
main_layout.addWidget(self._status_bar)
self.setLayout(main_layout)
def _reload_ctx(self):
ctx_name = self.controller.last_ctx.name or ""
ctx_name = ctx_name[:12] + "..." if len(ctx_name) > 12 else ctx_name
self._status_bar.showMessage(f"{ctx_name}@{hex(self.controller.last_ctx.addr)}")
self._ctx_table.reload()
def _reload_tables(self):
for _, table in self.tables.items():
table.reload()
def _update_table_data(self):
if self.controller.client.has_remote:
self.controller.client.init_remote()
for _, table in self.tables.items():
table.update_table()
|
11585198
|
import greenlet
from .hubs.hub import get_hub
__all__ = ['Timeout', 'with_timeout']
_NONE = object()
# deriving from BaseException so that "except Exception as e" doesn't catch
# Timeout exceptions.
class Timeout(BaseException):
"""Raise `exception` in the current greenthread after `timeout` seconds.
When `exception` is omitted or ``None``, the :class:`Timeout` instance itself is raised. If
`seconds` is None, the timer is not scheduled, and is only useful if you're planning to raise it
directly.
Timeout objects are context managers, and so can be used in with statements. When used in a with
statement, if `exception` is ``False``, the timeout is still raised, but the context manager
suppresses it, so the code outside the with-block won't see it.
"""
def __init__(self, seconds=None, exception=None):
"""
:param float seconds: timeout seconds
:param exception: exception to raise when timeout occurs
"""
self.seconds = seconds
self.exception = exception
self.timer = None
self.start()
def start(self):
"""Schedule the timeout. This is called on construction, so
it should not be called explicitly, unless the timer has been
canceled."""
assert not self.pending, \
'%r is already started; to restart it, cancel it first' % self
if self.seconds is None: # "fake" timeout (never expires)
self.timer = None
elif self.exception is None or isinstance(self.exception, bool): # timeout that raises self
self.timer = get_hub().schedule_call_global(
self.seconds, greenlet.getcurrent().throw, self)
else: # regular timeout with user-provided exception
self.timer = get_hub().schedule_call_global(
self.seconds, greenlet.getcurrent().throw, self.exception)
return self
@property
def pending(self):
"""True if the timeout is scheduled to be raised
"""
if self.timer is not None:
return self.timer.pending
else:
return False
def cancel(self):
"""If the timeout is pending, cancel it
If not using Timeouts in ``with`` statements, always call cancel() in a ``finally`` after
the block of code that is getting timed out. If not canceled, the timeout will be raised
later on, in some unexpected section of the application.
"""
if self.timer is not None:
self.timer.cancel()
self.timer = None
def __repr__(self):
classname = self.__class__.__name__
if self.pending:
pending = ' pending'
else:
pending = ''
if self.exception is None:
exception = ''
else:
exception = ' exception=%r' % self.exception
return '<%s at %s seconds=%s%s%s>' % (
classname, hex(id(self)), self.seconds, exception, pending)
def __str__(self):
if self.seconds is None:
return ''
if self.seconds == 1:
suffix = ''
else:
suffix = 's'
if self.exception is None or self.exception is True:
return '%s second%s' % (self.seconds, suffix)
elif self.exception is False:
return '%s second%s (silent)' % (self.seconds, suffix)
else:
return '%s second%s (%s)' % (self.seconds, suffix, self.exception)
def __enter__(self):
if self.timer is None:
self.start()
return self
def __exit__(self, typ, value, tb):
self.cancel()
if value is self and self.exception is False:
return True
def with_timeout(seconds, function, *args, **kwds):
"""Wrap a call to some (yielding) function with a timeout
If the called function fails to return before the timeout, cancel it and return a flag value.
"""
timeout_value = kwds.pop("timeout_value", _NONE)
timeout = Timeout(seconds)
try:
try:
return function(*args, **kwds)
except Timeout as ex:
if ex is timeout and timeout_value is not _NONE:
return timeout_value
raise
finally:
timeout.cancel()
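# Usage sketches (illustrative; `do_work` stands in for any cooperatively
# yielding call):
#
#     with Timeout(0.5):          # raises the Timeout instance itself
#         do_work()
#
#     with Timeout(0.5, False):   # timeout fires but is silenced by __exit__
#         do_work()
#
#     result = with_timeout(0.5, do_work, timeout_value=None)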
|
11585222
|
import pytest
from ....order.tests.benchmark.test_order import FRAGMENT_AVAILABLE_SHIPPING_METHODS
from ....tests.utils import get_graphql_content
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_retrieve_shop(api_client, channel_USD, count_queries):
query = (
FRAGMENT_AVAILABLE_SHIPPING_METHODS
+ """
query getShop($channel: String!) {
shop {
defaultCountry {
code
country
}
availableShippingMethods(channel: $channel) {
...AvailableShippingMethods
}
countries {
country
code
}
}
}
"""
)
get_graphql_content(
api_client.post_graphql(query, variables={"channel": channel_USD.slug})
)
|
11585236
|
import numpy as np
import pandas as pd
from contextlib import contextmanager
import time
import logging
@contextmanager
def timing(name):
t0 = time.time()
yield
log_out = 'Fragment [{}] done in {:.2f} s\n'.format(name, time.time() - t0)
print(log_out)
logging.info(log_out)
def get_indiv_important_cols(indiv_train, indiv_cat_train, country_code, min_corr_val=0.05):
indiv_cat_train[country_code] = (1 * indiv_train.reset_index('id').groupby('id')['poor'].mean())
important_cols_indiv = indiv_cat_train.drop(
country_code, axis=1
).corrwith(
indiv_cat_train[country_code]
).abs().between(min_corr_val, 1)
    return important_cols_indiv[important_cols_indiv].index
def round_float_to(number, round_to=0.05):
return round(number / round_to) * round_to
def get_round_num(num, round_num):
return int((round_num * (num // round_num)) + round_num)
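# Worked examples (note that Python's round() uses banker's rounding in
# round_float_to, and results are subject to float representation):
#   round_float_to(0.12, 0.05)  -> 0.1   (0.12 / 0.05 ~ 2.4 rounds to 2)
#   get_round_num(7, 5)         -> 10    (5 * (7 // 5) + 5)
#   get_round_num(10, 5)        -> 15    (always bumps to the next multiple)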
def get_opt_val_seeds(size, seed=1030):
np.random.seed(seed)
opt_val_seeds = np.random.choice([42, 1029, 610, 514], size=size, replace=True)
return opt_val_seeds
def make_country_sub(preds, test_feat, country):
country_sub = pd.DataFrame(
data=preds,
columns=['poor'],
index=test_feat.index
)
country_sub['country'] = country
return country_sub[['country', 'poor']]
|
11585241
|
from typing import Iterable
from testplan.report import TestReport
class ParseSingleAction:
def __call__(self) -> TestReport:
pass
class ParseMultipleAction:
def __call__(self) -> Iterable[TestReport]:
pass
class ProcessResultAction:
def __call__(self, result: TestReport) -> TestReport:
pass
|
11585249
|
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer
from hazelcast.protocol.builtin import StringCodec
from hazelcast.protocol.builtin import EntryListCodec
from hazelcast.protocol.builtin import DataCodec
# hex: 0x0D1100
_REQUEST_MESSAGE_TYPE = 856320
# hex: 0x0D1101
_RESPONSE_MESSAGE_TYPE = 856321
_REQUEST_INITIAL_FRAME_SIZE = REQUEST_HEADER_SIZE
def encode_request(name):
buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
StringCodec.encode(buf, name, True)
return OutboundMessage(buf, True)
def decode_response(msg):
msg.next_frame()
return EntryListCodec.decode(msg, DataCodec.decode, DataCodec.decode)
|
11585252
|
from django.urls import re_path
from rest_framework.routers import DefaultRouter
from apps.vadmin.system.views import DictDataModelViewSet, DictDetailsModelViewSet, \
ConfigSettingsModelViewSet, SaveFileModelViewSet, MessagePushModelViewSet, LoginInforModelViewSet, \
OperationLogModelViewSet, CeleryLogModelViewSet, SystemInfoApiView
router = DefaultRouter()
router.register(r'dict/type', DictDataModelViewSet)
router.register(r'dict/data', DictDetailsModelViewSet)
router.register(r'config', ConfigSettingsModelViewSet)
router.register(r'savefile', SaveFileModelViewSet)
router.register(r'message', MessagePushModelViewSet)
router.register(r'logininfor', LoginInforModelViewSet)
router.register(r'operation_log', OperationLogModelViewSet)
router.register(r'celery_log', CeleryLogModelViewSet)
urlpatterns = [
re_path('dict/get/type/(?P<pk>.*)/', DictDetailsModelViewSet.as_view({'get': 'dict_details_list'})),
re_path('config/configKey/(?P<pk>.*)/', ConfigSettingsModelViewSet.as_view({'get': 'get_config_key'})),
    # Export configuration settings
    re_path('config/export/', ConfigSettingsModelViewSet.as_view({'get': 'export'})),
    # Clear configuration cache
    re_path('config/clearCache/', ConfigSettingsModelViewSet.as_view({'delete': 'clearCache', })),
    # Export dictionary type data
    re_path('dict/type/export/', DictDataModelViewSet.as_view({'get': 'export'})),
    # Export dictionary detail data
    re_path('dict/data/export/', DictDetailsModelViewSet.as_view({'get': 'export'})),
    # Clear dictionary cache
    re_path('dict/type/clearCache/', DictDetailsModelViewSet.as_view({'delete': 'clearCache', })),
    # Export message notifications
    re_path('message/export/', MessagePushModelViewSet.as_view({'get': 'export', })),
    # Current user's personal message list
    re_path('message/user_messages/', MessagePushModelViewSet.as_view({'get': 'get_user_messages', })),
    # Mark a message as read
    re_path('message/is_read/(?P<pk>.*)/', MessagePushModelViewSet.as_view({'put': 'update_is_read', })),
    # Clear all operation logs
    re_path('operation_log/clean/', OperationLogModelViewSet.as_view({'delete': 'clean_all', })),
    # Export operation logs
    re_path('operation_log/export/', OperationLogModelViewSet.as_view({'get': 'export', })),
    # Clear all login logs
    re_path('logininfor/clean/', LoginInforModelViewSet.as_view({'delete': 'clean_all', })),
    # Export login logs
    re_path('logininfor/export/', LoginInforModelViewSet.as_view({'get': 'export', })),
    # Clear all scheduled task (Celery) logs
    re_path('celery_log/clean/', CeleryLogModelViewSet.as_view({'delete': 'clean_all', })),
    # Export scheduled task (Celery) logs
    re_path('celery_log/export/', CeleryLogModelViewSet.as_view({'get': 'export', })),
    # Remove orphaned files
    re_path('clearsavefile/', SaveFileModelViewSet.as_view({'post': 'clearsavefile', })),
    # Get system info: CPU, memory, disk
    re_path('sys/info/', SystemInfoApiView.as_view())
]
urlpatterns += router.urls
|
11585258
|
class Doc2Tester:
def __init__(self, mod, doc, node):
self.mod = mod
self.doc = doc
self.node = node
self.exdefs = []
self.set_out([])
self.test_names = {}
self.condition_exprs = {}
self.condition_methods = {}
self.document_metas = []
self.document_lang = None
self.document_title = None
self.example_exprs = {}
self.ex_id = 1
def _visit_children(self, node):
E = self.mod.ReportedError
for ch in node.children:
try:
ch.accept(self)
except E:
pass
def ap(self, *args):
self.extend(args)
def error(self, msg, *args, **kwds):
msg = 'Doc2Tester: ' + msg
self.doc.env.error(msg, *args, **kwds)
def get_condition_method(self, cond):
idn = cond.get_id_name()
name = self.condition_methods.get(idn)
if name is None:
name = 'cond_%d' % len(self.condition_methods)
self.condition_methods[idn] = name
ap = self.ap
myself = '_self'
arg_names = cond.get_arg_names()
while myself in arg_names:
myself = '_' + myself
argstring = ', '.join([myself]+arg_names)
ap('def %s(%s):' % (name, argstring), '<NL>', '<INDENT>')
ap('# Condition: %s' % idn, '<NL>')
pcs = cond.find_aspects('python_code')
if not pcs:
self.error('No python code specified for testing condition: %r.' % idn,
cond.src.node, exception=None)
for pc in pcs:
for ic in pc.find_aspects('in_context'):
ctx = ic.src.node.arg.strip()
for line in ctx.split('\n'):
ap(line, '<NL>')
if pc is pcs[-1]:
ap('return (%s)' % pc.src.node.arg.strip(), '<NL>')
else:
ap('if not (%s):' %
pc.src.node.arg.strip(), '<NL>', '<INDENT>')
ap('return False', '<NL>', '<DEDENT>')
ap('<DEDENT>')
return name
def gen_comment(self, s):
for line in s.split('\n'):
self.ap('# '+line, '<NL>')
def get_children(self, node):
if not node.arg.strip():
return node.children
return (self.doc.node_of_taci('name', node.arg.strip()),)+tuple(node.children)
def get_condition_expr(self, po, args, attr, src, tgt):
cond = po.get_definition()
if cond is None:
return None
name = self.get_condition_method(cond)
callargs = []
self_name = attr.get_self_name()
for an in po.arg_names:
v = None
if an == self_name:
v = src
elif an == '<returned value>':
v = tgt
else:
v = args.get_arg_value(an)
if v is None:
self.error('Invalid condition argument: %r.' % an,
po.src.node,
exception=None)
continue
v = args.mapname(v)
callargs.append(v)
return '%sself.%s(%s)' % (
('', 'not ')[po.is_not],
name,
', '.join(callargs))
def get_example_expr(self, eg):
ex = self.example_exprs.get(eg)
if ex is not None:
return ex
ex = eg.get_ex_text()
ctx = eg.get_ctx_text()
if ctx:
ex_name = 'get_ex_%d' % self.ex_id
self.ex_id += 1
self.ap('def %s(self):' % ex_name, '<NL>', '<INDENT>')
for line in ctx.split('\n'):
self.ap(line, '<NL>')
self.ap('return %s' % ex, '<NL>', '<DEDENT>')
ex = 'self.%s()' % ex_name
self.example_exprs[eg] = ex
return ex
def get_test_name(self, kind):
tn = kind.srclastname
if tn in self.test_names:
i = self.test_names[tn]
self.test_names[tn] += 1
tn = '%s_%d' % (tn, i)
else:
self.test_names[tn] = 1
return tn
def get_tester(self):
ap = self.ap
ap('# Tests generated by: %s' % __name__, '<NL>')
# ap('# Main source file: %s' %
# self.doc.env.get_package().get_filename(), '<NL>')
ap('# Date: %s' % self.mod.time.asctime(
self.mod.time.localtime()), '<NL>')
ap('class Tester:', '<NL>', '<INDENT>')
ap('tests = {}', '<NL>')
self._visit_children(self.node)
lines = []
indent = 0
line = []
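        # Render the flat token stream: '<INDENT>'/'<DEDENT>' adjust the
        # indentation level, '<NL>' terminates the current line, and any
        # other token is appended as literal source text.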
for tok in self.out:
if tok == '<INDENT>':
indent += 4
elif tok == '<DEDENT>':
indent -= 4
elif tok == '<NL>':
lines.append(''.join(line))
line = []
else:
if not line:
line.append(indent * ' ')
line.append(tok)
return '\n'.join(lines)+'\n'
def set_out(self, out):
self.out = out
self.extend = out.extend
def visit_arglist(self, node):
self.ap('(')
comma = 0
for ch in self.get_children(node):
if comma:
self.ap(', ')
else:
comma = 1
ch.accept(self)
self.ap(')')
def visit_assign(self, node):
children = self.get_children(node)
while children:
children[0].accept(self)
self.ap(' = ')
children[1].accept(self)
self.ap('<NL>')
children = children[2:]
def visit_block(self, node):
self.ap('<INDENT>')
self._visit_children(node)
self.ap('<DEDENT>')
def visit_call(self, node):
children = self.get_children(node)
children[0].accept(self)
self.ap('(')
comma = 0
for ch in children[1:]:
if comma:
self.ap(', ')
else:
comma = 1
ch.accept(self)
self.ap(')')
def visit_comment(self, node):
pass
def visit_def(self, node):
self.ap('def ')
children = self.get_children(node)
children[0].accept(self)
children[1].accept(self)
self.ap(':', '<NL>')
for ch in children[2:]:
ch.accept(self)
def visit_default(self, node):
self.error('I don\'t know what to generate for the tag %r.' %
node.tag, node)
def visit_document(self, node):
self._visit_children(node)
def visit_document_lang(self, node):
if self.document_lang is not None:
self.error('Duplicate document lang directive.', node)
self.document_lang = node
def visit_document_title(self, node):
if self.document_title is not None:
self.error('Duplicate document title directive.', node)
self.document_title = node
def visit_exdefs(self, node):
self.exdefs.append(node)
def visit_fcall(self, node):
self.visit_call(node)
def visit_index(self, node):
children = self.get_children(node)
children[0].accept(self)
self.ap('[')
comma = 0
for ch in children[1:]:
if comma:
self.ap(', ')
else:
comma = 1
ch.accept(self)
self.ap(']')
def visit_meta(self, node):
self.document_metas.append(node)
def visit_name(self, node):
self.ap(node.arg.strip())
def visit_pcall(self, node):
self.visit_call(node)
self.ap('<NL>')
def visit_string(self, node):
self.ap('%r' % node.arg.strip())
def visit_subject(self, node):
self.ap('SUBJECT')
def visit_test_of(self, node):
TestOf(self, node)
def visit_test_program_segment(self, node):
self.gen_comment('Test for: %s' % node.arg.strip())
self._visit_children(node)
def visit_to_document_only(self, node):
pass
def visit_to_tester_only(self, node):
self._visit_children(node)
class DiffKind:
def __init__(self, a, b):
self.a = a
self.b = b
self.d_tag = 'diffkind'
def get_atom_kinds(self):
return [self]
def get_id_name(self):
return self.a.get_id_name() + '+' + self.b.get_id_name() + "'"
class EitherTest:
    def __init__(self):
pass
class TestOf(Doc2Tester):
def __init__(self, parent, node):
self.__dict__.update(parent.__dict__)
self.parent = parent
self.node = node
kind, args = node.arg, node.children
mod = self.mod = parent.mod
self.Kan = mod.KanExtension
self.Cat = mod.Cat
self.args = args
self.kind = kind
self.coverage = None
for node in args:
t = node.tag
if t == 'coverage':
if self.coverage is not None:
self.error('Duplicate coverage specifications',
node, exception=None)
else:
try:
coverage = int(node.arg.strip())
except ValueError:
coverage = node.arg.strip()
self.coverage = coverage
else:
self.error('Invalid element in %r.' %
self.node.tag, node, exception=None)
if self.coverage is None:
self.coverage = 1
self.make_cat()
def getattr_code(self, obj, attr):
if self.mod.is_identifier(attr):
return '%s.%s' % (obj, attr)
else:
return 'getattr(%s, %r)' % (obj, attr)
def warn(self, message, node=None):
self.error(message, node, exception=None, harmless=1)
def make_cat(self):
objects = []
arrows = {}
relations = []
fa = {}
fo = {}
arg_counter = 1
ac = [1]
eithertests = []
kinds = []
kindofname = {}
ex_setup = []
ex_map = {}
pc_checks = {}
def add_kind(kind):
name = kind.get_id_name()
if name not in objects:
objects.append(name)
kinds.append(kind)
kindofname[name] = kind
return name
def get_example_expr(a):
if a in ex_map:
return ex_map[a]
x = self.parent.get_example_expr(a)
usage = a.get_use_text(x)
ex_map[a] = usage
return usage
def gen_atom_beam(asp):
asptgt = asp.tgt
tag = asptgt.d_tag
if tag == 'attribute':
attr = asptgt
otherkinds = []
atkak = attr.get_atom_beams()
for ab in atkak:
a = ab.tgt
if a.d_tag == 'mapping':
op = asp + ab
ret_kind = a.get_return_test_kind()
tgt_name = add_kind(ret_kind)
examples = a.get_args_examples(get_example_expr, kind)
for args in examples:
arrow_name = 'CALLATTR%d' % ac[0]
ac[0] += 1
arrows[arrow_name] = (src_name, tgt_name)
fa[arrow_name] = ('callattr', op, args)
else:
otherkinds.append(a)
if otherkinds or not atkak:
if len(otherkinds) != len(atkak):
# make new atk somehow
oth = [(x.get_id_name(), x) for x in otherkinds]
oth.sort()
otherkinds = [y for (x, y) in oth]
atk = self.mod.Main.Kind()
atk.d_tag = 'kind'
atk.aspects = otherkinds
atk.tgtfullname = '(%s)' % (
'&'.join([x.tgtfullname for x in otherkinds]))
else:
atk = attr.get_test_kind()
arrow_name = '%s:%d' % (attr.get_id_name(), ac[0])
ac[0] += 1
tgt_name = add_kind(atk)
assert arrow_name not in arrows
arrows[arrow_name] = (src_name, tgt_name)
fa[arrow_name] = ('getattr', attr.get_attr_name())
elif tag in ('operator', 'function_operator', 'inplace_operator',
'reverse_operator', 'mapping', 'setitem', 'delitem', 'getitem'):
ret_kind = asp.get_return_test_kind()
tgt_name = add_kind(ret_kind)
examples = asp.get_args_examples(get_example_expr)
for args in examples:
arrow_name = 'OPERATOR%d' % ac[0]
ac[0] += 1
arrows[arrow_name] = (src_name, tgt_name)
fa[arrow_name] = (tag, asp, args)
elif tag == 'either':
asp_name = add_kind(asptgt)
if asptgt is not kind:
arrow_name = '(%s:%s:%d)' % (src_name, asp_name, ac[0])
ac[0] += 1
arrows[arrow_name] = (src_name, asp_name)
fa[arrow_name] = ('subkind', asp_name)
for i, ak in enumerate(asptgt.get_alt_kinds()):
tgt_name = add_kind(ak)
arrow_name = "(%s'%s)" % (asp_name, tgt_name)
arrows[arrow_name] = (asp_name, tgt_name)
fa[arrow_name] = ('superkind', i)
else:
assert 0
add_kind(self.kind)
while kinds:
kind = kinds.pop()
src_name = kind.get_id_name()
for asp in kind.get_atom_beams():
try:
gen_atom_beam(asp)
except self.mod.ReportedError:
pass
cat = self.Cat.oarcat(objects, arrows, relations)
tester = self.Kan.category_tester(
self.Cat.Functor(fo, fa, cat), coverage=self.coverage)
def get_arrow_name(a):
return ','.join(a)
tester.get_arrow_name = get_arrow_name
object_tester = tester.get_object_tester(self.kind.get_id_name())
icode = object_tester.get_intermediate_test_code()
e_names = {}
e_name_no = [0]
def e_name_of(a):
if not a:
return 'arg'
# return 'e[%r]'%a
if a not in e_names:
e_names[a] = 't%d' % e_name_no[0]
e_name_no[0] += 1
return e_names[a]
def call_fo(a, b):
return 'fo[%r](%s)' % (a, e_name_of(b))
def assign_fa(append, tgt, func, src):
tag = func[0]
if tag == 'getattr':
name = func[1]
append('%s = %s' % (tgt, self.getattr_code(src, name)))
elif tag in ('callattr', 'operator', 'inplace_operator', 'function_operator',
'reverse_operator', 'mapping', 'setitem', 'delitem', 'getitem'):
op = func[1]
opname = op.get_op_name()
args = func[2]
sus = args.get_setups_for_preconditions()
if sus is not None:
for su in sus:
append('%s.%s(%s)' %
(src, su.get_name(), su.get_args()))
else:
self.error('Could not find postcondition to satisfy precondition for %r.' % (
op.get_op_id_name()),
exception=None)
eqs = []
for eq in op.find_equations():
pres = eq.find_aspects('precondition')
posts = eq.find_aspects('postcondition')
# xxx late checking but will do for now I may relax it anyway
if len(pres) != 1:
if pres:
node = pres[0].src.node
else:
node = eq.src.node
self.error('Exactly one precondition currently allowed in equation.',
node,
exception=None)
if len(posts) < 1:
self.error('At least one postcondition currently required in equation.',
eq.src.node,
exception=None)
if pres:
pre = pres[0]
ce = self.parent.get_condition_expr(
pre, args, op, src, None)
if ce is None:
# This 'cant happen' anymore, checked here 'just in case'...
raise SyntaxError('Undefined condition: %r' % pre)
prename = 'pre_%d' % len(eqs)
eqs.append((prename, posts))
append('%s = %s' % (prename, ce))
if tag == 'callattr':
append('%s = %s(%s)' %
(tgt, self.getattr_code(src, opname), args))
elif tag == 'operator':
append('%s = %s %s %s' % (tgt, src, opname, args))
elif tag == 'inplace_operator':
append('%s = %s' % (tgt, src))
append('%s %s %s' % (tgt, opname, args))
elif tag == 'reverse_operator':
append('%s = %s %s %s' % (tgt, args, opname, src))
elif tag == 'function_operator':
argstr = str(args)
if argstr:
argstr = ', ' + argstr
append('%s = %s(%s%s)' % (tgt, opname, src, argstr))
elif tag == 'mapping':
append('%s = %s(%s)' % (tgt, src, args))
elif tag == 'getitem':
# Number of args >=1, has been checked.
append('%s = %s[%s]' % (tgt, src, args))
elif tag == 'delitem':
# Number of args >=1, has been checked.
append('del %s[%s]' % (src, args))
elif tag == 'setitem':
# Number of args >=2, has been checked.
append('%s[%s] = %s' %
(src, ', '.join(args.negs[:-1]), args.negs[-1]))
else:
assert 0
posts = args.get_postconditions()
for po in posts:
ce = self.parent.get_condition_expr(po, args, op, src, tgt)
if ce is None:
continue
append('assert %s, %r' % (
ce,
'Failed postcondition: %r' % po.cond_expr,
))
for (prename, posts) in eqs:
for post in posts:
ce = self.parent.get_condition_expr(
post, args, op, src, tgt)
if ce is None:
# This 'cant happen' anymore, checked here 'just in case'...
raise SyntaxError('Undefined condition: %r' % post)
message = 'Failed postcondition equality: %r' % post
append('assert %s == %s, %r' % (
prename, ce, message))
else:
assert 0
pylines = []
class ArrowTree:
def __init__(self, pre):
self.pre = pre
self.children = []
def __str__(self):
if self.children:
chdrn = ', chdrn = %s' % self.children
else:
chdrn = ''
return 'AT(pre = %s%s)' % (self.pre, chdrn)
__repr__ = __str__
ats = {}
def at_of_pre(pre):
at = ats.get(pre)
if at is None:
at = ArrowTree(pre)
ats[pre] = at
if pre:
at_of_pre(pre[:-1]).children.append(at)
return at
def trav(t):
subs = []
src = t.pre
src_name = e_name_of(src)
for ch in t.children:
try:
ar = ch.pre[-1]
func = fa[ar]
tgt = ch.pre
tag = func[0]
if tag == 'subkind':
e_names[ch.pre] = src_name
trav(ch)
elif tag == 'superkind':
subs.append((func, ch))
else:
assign_fa(pylines.append, e_name_of(
tgt), func, src_name)
trav(ch)
except self.mod.ReportedError:
pass
if subs:
subs.sort()
indents = 0
for func, ch in subs:
e_names[ch.pre] = src_name
pos = len(pylines)
pylines.append('try:')
pylines.append('<INDENT>')
trav(ch)
if pos+2 == len(pylines):
pylines.pop()
pylines.pop()
else:
indents += 1
pylines.append('<DEDENT>')
pylines.append('except:')
pylines.append('<INDENT>')
if indents:
pylines.append('raise')
pylines.extend(['<DEDENT>']*indents)
alla = object_tester.get_all_arrows()
init = at_of_pre(())
for a in alla:
for i in range(1, len(a)+1):
at_of_pre(a[:i])
trav(init)
if not pylines:
self.warn('No tests generated for %r.' % self.kind.tgtfullname)
pylines = ['pass']
ap = self.parent.ap
name = self.parent.get_test_name(self.kind)
test_name = 'test_%s' % name
ap('def %s(self, arg):' % test_name, '<NL>', '<INDENT>')
for line in ex_setup + pylines:
ap(line)
if line not in ('<INDENT>', '<DEDENT>'):
ap('<NL>')
ap('<DEDENT>')
ap('tests[%r] = %s' % (self.kind.tgtfullname, test_name), '<NL>')
class _GLUECLAMP_:
_imports_ = (
'_parent:SpecNodes',
'_parent.SpecNodes:node_of_taci',
'_parent:Main',
'_parent.Main:ReportedError',
'_root.io:StringIO',
'_root.sys:stdout',
'_root:sys',
'_root.traceback:format_exception',
'_root.guppy.etc:KanExtension',
'_root.guppy.etc:Cat',
'_root:re',
'_root:time',
)
    # Reserved words found in Grammar of Python 2.3.3
    # (the stdlib `keyword` module exposes this list as keyword.kwlist)
reserved_words = (
'and',
'assert',
'break',
'class',
'continue',
'def',
'del',
'elif',
'else',
'except',
'exec',
'finally',
'for',
'from',
'global',
'if',
'import',
'in',
'is',
'lambda',
'not',
'or',
'pass',
'print',
'raise',
'return',
'try',
'while',
'yield',
)
    def _get_is_identifier(self):
        # \Z anchors the pattern at the end of the string, so names that
        # merely *start* with an identifier are rejected.
        m = self.re.compile(r'[_a-zA-Z][_a-zA-Z0-9]*\Z').match
        rwd = dict([(rw, 1) for rw in self.reserved_words])
        return lambda x: m(x) and x not in rwd
def doc2text(self, doc, node):
d2h = Doc2Tester(self, doc, node)
return d2h.get_tester()
def doc2filer(self, doc, node, name, dir, opts, IO):
text = self.doc2text(doc, node)
path = IO.path.join(dir, '%s.py' % name)
node = self.node_of_taci('write_file', path, [
self.node_of_taci('text', text)])
return node
|
11585262
|
from logtacts.settings import *
import fakeredis
CACHES['default']['OPTIONS']['REDIS_CLIENT_CLASS'] = "fakeredis.FakeStrictRedis"
|
11585288
|
import os.path as osp
import torch.distributed as dist
from mmcv.runner import DistEvalHook as BaseDistEvalHook
from mmcv.runner import EvalHook as BaseEvalHook
from torch.nn.modules.batchnorm import _BatchNorm
class EvalHook(BaseEvalHook):
"""Please refer to `mmcv.runner.hooks.evaluation.py:EvalHook` for detailed
docstring."""
def _do_evaluate(self, runner):
"""perform evaluation and save ckpt."""
if not self._should_evaluate(runner):
return
if hasattr(self.dataloader.dataset,
'load_as_video') and self.dataloader.dataset.load_as_video:
from qdtrack.apis import single_gpu_test
else:
from mmdet.apis import single_gpu_test
results = single_gpu_test(runner.model, self.dataloader, show=False)
runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
key_score = self.evaluate(runner, results)
if self.save_best:
self._save_ckpt(runner, key_score)
class DistEvalHook(BaseDistEvalHook):
"""Please refer to `mmcv.runner.hooks.evaluation.py:DistEvalHook` for
detailed docstring."""
def _do_evaluate(self, runner):
"""perform evaluation and save ckpt."""
# Synchronization of BatchNorm's buffer (running_mean
# and running_var) is not supported in the DDP of pytorch,
# which may cause the inconsistent performance of models in
# different ranks, so we broadcast BatchNorm's buffers
# of rank 0 to other ranks to avoid this.
if self.broadcast_bn_buffer:
model = runner.model
for name, module in model.named_modules():
if isinstance(module,
_BatchNorm) and module.track_running_stats:
dist.broadcast(module.running_var, 0)
dist.broadcast(module.running_mean, 0)
if not self._should_evaluate(runner):
return
tmpdir = self.tmpdir
if tmpdir is None:
tmpdir = osp.join(runner.work_dir, '.eval_hook')
if hasattr(self.dataloader.dataset,
'load_as_video') and self.dataloader.dataset.load_as_video:
from qdtrack.apis import multi_gpu_test
else:
from mmdet.apis import multi_gpu_test
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=tmpdir,
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
key_score = self.evaluate(runner, results)
if self.save_best:
self._save_ckpt(runner, key_score)
|
11585308
|
import logging
import os
import sys
import splunk.admin as admin
import cloudformation_templates_schema
import urllib
import base_eai_handler
import log_helper
if sys.platform == 'win32':
import msvcrt
# Binary mode is required for persistent mode on Windows.
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
# Setup the handler
logger = log_helper.setup(logging.INFO, 'CloudFormationTemplatesEAIHandler', 'cloudformation_templates_handler.log')
class CloudFormationTemplatesEAIHandler(base_eai_handler.BaseEAIHandler):
def setup(self):
# Add our supported args
for arg in cloudformation_templates_schema.ALL_FIELDS:
self.supportedArgs.addOptArg(arg)
def handleList(self, confInfo):
"""
Called when user invokes the "list" action.
Arguments
confInfo -- The object containing the information about what is being requested.
"""
logger.info('CloudFormation Templates list requested.')
# Fetch from cloudformation_templates conf handler
conf_handler_path = self.get_conf_handler_path_name('cloudformation_templates', 'nobody')
cloudformation_templates_eai_response_payload = self.simple_request_eai(conf_handler_path, 'list', 'GET', get_args={'count': -1})
# Add link alternate (without mgmt, scheme, host, port) to list response
for cloudformation_template in cloudformation_templates_eai_response_payload['entry']:
grand_central_aws_accounts_link_alternate = cloudformation_template['links']['alternate'].replace('/configs/conf-cloudformation_templates/', '/cloudformation_templates/')
cloudformation_template['content']['cloudformation_templates_link_alternate'] = grand_central_aws_accounts_link_alternate
            cloudformation_template['content']['cloudformation_template_name'] = cloudformation_template['name']
cloudformation_template['content']['label'] = cloudformation_template['content'].get('label', '')
cloudformation_template['content']['description'] = cloudformation_template['content'].get('description', '')
cloudformation_template['content']['filename'] = cloudformation_template['content'].get('filename', '')
self.set_conf_info_from_eai_payload(confInfo, cloudformation_templates_eai_response_payload)
def handleCreate(self, confInfo):
"""
Called when user invokes the "create" action.
Arguments
confInfo -- The object containing the information about what is being requested.
"""
logger.info('AWS CloudFormation template creation requested.')
# Validate and extract correct POST params
params = self.validate_cloudformation_templates_schema_params()
# cloudformation_templates.conf creation and response
post_args = {
'name': params['name'],
'label': params['label'],
'description': params['description'],
'filename': params['filename']
}
with open(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'local')) + '/' + params['filename'], 'w') as outfile:
outfile.write(params['template'])
cloudformation_templates_eai_response_payload = self.simple_request_eai(self.get_conf_handler_path_name('cloudformation_templates'),
'create', 'POST', post_args)
# Always populate entry content from request to list handler.
cloudformation_templates_rest_path = '/servicesNS/%s/%s/cloudformation_templates/%s' % (
'nobody', self.appName, urllib.quote_plus(params['name']))
cloudformation_templates_eai_response_payload = self.simple_request_eai(cloudformation_templates_rest_path, 'read', 'GET')
self.set_conf_info_from_eai_payload(confInfo, cloudformation_templates_eai_response_payload)
def handleEdit(self, confInfo):
"""
Called when user invokes the 'edit' action. Index modification is not supported through this endpoint. Both the
scripted input and the grand_central_aws_accounts.conf stanza will be overwritten on ANY call to this endpoint.
Arguments
confInfo -- The object containing the information about what is being requested.
"""
logger.info('Cloudformation template edit requested.')
name = self.callerArgs.id
conf_stanza = urllib.quote_plus(name)
        params = self.validate_cloudformation_templates_schema_params()
conf_handler_path = '%s/%s' % (self.get_conf_handler_path_name('cloudformation_templates', 'nobody'), conf_stanza)
# Create post args - remove name to ensure edit instead of create
cloudformation_templates_conf_postargs = {
'label': params['label'],
'description': params['description'],
'filename': params['filename']
}
# Edit cloudformation_templates.conf
cloudformation_templates_eai_response_payload = self.simple_request_eai(conf_handler_path, 'edit', 'POST',
cloudformation_templates_conf_postargs)
# Always populate entry content from request to list handler.
cloudformation_templates_rest_path = '/servicesNS/%s/%s/cloudformation_templates/%s' % ('nobody', self.appName, conf_stanza)
cloudformation_templates_eai_response_payload = self.simple_request_eai(cloudformation_templates_rest_path, 'read', 'GET')
self.set_conf_info_from_eai_payload(confInfo, cloudformation_templates_eai_response_payload)
def handleRemove(self, confInfo):
"""
Called when user invokes the 'remove' action. Removes the requested stanza from inputs.conf (scripted input),
removes the requested stanza from grand_central_aws_accounts.conf, and removes all related credentials
Arguments
confInfo -- The object containing the information about what is being requested.
"""
logger.info('CloudFormation template removal requested.')
name = self.callerArgs.id
conf_stanza = urllib.quote_plus(name)
cloudformation_templates_rest_path = '/servicesNS/%s/%s/cloudformation_templates/%s' % (
'nobody', self.appName, urllib.quote_plus(name))
cloudformation_templates_eai_response_payload = self.simple_request_eai(cloudformation_templates_rest_path,
'read', 'GET')
filename = cloudformation_templates_eai_response_payload['entry'][0]['content']['filename']
# Delete actual CloudFormation template file
filepath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'local')) + '/' + filename
if os.path.exists(filepath):
os.remove(filepath)
# Delete cloudformation_templates.conf stanza
conf_handler_path = '%s/%s' % (self.get_conf_handler_path_name('cloudformation_templates'), conf_stanza)
cloudformation_templates_eai_response_payload = self.simple_request_eai(conf_handler_path, 'remove', 'DELETE')
self.set_conf_info_from_eai_payload(confInfo, cloudformation_templates_eai_response_payload)
def validate_cloudformation_templates_schema_params(self):
"""
Validates raw request params against the server schema
"""
params = self.get_params(schema=cloudformation_templates_schema, filter=cloudformation_templates_schema.CLOUDFORMATION_TEMPLATE_FIELDS)
return self.validate_params(cloudformation_templates_schema.cloudformation_template_schema, params)
admin.init(CloudFormationTemplatesEAIHandler, admin.CONTEXT_NONE)
|
11585333
|
if "bpy" in locals():
import importlib
importlib.reload(ycd)
else:
from . import ycd
import bpy
|
11585351
|
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
a = np.load('../datasets/toyData/valData.npy')
a = np.random.randn(10, 3, 25) * 50  # (frames, xyz, joints); overrides the loaded data for a quick test
# index = 50
"""
numframes = 100
numpoints = 25
fig = plt.figure()
scat = plt.scatter()
ani = animation.FuncAnimation(fig, update_plot, frames=xrange(numframes),
fargs=(color_data, scat))
plt.show()
def update_plot(i, data, scat):
scat.set_array(data[i])
return scat,
Look at :
https://stackoverflow.com/questions/9401658/how-to-animate-a-scatter-plot
"""
for i in range(1):  # increase to 300 to step through every frame
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # xs = a[index, 0, i, :]
    # ys = a[index, 1, i, :]
    # zs = a[index, 2, i, :]
    # a has shape (frames, xyz, joints): index frame i, then coordinate axis
    xs = a[i, 0, :]
    ys = a[i, 1, :]
    zs = a[i, 2, :]
    #for j in range(25):
    #plt.text(a[5,0,i,j],a[5,1,i,j], a[5,2,i,j], str(j))
    ax.scatter(xs, ys, zs, c='r', marker='o', alpha=0.5)
for j in range(25):
ax.text(xs[j],ys[j],zs[j], '%s' % (j))
plt.show()
print("Frame%d"%(i))
|
11585357
|
from .config import sample_data
from .context import pandas_ta
from unittest import skip, TestCase
from pandas import DataFrame
class TestOverlapExtension(TestCase):
@classmethod
def setUpClass(cls):
cls.data = sample_data
@classmethod
def tearDownClass(cls):
del cls.data
def setUp(self): pass
def tearDown(self): pass
def test_alma_ext(self):
self.data.ta.alma(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "ALMA_10_6.0_0.85")
def test_dema_ext(self):
self.data.ta.dema(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "DEMA_10")
def test_ema_ext(self):
self.data.ta.ema(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "EMA_10")
def test_fwma_ext(self):
self.data.ta.fwma(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "FWMA_10")
def test_hilo_ext(self):
self.data.ta.hilo(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(list(self.data.columns[-3:]), ["HILO_13_21", "HILOl_13_21", "HILOs_13_21"])
def test_hl2_ext(self):
self.data.ta.hl2(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "HL2")
def test_hlc3_ext(self):
self.data.ta.hlc3(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "HLC3")
def test_hma_ext(self):
self.data.ta.hma(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "HMA_10")
def test_hwma_ext(self):
self.data.ta.hwma(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "HWMA_0.2_0.1_0.1")
def test_jma_ext(self):
self.data.ta.jma(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "JMA_7_0")
def test_kama_ext(self):
self.data.ta.kama(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "KAMA_10_2_30")
def test_ichimoku_ext(self):
self.data.ta.ichimoku(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(list(self.data.columns[-5:]), ["ISA_9", "ISB_26", "ITS_9", "IKS_26", "ICS_26"])
def test_linreg_ext(self):
self.data.ta.linreg(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "LR_14")
def test_mcgd_ext(self):
self.data.ta.mcgd(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "MCGD_10")
def test_midpoint_ext(self):
self.data.ta.midpoint(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "MIDPOINT_2")
def test_midprice_ext(self):
self.data.ta.midprice(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "MIDPRICE_2")
def test_ohlc4_ext(self):
self.data.ta.ohlc4(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "OHLC4")
def test_pwma_ext(self):
self.data.ta.pwma(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "PWMA_10")
def test_rma_ext(self):
self.data.ta.rma(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "RMA_10")
def test_sinwma_ext(self):
self.data.ta.sinwma(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "SINWMA_14")
def test_sma_ext(self):
self.data.ta.sma(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "SMA_10")
def test_ssf_ext(self):
self.data.ta.ssf(append=True, poles=2)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "SSF_10_2")
self.data.ta.ssf(append=True, poles=3)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "SSF_10_3")
def test_swma_ext(self):
self.data.ta.swma(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "SWMA_10")
def test_supertrend_ext(self):
self.data.ta.supertrend(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(list(self.data.columns[-4:]), ["SUPERT_7_3.0", "SUPERTd_7_3.0", "SUPERTl_7_3.0", "SUPERTs_7_3.0"])
def test_t3_ext(self):
self.data.ta.t3(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "T3_10_0.7")
def test_tema_ext(self):
self.data.ta.tema(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "TEMA_10")
def test_trima_ext(self):
self.data.ta.trima(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "TRIMA_10")
def test_vidya_ext(self):
self.data.ta.vidya(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "VIDYA_14")
def test_vwap_ext(self):
self.data.ta.vwap(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "VWAP_D")
def test_vwma_ext(self):
self.data.ta.vwma(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "VWMA_10")
def test_wcp_ext(self):
self.data.ta.wcp(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "WCP")
def test_wma_ext(self):
self.data.ta.wma(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "WMA_10")
def test_zlma_ext(self):
self.data.ta.zlma(append=True)
self.assertIsInstance(self.data, DataFrame)
self.assertEqual(self.data.columns[-1], "ZL_EMA_10")
|
11585377
|
import torch
from SpatialExtractor.BaseCNN_4FeatureGetting import BaseCNN
from SpatialExtractor.Main_4FeatureGetting import parse_config
def make_spatial_model():
config = parse_config()
model = BaseCNN(config)
model = torch.nn.DataParallel(model).cuda()
ckpt = './SpatialExtractor/weights/DataParallel-00008.pt'
checkpoint = torch.load(ckpt)
model.load_state_dict(checkpoint['state_dict'])
return model
if __name__ == "__main__":
make_spatial_model()
|
11585441
|
import json
import ast
from .ConnectionProperty import ConnectionProperty
from .DeadlineUtility import ArrayToCommaSeparatedString
class Jobs:
"""
Class used by DeadlineCon to send Job requests.
Stores the address of the Web Service for use in sending requests.
"""
def __init__(self, connectionProperties):
self.connectionProperties = connectionProperties
def GetJobIds(self):
""" Gets all the Job IDs.
Returns: The list of IDs.
"""
return self.connectionProperties.__get__("/api/jobs?IdOnly=true")
def GetJobs(self, ids = None):
""" Gets all specified Jobs, or all Jobs if none specified.
Input: List of Job Ids.
Returns: The list of Jobs.
"""
script = "/api/jobs"
        if ids is not None:
            script = script + "?JobID=" + ArrayToCommaSeparatedString(ids)
return self.connectionProperties.__get__(script)
def CalculateJobStatistics(self, jobID):
"Gets job statistics for the specified job"
return self.connectionProperties.__get__("/api/jobs?JobID=" + jobID + "&Statistics=true")
def GetJobsInState(self, state):
""" Gets all jobs in the specified state.
Input: The state. Valid states are Active, Suspended, Completed, Failed, and Pending. Note that Active covers both Queued and Rendering jobs.
Returns: The list of Jobs in the specified state.
"""
return self.connectionProperties.__get__("/api/jobs?States=" + state)
def GetJobsInStates(self, states):
""" Gets all jobs in the specified states.
Input: The list of states. Valid states are Active, Suspended, Completed, Failed, and Pending. Note that Active covers both Queued and Rendering jobs.
Returns: The list of Jobs in the specified states.
"""
return self.connectionProperties.__get__("/api/jobs?States=" + ",".join(states))
def GetJob(self, id):
"""Gets a Job.
Input: id: The Job ID (may be a list).
Returns: The Job/s (list).
"""
jobId = ArrayToCommaSeparatedString(id)
result = self.connectionProperties.__get__("/api/jobs?JobID="+jobId)
if type(result) == list and len(result) > 0:
result = result[0]
return result
def SaveJob(self, jobData):
""" Updates a Job's properties in the database.
Input: jobData: The Jobs information in json format.
Returns: Success message.
"""
jobData = json.dumps(jobData)
body = '{"Command":"save", "Job":'+jobData+'}'
return self.connectionProperties.__put__("/api/jobs", body)
def SuspendJob(self, id):
""" Suspend a queued, rendering, or pending job.
Input: id: The Job ID.
Returns: Success message.
"""
body = '{"Command":"suspend", "JobID":"'+id+'"}'
return self.connectionProperties.__put__("/api/jobs", body)
def SuspendJobNonRenderingTasks(self, id):
""" Suspends the Tasks for a Job that are not in the rendering state.
Input: id: The Job ID.
Returns: Success message.
"""
body = '{"Command":"suspendnonrendering", "JobID":"'+id+'"}'
return self.connectionProperties.__put__("/api/jobs", body)
def ResumeJob(self, id):
""" Resumes a job.
Input: id: The Job ID.
Returns: Success message.
"""
body = '{"Command":"resume", "JobID":"'+id+'"}'
return self.connectionProperties.__put__("/api/jobs", body)
def ResumeFailedJob(self, id):
""" Resumes a failed Job.
Input: id: The Job ID.
Returns: Success message.
"""
body = '{"Command":"resumefailed", "JobID":"'+id+'"}'
return self.connectionProperties.__put__("/api/jobs", body)
def DeleteJob(self, id):
""" Deletes a Job.
Input: id: The Job ID.
Returns: Success message.
"""
return self.connectionProperties.__delete__("/api/jobs?JobID="+id)
def RequeueJob(self, id):
""" Requeues a Job. All rendering and completed Tasks for the Job will be requeued.
Input: id: The Job ID.
Returns: Success message.
"""
body = '{"Command":"requeue", "JobID":"'+id+'"}'
return self.connectionProperties.__put__("/api/jobs", body)
def ArchiveJob(self, id):
""" Archive a non-queued, non-rendering Job.
Input: id: The Job ID.
Returns: Success message.
"""
body = '{"Command":"archive", "JobID":"'+id+'"}'
return self.connectionProperties.__put__("/api/jobs", body)
def ImportJob(self,file):
""" Imports an archived Job and returns it.
Input: file: file location for archived Job.
Returns: Success message.
"""
body = '{"Command":"import", "File":"'+file+'"}'
return self.connectionProperties.__put__("/api/jobs", body)
def PendJob(self, id):
""" Place a Job with dependencies in the pending state.
Input: id: The Job ID.
Returns: Success message.
"""
body = '{"Command":"pend", "JobID":"'+id+'"}'
return self.connectionProperties.__put__("/api/jobs", body)
def ReleasePendingJob(self, id):
""" Releases a pending Job.
Input: id: The Job ID.
Returns: Success message.
"""
body = '{"Command":"releasepending", "JobID":"'+id+'"}'
return self.connectionProperties.__put__("/api/jobs", body)
def CompleteJob(self, id):
""" Completes a Job. All incomplete Tasks for the Job will be marked as complete.
Input: id: The Job ID.
Returns: Success message.
"""
body = '{"Command":"complete", "JobID":"'+id+'"}'
return self.connectionProperties.__put__("/api/jobs", body)
def FailJob(self, id):
""" Fails a Job. All incomplete Tasks for the Job will be marked as failed.
Input: id: The Job ID.
Returns: Success message.
"""
body = '{"Command":"fail", "JobID":"'+id+'"}'
return self.connectionProperties.__put__("/api/jobs", body)
def UpdateJobSubmissionDate(self, id):
""" Sets the Job's submission date to the current time.
Input: id: The Jobs ID.
Returns: Success message.
"""
body = '{"Command":"updatesubmissiondate", "JobID":"'+id+'"}'
return self.connectionProperties.__put__("/api/jobs", body)
def SubmitJobFiles(self, info, plugin, aux = [], idOnly = False):
""" Submit a new Job using Job info file and plugin info file.
Input: info: The location of the Job Info File.
plugin: The location of the Plugin Info File.
aux: Array of any additional auxiliary submission files, defaults to empty.
idOnly: If True, only the Job's ID is returned, defaults to False.
Returns: The new Job.
"""
if not isinstance(aux, list):
aux = [aux]
return self.connectionProperties.__post__("/api/jobs", buildJobSubmission(info, plugin, aux, idOnly))
def SubmitJob(self, info, plugin, aux = [], idOnly = False):
""" Submit a new Job.
Input: info: Dictionary of Job information.
plugin: Dictionary of Plugin information for the Job.
aux: Array of any additional auxiliary submission files, defaults to empty.
idOnly: If True, only the Job's ID is returned, defaults to False.
Returns: The new Job.
"""
if not isinstance(aux, list):
aux = [aux]
body = '{"JobInfo":'+json.dumps(info)+',"PluginInfo":'+json.dumps(plugin)+',"AuxFiles":'+json.dumps(aux)
if idOnly:
body += ',"IdOnly":true'
body += '}'
return self.connectionProperties.__post__("/api/jobs", body)
def SubmitJobs(self, jobs=[], dependent=False):
""" Submits multiple Jobs.
Input: jobs: List of Jobs as dictionaries. Job dictionaries should contain the following properties:
JobInfo - Dictionary of Job information. Required property.
PluginInfo - Dictionary of Plugin information for the Job. Required property.
AuxFiles - List of any additional auxiliary submission files (defaults to empty). Required property.
DependsOnPrevious - True to make the Job dependent on the previously submitted Job. Defaults to false.
dependent: True to make each Job submitted dependent on the previous (except for the first one). Defaults to false.
Returns: Success message.
"""
if not isinstance(jobs, list):
jobs = [jobs]
body = '{"Jobs":' + json.dumps(jobs) + ',"Dependent":"' + str(dependent).lower() + '"}'
return self.connectionProperties.__post__( "/api/jobs", body )
#Machine Limits
def SetJobMachineLimit(self, id, limit, slaveList, whiteListFlag):
""" Sets a Job's machine limit.
Input: id: The Job ID.
limit: The maximum number of Slaves that can work on this Job at any one time.
slaveList: A list of Slaves which are either not allowed to work on or are the only allowed Slave for a Job.
            whiteListFlag: If true, the Slaves in the slaveList are the only Slaves allowed to work on the Job; otherwise, the Slaves in the slaveList are not allowed to work on the Job.
Returns: Success message.
"""
body = json.dumps({"Command":"setjobmachinelimit","JobID":id, "Limit":limit, "SlaveList":slaveList,"WhiteListFlag":whiteListFlag})
return self.connectionProperties.__put__("/api/jobs", body)
def AddSlavesToJobMachineLimitList(self, id, slaveList):
""" Add additional Slaves to the Jobs limit list.
Input: id: The Job ID.
slaveList: The Slaves to be added to the Jobs machine limit list.
Returns: Success message.
"""
body = json.dumps({"Command":"addslavestojobmachinelimitlist","JobID":id, "SlaveList":slaveList})
return self.connectionProperties.__put__("/api/jobs", body)
def RemoveSlavesFromJobMachineLimitList(self, id, slaveList):
""" Remove chosen Slaves from the Jobs limit list.
Input: id: The Job ID.
slaveList: The Slaves to be removed from the Jobs machine limit list.
Returns: Success message.
"""
body = json.dumps({"Command":"removeslavesfromjobmachinelimitlist","JobID":id,"SlaveList":slaveList})
return self.connectionProperties.__put__("/api/jobs", body)
def SetJobMachineLimitListedSlaves(self, id, slaveList):
""" Sets a Job's machine limit Slave list.
Input: id: The Job ID.
slaveList: A list of Slaves which are either not allowed to work on or are the only allowed Slave for a Job.
Returns: Success message.
"""
body = json.dumps({"Command":"setjobmachinelimitlistedslaves","JobID":id, "SlaveList":slaveList})
return self.connectionProperties.__put__("/api/jobs", body)
def SetJobMachineLimitWhiteListFlag(self, id, whiteListFlag):
""" Sets a Job's machine limit white list flag.
Input: id: The Job ID.
            whiteListFlag: If true, the Slaves in the slaveList are the only Slaves allowed to work on the Job; otherwise, the Slaves in the slaveList are not allowed to work on the Job.
Returns: Success message.
"""
body = json.dumps({"Command":"setjobmachinelimitwhitelistflag","JobID":id, "WhiteListFlag":whiteListFlag})
return self.connectionProperties.__put__("/api/jobs", body)
def SetJobMachineLimitMaximum(self, id, limit):
""" Sets a Job's machine limit maximum number of Slaves.
Input: id: The Job ID.
limit: The maximum number of Slaves that can work on this Job at any one time.
Returns: Success message.
"""
body = json.dumps({"Command":"setjobmachinelimitmaximum","JobID":id, "Limit":limit})
return self.connectionProperties.__put__("/api/jobs", body)
def AppendJobFrameRange(self, id, frameList):
""" Appends to a Job's frame range without affecting the existing Tasks. The only exception is if the Job's chunk size is greater than one, and the last Task is having frames appended to it.
Input: id: The Job ID.
frameList: The additional frames to append.
Returns: Success message.
"""
body = json.dumps({"Command":"appendjobframerange","JobID":id, "FrameList":frameList})
return self.connectionProperties.__put__("/api/jobs", body)
def SetJobFrameRange(self, id, frameList, chunkSize):
""" Modifies a Job's frame range. If the Job is currently being rendered, any rendering Tasks will be requeued to perform this operation.
Input: id: The Job ID.
frameList: The frame list.
chunkSize: The chunk size.
Returns: Success message.
"""
body = json.dumps({"Command":"setjobframerange","JobID":id, "FrameList":frameList, "ChunkSize":chunkSize})
return self.connectionProperties.__put__("/api/jobs", body)
#Job Details
def GetJobDetails(self, ids):
""" Gets the Job details for the provided Job IDs.
Input: The Job IDs (may be a list).
Returns: The Job Details for the valid Job IDs provided.
"""
script = "/api/jobs"
script = script +"?JobID=" + ArrayToCommaSeparatedString(ids)+"&Details=true"
return self.connectionProperties.__get__(script)
#Undelete/Purge Deleted
def GetDeletedJobs(self, ids = None):
""" Gets the undeleted Jobs that correspond to the provided Job IDs. If no IDs are provided, gets all the deleted Jobs.
Input: The Job IDs (optional, may be a list).
Returns: The Job/s (list).
"""
script = "/api/jobs?Deleted=true"
        if ids is not None:
script = script +"&JobID=" + ArrayToCommaSeparatedString(ids)
return self.connectionProperties.__get__(script)
def GetDeletedJobIDs(self):
""" Gets all the deleted Job IDs.
Returns: The list of deleted Job IDs.
"""
return self.connectionProperties.__get__("/api/jobs?IdOnly=true&Deleted=true")
def PurgeDeletedJobs(self, ids):
""" Purges the deleted Jobs that correspond to the provided IDs from the deleted Job collection.
Input: The deleted Job IDs (may be a list).
Returns: Success message.
"""
script = "/api/jobs?Purge=true"
script = script +"&JobID=" + ArrayToCommaSeparatedString(ids)
return self.connectionProperties.__delete__(script)
def UndeleteJob(self, id):
""" Undeletes deleted Job.
Input: id: The Job ID.
Returns: Success message.
"""
body = '{"Command":"undelete", "JobID":"'+id+'"}'
return self.connectionProperties.__put__("/api/jobs", body)
def UndeleteJobs(self, ids):
""" Undeletes deleted Jobs.
            Input: ids: The Job IDs (list).
Returns: Success message.
"""
body = json.dumps({"Command":"undelete","JobIDs":ids})
return self.connectionProperties.__put__("/api/jobs", body)
def buildJobSubmission(info, plugins, aux, idOnly):
infoText = fileRead(info)
pluginsText = fileRead(plugins)
body = '{"JobInfo":'+infoText+',"PluginInfo":'+pluginsText+',"AuxFiles":'+json.dumps(aux)
if idOnly:
body += ',"IdOnly":true'
body += '}'
return body
def fileRead(filelocation):
    """ Reads a Deadline "key=value" info file and returns its contents as a JSON object string. """
    obj = '{'
    with open(filelocation, 'r') as file:
        for line in file:
            line = line.replace('\n', '').replace('\t', '')
            tokens = line.split("=", 1)
            if len(tokens) == 2:
                obj = obj + '"' + tokens[0].strip() + '":"' + tokens[1].strip() + '",'
    if obj.endswith(','):
        obj = obj[:-1]
    obj = obj + '}'
    return obj
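# Hedged usage sketch (not from the original module; the file contents below are
# an assumed example): fileRead turns a Deadline "key=value" info file into the
# JSON object string that buildJobSubmission embeds in the submission body.
import tempfile
def _demo_fileRead():
    with tempfile.NamedTemporaryFile('w', suffix='.job', delete=False) as f:
        f.write("Plugin=CommandLine\nFrames=0-10\n")
    assert fileRead(f.name) == '{"Plugin":"CommandLine","Frames":"0-10"}'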
|
11585476
|
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
import xmltodict
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
import json
def response_parser(response, present='dict'):
""" Convert Hikvision results
"""
if isinstance(response, (list,)):
result = "".join(response)
else:
result = response.text
if present == 'dict':
if isinstance(response, (list,)):
events = []
for event in response:
e = json.loads(json.dumps(xmltodict.parse(event)))
events.append(e)
return events
return json.loads(json.dumps(xmltodict.parse(result)))
else:
return result
class Client:
def __init__(self, host, login=None, password=<PASSWORD>, timeout=3, isapi_prefix='ISAPI'):
self.host = host
self.login = login
self.password = password
self.timeout = float(timeout)
self.isapi_prefix = isapi_prefix
self.req = self._check_session()
self.count_events = 1
def _check_session(self):
"""Check the connection with device
:return request.session() object
"""
full_url = urljoin(self.host, self.isapi_prefix + '/System/status')
session = requests.session()
session.auth = HTTPBasicAuth(self.login, self.password)
response = session.get(full_url)
if response.status_code == 401:
session.auth = HTTPDigestAuth(self.login, self.password)
response = session.get(full_url)
response.raise_for_status()
return session
def getNumberPlates(self):
        # str.format() needs a {0} placeholder; the original "%s" was never substituted
        payload = "<AfterTime><picTime>{0}</picTime></AfterTime>".format("0")
        response = self.req.request(
            method='post', url=urljoin(self.host, self.isapi_prefix + '/Traffic/channels/1/vehicleDetect/plates'),
            timeout=self.timeout, stream=True, data=payload)
return response
cam = Client('http://192.168.1.20', 'admin', '<PASSWORD>')
res = cam.getNumberPlates()
print(res.text)
print(response_parser(res))
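# Hedged sketch (not part of the original script): response_parser only needs an
# object with a .text attribute, so it can be exercised without a live camera.
# The XML payload below is an assumed example.
class _FakeResponse:
    text = "<Plate><plateNumber>ABC123</plateNumber></Plate>"
assert response_parser(_FakeResponse()) == {"Plate": {"plateNumber": "ABC123"}}
assert response_parser(_FakeResponse(), present='text') == _FakeResponse.text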
|
11585477
|
import yt
# target file
file = 'Data_000000'
# load data
ds = yt.load( file )
ad = ds.all_data()
# filter data if required
# ref: https://yt-project.org/docs/dev/analyzing/filtering.html#cut-regions
dense = ad.cut_region( ['obj["Dens"]>1.0e2'] )
# calculate center-of-mass
cm = dense.quantities.weighted_average_quantity( ['x','y','z'], weight='cell_mass' )
print( 'CM = (%13.7e, %13.7e, %13.7e) in code units' % (cm[0].in_units('code_length'),
cm[1].in_units('code_length'),
cm[2].in_units('code_length')) )
|
11585478
|
import sys
from xrspatial import __main__ as m
from unittest.mock import patch
import pytest
# test_args include copy-examples, fetch-data, or examples (does both)
@pytest.mark.skip(reason="meant only for internal use")
def run_examples_cmds(*cli_cmds):
"""
Run conda package cli commands to download examples and fetch data
for notebooks.
Parameters: 'copy-examples', 'fetch-data', 'examples'
Returns: downloads examples and data to new xrspatial-examples
    directory in xarray-spatial
"""
for arg in cli_cmds:
with patch.object(sys, 'argv', ['xrspatial', arg]):
m.main()
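# Hedged usage sketch (internal helper, normally skipped by pytest):
#   run_examples_cmds('copy-examples', 'fetch-data')
# is equivalent to invoking `xrspatial copy-examples` and then `xrspatial fetch-data`.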
|
11585494
|
from calendar import timegm
from datetime import datetime
from typing import Dict, List, Optional, TypeVar
from uuid import uuid4
import pytest
from _pytest.monkeypatch import MonkeyPatch
from fastapi import FastAPI, HTTPException
from fastapi.encoders import jsonable_encoder
from starlette.status import (
HTTP_200_OK,
HTTP_400_BAD_REQUEST,
HTTP_401_UNAUTHORIZED,
HTTP_403_FORBIDDEN,
HTTP_404_NOT_FOUND,
HTTP_409_CONFLICT,
)
import fastapi_auth.auth_app
from fastapi_auth.auth_app import AdminAuthEndpointName, AuthEndpointName, get_epoch, remove_expired_tokens
from fastapi_auth.auth_settings import get_auth_settings
from fastapi_auth.fastapi_util.api_model import APIMessage
from fastapi_auth.fastapi_util.util.session import context_session
from fastapi_auth.models.auth import AuthTokens
from fastapi_auth.models.user import UserID
from fastapi_auth.orm.refresh_token import RefreshToken
from fastapi_auth.security.json_web_token import generate_tokens
from fastapi_auth.util.cache import clear_caches
from tests.test_auth_app.test_endpoints.shared import (
TestAuthApiBase,
admin_username2,
email3,
<PASSWORD>,
<PASSWORD>,
<PASSWORD>,
username1,
username2,
username3,
)
from tests.util.custom_user import User, UserCreateRequest, UserResult, UserUpdate
AuthUser = UserResult
AuthUpdateRequest = UserUpdate
T = TypeVar("T")
class TestDebug(TestAuthApiBase):
fixture_names = ("debug_auth_app",)
debug_auth_app: FastAPI
@property
def auth_app(self) -> FastAPI:
return self.debug_auth_app
@pytest.fixture(scope="module")
def registered_user(self, monkeypatch_module: MonkeyPatch, debug_auth_app: FastAPI) -> AuthUser:
self.debug_auth_app = debug_auth_app
return self._get_registration_response(AuthUser)
@pytest.fixture(scope="module")
def admin_tokens(self, debug_auth_app: FastAPI) -> AuthTokens:
self.debug_auth_app = debug_auth_app
tokens = self._get_admin_tokens(admin_scope=True)
assert tokens.expires_in is None
return tokens
@pytest.fixture(scope="module")
def non_admin_tokens(self, debug_auth_app: FastAPI) -> AuthTokens:
self.debug_auth_app = debug_auth_app
return self._get_admin_tokens(admin_scope=False)
@pytest.fixture(scope="module")
def admin_access_headers(self, admin_tokens: AuthTokens, debug_auth_app: FastAPI) -> Dict[str, str]:
self.debug_auth_app = debug_auth_app
return {"authorization": f"bearer {admin_tokens.access_token}"}
@pytest.fixture(scope="module")
def non_admin_access_headers(self, non_admin_tokens: AuthTokens, debug_auth_app: FastAPI) -> Dict[str, str]:
self.debug_auth_app = debug_auth_app
return {"authorization": f"bearer {non_admin_tokens.access_token}"}
@pytest.fixture(scope="module")
def refresh_headers(self, non_admin_tokens: AuthTokens, debug_auth_app: FastAPI) -> Dict[str, str]:
self.debug_auth_app = debug_auth_app
return {"authorization": f"bearer {non_admin_tokens.refresh_token}"}
def test_login(self, admin_tokens: AuthTokens) -> None:
assert admin_tokens.token_type == "bearer" # the fixture is the test
def test_login_password_fails(self) -> None:
admin_username = get_auth_settings().first_superuser
assert admin_username is not None
bad_password = "<PASSWORD>"
message = self._get_login_response(admin_username, bad_password, APIMessage, HTTP_401_UNAUTHORIZED)
assert message.detail == "Incorrect password"
def test_login_username_fails(self) -> None:
bad_username = "<EMAIL>"
bad_password = "<PASSWORD>"
message = self._get_login_response(bad_username, bad_password, APIMessage, HTTP_401_UNAUTHORIZED)
assert message.detail == "User not found"
def test_read_users(self, admin_access_headers: Dict[str, str]) -> None:
url = self.debug_auth_app.url_path_for(AdminAuthEndpointName.read_users)
self.request("GET", url, List[AuthUser], headers=admin_access_headers)
def test_register(self, registered_user: AuthUser) -> None:
assert registered_user.username == username1
# Confirm login works for the new user
login_url = self.debug_auth_app.url_path_for(AuthEndpointName.login)
body_data = {"username": username1, "password": <PASSWORD>}
self.request("POST", login_url, AuthTokens, HTTP_200_OK, data=body_data)
def test_create_user(
self, admin_access_headers: Dict[str, str], non_admin_access_headers: Dict[str, str], registered_user: AuthUser
) -> None:
user_create_request_1 = UserCreateRequest(username=username1, password=<PASSWORD>)
create_user_url = self.debug_auth_app.url_path_for(AdminAuthEndpointName.create_user)
body_json = jsonable_encoder(user_create_request_1)
message = self.request("POST", create_user_url, APIMessage, HTTP_401_UNAUTHORIZED, json=body_json)
assert message.detail == "Not authenticated"
message = self.request(
"POST", create_user_url, APIMessage, HTTP_403_FORBIDDEN, json=body_json, headers=non_admin_access_headers
)
assert message.detail == "Insufficient permissions"
user_create_request_2 = UserCreateRequest(username=username3, password=<PASSWORD>, email=email3)
body_json = jsonable_encoder(user_create_request_2)
created_user_model = self.request(
"POST", create_user_url, AuthUser, json=body_json, headers=admin_access_headers
)
assert created_user_model.username == username3
assert created_user_model.email == email3
with context_session() as session:
created_user: User = session.query(User).get(created_user_model.user_id)
assert created_user is not None
assert not created_user.is_superuser
assert created_user.email == email3
def test_read_user(self, admin_access_headers: Dict[str, str], registered_user: AuthUser) -> None:
url = self.debug_auth_app.url_path_for(AdminAuthEndpointName.read_user, user_id=str(registered_user.user_id))
auth_user = self.request("GET", url, AuthUser, HTTP_200_OK, headers=admin_access_headers)
assert auth_user.username == registered_user.username
def test_read_user_fails(self, admin_access_headers: Dict[str, str]) -> None:
url = self.debug_auth_app.url_path_for(AdminAuthEndpointName.read_user, user_id=str(uuid4()))
message = self.request("GET", url, APIMessage, HTTP_404_NOT_FOUND, headers=admin_access_headers)
assert message.detail == "User not found"
def test_reregister_fails(self, monkeypatch: MonkeyPatch, registered_user: AuthUser) -> None:
monkeypatch.setenv("API_DEBUG", "0")
clear_caches()
message = self._get_registration_response(APIMessage, HTTP_409_CONFLICT)
assert message.detail == "This username is already in use"
def test_refresh(self, admin_access_headers: Dict[str, str], refresh_headers: Dict[str, str]) -> None:
refresh_url = self.debug_auth_app.url_path_for(AuthEndpointName.refresh)
self.request("POST", refresh_url, AuthTokens, HTTP_200_OK, headers=refresh_headers)
message = self.request("POST", refresh_url, APIMessage, HTTP_401_UNAUTHORIZED, headers=refresh_headers)
assert message.detail == "Provided token was not a valid refresh token"
message = self.request("POST", refresh_url, APIMessage, HTTP_401_UNAUTHORIZED, headers=admin_access_headers)
assert message.detail == "Provided token was not a valid refresh token"
@pytest.mark.parametrize(
"headers", [None, {"authorization": f"bearer"}, {"authorization": f"bearer "}, {"authorization": f"bearer bad"}]
)
def test_token_problems(self, headers: Optional[Dict[str, str]], admin_tokens: AuthTokens) -> None:
if headers is None:
# Workaround for accessing fixtures in parametrized inputs
headers = {"authorization": f"nonbearer {admin_tokens.refresh_token}"}
url = self.debug_auth_app.url_path_for(AuthEndpointName.refresh)
message = self.request("POST", url, APIMessage, HTTP_401_UNAUTHORIZED, headers=headers)
assert message.detail == "Invalid token", headers
def test_refresh_fails(self) -> None:
url = self.debug_auth_app.url_path_for(AuthEndpointName.refresh)
fake_auth_user = AuthUser(user_id=UserID(uuid4()), username="<EMAIL>")
invalid_user_tokens = generate_tokens(user_id=fake_auth_user.user_id, is_superuser=False, scopes=[])
headers = {"authorization": f"bearer {invalid_user_tokens.refresh.encoded}"}
message = self.request("POST", url, APIMessage, HTTP_401_UNAUTHORIZED, headers=headers)
assert message.detail == "User not found; try logging in again"
def test_validate(
self,
admin_access_headers: Dict[str, str],
non_admin_access_headers: Dict[str, str],
refresh_headers: Dict[str, str],
) -> None:
url = self.debug_auth_app.url_path_for(AuthEndpointName.validate_token)
for headers in [admin_access_headers, non_admin_access_headers, refresh_headers]:
message = self.request("GET", url, APIMessage, HTTP_200_OK, headers=headers)
assert message.detail == "Token is valid for user"
def test_validate_fails(self) -> None:
fake_auth_user = AuthUser(user_id=UserID(uuid4()), username="<EMAIL>")
invalid_user_tokens = generate_tokens(user_id=fake_auth_user.user_id, is_superuser=False, scopes=[])
headers = {"authorization": f"bearer {invalid_user_tokens.access.encoded}"}
url = self.debug_auth_app.url_path_for(AuthEndpointName.validate_token)
message = self.request("GET", url, APIMessage, HTTP_401_UNAUTHORIZED, headers=headers)
assert message.detail == "User not found"
def test_update_username(self, registered_user: AuthUser, admin_access_headers: Dict[str, str]) -> None:
payload = jsonable_encoder(AuthUpdateRequest(username=username2).dict(exclude_unset=True))
url = self.debug_auth_app.url_path_for(AdminAuthEndpointName.update_user, user_id=str(registered_user.user_id))
updated_user = self.request("PATCH", url, AuthUser, headers=admin_access_headers, json=payload)
assert updated_user.user_id == registered_user.user_id
assert registered_user.username != username2
assert updated_user.username == username2
# Ensure registered_user still has a valid username
registered_user.username = username2
self._get_login_response(registered_user.username, password1, AuthTokens)
def test_update_password(self, registered_user: AuthUser, admin_access_headers: Dict[str, str]) -> None:
payload = jsonable_encoder(AuthUpdateRequest(password=<PASSWORD>).dict(exclude_unset=True))
url = self.debug_auth_app.url_path_for(AdminAuthEndpointName.update_user, user_id=str(registered_user.user_id))
updated_user = self.request("PATCH", url, AuthUser, headers=admin_access_headers, json=payload)
assert updated_user.user_id == registered_user.user_id
assert registered_user.username == updated_user.username
# Ensure new password succeeds for login
self._get_login_response(registered_user.username, password2, AuthTokens)
def test_empty_update_fails(self, registered_user: AuthUser, admin_access_headers: Dict[str, str]) -> None:
url = self.debug_auth_app.url_path_for(AdminAuthEndpointName.update_user, user_id=str(registered_user.user_id))
payload = jsonable_encoder(AuthUpdateRequest().dict(exclude_unset=True))
message = self.request(
"PATCH", url, APIMessage, HTTP_400_BAD_REQUEST, headers=admin_access_headers, json=payload
)
assert message.detail == "Nothing to update"
def test_missing_user_update_fails(self, registered_user: AuthUser, admin_access_headers: Dict[str, str]) -> None:
url = self.debug_auth_app.url_path_for(AdminAuthEndpointName.update_user, user_id=str(uuid4()))
payload = jsonable_encoder(AuthUpdateRequest(username="<EMAIL>").dict(exclude_unset=True))
message = self.request("PATCH", url, APIMessage, HTTP_404_NOT_FOUND, headers=admin_access_headers, json=payload)
assert message.detail == "User not found"
def test_admin_fails(self, registered_user: AuthUser) -> None:
message = self._get_login_response(
registered_user.username, password2, APIMessage, HTTP_403_FORBIDDEN, admin_scope=True
)
assert message.detail == "Insufficient permissions"
def test_logout(self) -> None:
tokens = self._get_admin_tokens()
url = self.debug_auth_app.url_path_for(AuthEndpointName.logout)
refresh_headers = {"authorization": f"bearer {tokens.refresh_token}"}
message = self.request("GET", url, APIMessage, headers=refresh_headers)
assert message.detail == "Logged out successfully"
message = self.request("GET", url, APIMessage, HTTP_401_UNAUTHORIZED, headers=refresh_headers)
assert message.detail == "Provided token was not a valid refresh token"
def test_logout_all(self) -> None:
tokens = self._get_admin_tokens()
tokens2 = self._get_admin_tokens()
refresh_headers = {"authorization": f"bearer {tokens.refresh_token}"}
refresh_headers2 = {"authorization": f"bearer {tokens2.refresh_token}"}
url = self.debug_auth_app.url_path_for(AuthEndpointName.logout_all)
message = self.request("GET", url, APIMessage, headers=refresh_headers)
assert message.detail == "Logged out all devices successfully"
message = self.request("GET", url, APIMessage, HTTP_401_UNAUTHORIZED, headers=refresh_headers2)
assert message.detail == "Provided token was not a valid refresh token"
def test_scopes(self, registered_user: AuthUser) -> None:
with pytest.raises(HTTPException) as exc_info:
generate_tokens(user_id=registered_user.user_id, is_superuser=False, scopes=["unexpected"])
assert exc_info.value.status_code == HTTP_401_UNAUTHORIZED
assert exc_info.value.detail == "Unrecognized scope: 'unexpected'"
def test_remove_expired_tokens(self, monkeypatch: MonkeyPatch) -> None:
auth_settings = get_auth_settings()
def real_get_epoch() -> int:
return timegm(datetime.utcnow().utctimetuple()) # seconds since epoch
assert abs(get_epoch() - real_get_epoch()) <= 1
def refresh_not_expired_epoch() -> int:
return real_get_epoch() + auth_settings.refresh_token_expire_seconds - 100
def refresh_expired_epoch() -> int:
return real_get_epoch() + auth_settings.refresh_token_expire_seconds + 100
with context_session() as session:
assert len(session.query(RefreshToken).all()) == 3
assert remove_expired_tokens(db=session) == 0
assert len(session.query(RefreshToken).all()) == 3
monkeypatch.setattr(fastapi_auth.auth_app, "get_epoch", refresh_not_expired_epoch)
assert remove_expired_tokens(db=session) == 0
assert len(session.query(RefreshToken).all()) == 3
monkeypatch.setattr(fastapi_auth.auth_app, "get_epoch", refresh_expired_epoch)
assert remove_expired_tokens(db=session) == 3
assert len(session.query(RefreshToken).all()) == 0
def test_read_self(self, non_admin_access_headers: Dict[str, str]) -> None:
url = self.debug_auth_app.url_path_for(AuthEndpointName.read_self)
self_user = self.request("GET", url, AuthUser, headers=non_admin_access_headers)
assert self_user.username == get_auth_settings().first_superuser
def test_update_self_fails(self, non_admin_access_headers: Dict[str, str]) -> None:
url = self.debug_auth_app.url_path_for(AuthEndpointName.update_self)
payload = jsonable_encoder(AuthUpdateRequest(username=username2))
message = self.request(
"PATCH", url, APIMessage, HTTP_409_CONFLICT, headers=non_admin_access_headers, json=payload
)
assert message.detail == "There was a conflict with an existing user"
def test_update_self(self, non_admin_access_headers: Dict[str, str]) -> None:
url = self.debug_auth_app.url_path_for(AuthEndpointName.update_self)
payload = jsonable_encoder(AuthUpdateRequest(username=admin_username2))
updated_user = self.request("PATCH", url, AuthUser, headers=non_admin_access_headers, json=payload)
assert updated_user.username == admin_username2
|
11585514
|
import random
import numpy as np
# tabular Q-learning where states and actions
# are discrete and stored in a table
class QLearner(object):
def __init__(self, state_dim,
num_actions,
init_exp=0.5, # initial exploration prob
final_exp=0.0, # final exploration prob
anneal_steps=500, # N steps for annealing exploration
alpha = 0.2,
discount_factor=0.9): # discount future rewards
# Q learning parameters
self.state_dim = state_dim
self.num_actions = num_actions
self.exploration = init_exp
self.init_exp = init_exp
self.final_exp = final_exp
self.anneal_steps = anneal_steps
self.discount_factor = discount_factor
self.alpha = alpha
# counters
self.train_iteration = 0
# table of q values
self.qtable = np.random.uniform(low=-1, high=1, size=(state_dim, num_actions))
def initializeState(self, state):
self.state = state
self.action = self.qtable[state].argsort()[-1]
return self.action
# select action based on epsilon-greedy strategy
def eGreedyAction(self, state):
if self.exploration > random.random():
action = random.randint(0, self.num_actions-1)
else:
action = self.qtable[state].argsort()[-1]
return action
    # do one Q-learning update: bootstrap off the greedy (max) value of the new state
    def updateModel(self, state, reward):
        action = self.eGreedyAction(state)
        self.train_iteration += 1
        self.annealExploration()
        # the Q-learning target uses the max over next-state actions,
        # independent of the (possibly exploratory) action selected above
        target = reward + self.discount_factor * self.qtable[state].max()
        self.qtable[self.state, self.action] = (1 - self.alpha) * self.qtable[self.state, self.action] + self.alpha * target
        self.state = state
        self.action = action
        return self.action
    # anneal exploration rate
    def annealExploration(self, strategy='linear'):
ratio = max((self.anneal_steps - self.train_iteration)/float(self.anneal_steps), 0)
self.exploration = (self.init_exp - self.final_exp) * ratio + self.final_exp
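# Hedged usage sketch (toy loop with a stand-in environment, not from the
# original source), showing the expected call order for the class above.
if __name__ == "__main__":
    learner = QLearner(state_dim=10, num_actions=4)
    action = learner.initializeState(0)
    for step in range(20):
        next_state = random.randint(0, 9)  # stand-in transition
        reward = random.random()           # stand-in reward signal
        action = learner.updateModel(next_state, reward)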
|
11585580
|
from __future__ import print_function
import sys
import random
import os
from builtins import range
import time
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.kmeans import H2OKMeansEstimator
from h2o.grid.grid_search import H2OGridSearch
class Test_PUBDEV_2980_kmeans:
"""
PUBDEV-2980: kmeans return the field model._model_json['output']['summary']
as null sometimes. Normally, it is twoDimensionTable. Sometimes, it is None.
This class is created to train a kmeans model with different parameters settings and re-create two
models with different field type for debugging purposes.
"""
# parameters denoting filenames of interested
training1_filenames = "smalldata/gridsearch/kmeans_8_centers_3_coords.csv"
test_name = "pyunit_PUBDEV_2980_kmeans.py" # name of this test
# store information about training/test data sets
x_indices = [] # store predictor indices in the data set
training1_data = [] # store training data sets
test_failed = 0 # count total number of tests that have failed
def __init__(self):
self.setup_data()
def setup_data(self):
"""
This function performs all initializations necessary:
load the data sets and set the training set indices
"""
self.training1_data = h2o.import_file(path=pyunit_utils.locate(self.training1_filenames))
self.x_indices = list(range(self.training1_data.ncol))
def test_kmeans_fields(self):
"""
test_kmeans_grid_search_over_validation_datasets performs the following:
a. build H2O kmeans models using grid search. Count and make sure models
are only built for hyper-parameters set to legal values. No model is built for bad hyper-parameters
values. We should instead get a warning/error message printed out.
b. For each model built using grid search, we will extract the parameters used in building
that model and manually build a H2O kmeans model. Training metrics are calculated from the
gridsearch model and the manually built model. If their metrics
differ by too much, print a warning message but don't fail the test.
c. we will check and make sure the models are built within the max_runtime_secs time limit that was set
for it as well. If max_runtime_secs was exceeded, declare test failure.
"""
print("*******************************************************************************************")
h2o.cluster_info()
good_params_list = {'max_iterations': 20, 'k': 6, 'init': 'Furthest', 'seed': 1464891169}
good_model_params = {'max_runtime_secs': 0.014673351}
good_model = H2OKMeansEstimator(**good_params_list)
good_model.train(x=self.x_indices, training_frame=self.training1_data,
**good_model_params)
bad_params_list = {'init': 'Random', 'seed': 1464888628, 'k': 6, 'max_iterations': 0}
bad_model_params = {'max_runtime_secs': 0.007948218600000001}
bad_model = H2OKMeansEstimator(**bad_params_list)
bad_model.train(x=self.x_indices, training_frame=self.training1_data,
**bad_model_params)
good_model_type = type(good_model._model_json['output']['model_summary'])
bad_model_type = type(bad_model._model_json['output']['model_summary'])
print("good_model._model_json['output']['model_summary'] type is {0}. "
"bad_model._model_json['output']['model_summary'] type is "
"{1}".format(good_model_type, bad_model_type))
        if good_model_type != bad_model_type:
print("They are not equal for some reason....")
self.test_failed = 1
else:
print("The fields are of the same type.")
def test_PUBDEV_2980_for_kmeans():
"""
Create and instantiate class and perform tests specified for kmeans
:return: None
"""
test_kmeans_grid = Test_PUBDEV_2980_kmeans()
test_kmeans_grid.test_kmeans_fields()
sys.stdout.flush()
if test_kmeans_grid.test_failed: # exit with error if any tests have failed
sys.exit(1)
if __name__ == "__main__":
pyunit_utils.standalone_test(test_PUBDEV_2980_for_kmeans)
else:
test_PUBDEV_2980_for_kmeans()
|
11585582
|
import os, copy, torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
import torchvision
from torchvision import datasets, models, transforms
from torchvision.utils import make_grid
from tensorboardX import SummaryWriter
from utils.metrics import *
try:
    from apex import amp
    from apex.parallel import DistributedDataParallel as ApexDDP
except ImportError:
    raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run with apex.")
import torch.multiprocessing as mp
def set_requires_grad(nets, requires_grad=False):
"""Set requies_grad=Fasle for all the networks to avoid unnecessary computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
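# Hedged usage note (netD is a hypothetical discriminator attribute): in the
# GAN-style training this module supports, a typical pattern is
#   set_requires_grad(netD, False) before the generator step and
#   set_requires_grad(netD, True) before the discriminator step.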
def apply_scheduler(optimizer, lr_policy, num_epoch=None, total_num_epoch=None):
if lr_policy == 'linear':
# num_epoch with initial lr
# rest of epoch linearly decrease to 0 (the last epoch is not 0)
def lambda_rule(epoch):
# lr_l = 1.0 - max(0, epoch + 1 + epoch_count - niter) / float(niter_decay + 1)
lr_l = 1.0 - max(0, epoch + 1 - num_epoch) / float(total_num_epoch - num_epoch + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
elif lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % lr_policy)
return scheduler
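# Worked example of the 'linear' policy above (values computed from lambda_rule,
# not from the source): with num_epoch=6 and total_num_epoch=10, the multiplier
# stays at 1.0 through epoch 5, then decays linearly:
#   epoch 6 -> 0.8, epoch 7 -> 0.6, epoch 8 -> 0.4, epoch 9 -> 0.2.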
class base_model(nn.Module):
def __init__(self, args):
super(base_model, self).__init__()
self.device = args.device
self.isTrain = args.isTrain
self.project_name = args.project_name
self.exp_dir = args.exp_dir
self.use_tensorboardX = True
self.use_apex = True
self.cropSize = args.cropSize # patch size for training the model. Default: [240, 320]
self.cropSize_h, self.cropSize_w = self.cropSize[0], self.cropSize[1]
self.batch_size = args.batch_size
self.total_epoch_num = args.total_epoch_num # total number of epoch in training
self.save_steps = 5
self.task_lr = 1e-4 # default task learning rate
self.D_lr = 5e-5 # default discriminator learning rate
self.G_lr = 5e-5 # default generator learning rate
self.real_label = 1
self.syn_label = 0
def _initialize_training(self):
if self.project_name is not None:
self.save_dir = os.path.join(self.exp_dir, self.project_name)
else:
self.project_name = self._get_project_name()
self.save_dir = os.path.join(self.exp_dir, self.project_name)
print('project name: {}'.format(self.project_name))
print('save dir: {}'.format(self.save_dir))
if not os.path.exists(self.save_dir): os.makedirs(self.save_dir)
self.train_log = os.path.join(self.save_dir, 'train.log')
self.evaluate_log = os.path.join(self.save_dir, 'evaluate.log')
self.file_to_note_bestModel = os.path.join(self.save_dir,'note_bestModel.log')
if self.use_tensorboardX:
self.tensorboard_train_dir = os.path.join(self.save_dir, 'tensorboardX_train_logs')
self.train_SummaryWriter = SummaryWriter(self.tensorboard_train_dir)
self.tensorboard_eval_dir = os.path.join(self.save_dir, 'tensorboardX_eval_logs')
self.eval_SummaryWriter = SummaryWriter(self.tensorboard_eval_dir)
# self.train_display_freq = 500
# self.val_write_freq = 10
self.tensorboard_num_display_per_epoch = 5
self.val_display_freq = 10
def _initialize_networks(self):
for name, model in self.model_dict.items():
model.train().to(self.device)
init_weights(model, net_name=name, init_type='normal', gain=0.02)
def _get_scheduler(self, optim_type='linear'):
        '''
        if optim_type is a str  -> all optims use this type of scheduler (default: 'linear')
        if optim_type is a list -> each optim uses its own scheduler
        '''
self.scheduler_list = []
if isinstance(optim_type, str):
for name in self.optim_name:
self.scheduler_list.append(apply_scheduler(getattr(self, name), lr_policy=optim_type, num_epoch=0.6*self.total_epoch_num,
total_num_epoch=self.total_epoch_num))
elif isinstance(optim_type, list):
for name, optim in zip(self.optim_name, optim_type):
self.scheduler_list.append(apply_scheduler(getattr(self, name), lr_policy=optim, num_epoch=0.6*self.total_epoch_num,
total_num_epoch=self.total_epoch_num))
else:
raise RuntimeError("optim type should be either string or list!")
    def _init_apex(self, Num_losses):
        model_list = []
        optim_list = []
        for m in self.model_name:
            model_list.append(getattr(self, m))
        for o in self.optim_name:
            optim_list.append(getattr(self, o))
        model_list, optim_list = amp.initialize(model_list, optim_list, opt_level="O1", num_losses=Num_losses)
        # write the amp-wrapped models/optimizers back onto the class,
        # otherwise the initialization above has no effect
        for name, model in zip(self.model_name, model_list):
            setattr(self, name, model)
        for name, optim in zip(self.optim_name, optim_list):
            setattr(self, name, optim)
def _check_parallel(self):
if torch.cuda.device_count() > 1:
for name in self.model_name:
setattr(self, name, nn.DataParallel(getattr(self, name)))
    def _check_distribute(self):
        # not ready to use yet
        if torch.cuda.device_count() > 1:
            # world size is the number of processes participating in the job
            # torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')
            # mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
            for name in self.model_name:
                if self.use_apex:
                    setattr(self, name, ApexDDP(getattr(self, name)))
                else:
                    setattr(self, name, nn.parallel.DistributedDataParallel(getattr(self, name)))
def _set_models_train(self, model_name):
for name in model_name:
getattr(self, name).train()
def _set_models_eval(self, model_name):
for name in model_name:
getattr(self, name).eval()
def _set_models_float(self, model_name):
for name in model_name:
for layers in getattr(self, name).modules():
layers.float()
def save_models(self, model_list, mode, save_list=None):
'''
mode include best, latest, or a number (epoch)
save as non-dataparallel state_dict
save_list is used when we save model as a different name for later use
'''
if not save_list:
for model_name in model_list:
if mode == 'latest':
path_to_save_paramOnly = os.path.join(self.save_dir, 'latest_{}.pth'.format(model_name))
elif mode == 'best':
path_to_save_paramOnly = os.path.join(self.save_dir, 'best_{}.pth'.format(model_name))
elif isinstance(mode, int):
path_to_save_paramOnly = os.path.join(self.save_dir, 'epoch-{}_{}.pth'.format(str(mode), model_name))
try:
state_dict = getattr(self, model_name).module.state_dict()
except AttributeError:
state_dict = getattr(self, model_name).state_dict()
model_weights = copy.deepcopy(state_dict)
torch.save(model_weights, path_to_save_paramOnly)
else:
assert len(model_list) == len(save_list)
for save_name, model_name in zip(save_list, model_list):
if mode == 'latest':
path_to_save_paramOnly = os.path.join(self.save_dir, 'latest_{}.pth'.format(save_name))
elif mode == 'best':
path_to_save_paramOnly = os.path.join(self.save_dir, 'best_{}.pth'.format(save_name))
elif isinstance(mode, int):
path_to_save_paramOnly = os.path.join(self.save_dir, 'epoch-{}_{}.pth'.format(str(mode), save_name))
try:
state_dict = getattr(self, model_name).module.state_dict()
except AttributeError:
state_dict = getattr(self, model_name).state_dict()
model_weights = copy.deepcopy(state_dict)
torch.save(model_weights, path_to_save_paramOnly)
def _load_models(self, model_list, mode, isTrain=False, model_path=None):
if model_path is None:
model_path = self.save_dir
for model_name in model_list:
if mode == 'latest':
path = os.path.join(model_path, 'latest_{}.pth'.format(model_name))
elif mode == 'best':
path = os.path.join(model_path, 'best_{}.pth'.format(model_name))
elif isinstance(mode, int):
path = os.path.join(model_path, 'epoch-{}_{}.pth'.format(str(mode), model_name))
else:
raise RuntimeError("Mode not implemented")
state_dict = torch.load(path)
try:
getattr(self, model_name).load_state_dict(state_dict)
except RuntimeError:
# in the case of parallel model loading non-parallel state_dict || add module to all keys
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = 'module.' + k # add `module.`
new_state_dict[name] = v
getattr(self, model_name).load_state_dict(new_state_dict)
if isTrain:
getattr(self, model_name).to(self.device).train()
else:
getattr(self, model_name).to(self.device).eval()
def save_tensor2np(self, tensor, name, epoch, path=None):
# not ready to use in this project
        if path is None:
path = self.save_dir
generated_sample = tensor.detach().cpu().numpy()
generated_sample_save_path = os.path.join(path, 'tensor2np', 'Epoch-%s_%s.npy' % (epoch, name))
if not os.path.exists(os.path.join(path, 'tensor2np')):
os.makedirs(os.path.join(path, 'tensor2np'))
np.save(generated_sample_save_path, generated_sample)
def write_2_tensorboardX(self, writer, input_tensor, name, mode, count, nrow=None, normalize=True, value_range=(-1.0, 1.0)):
if mode == 'image':
if not nrow:
raise RuntimeError('tensorboardX: must specify number of rows in image mode')
grid = make_grid(input_tensor, nrow=nrow, normalize=normalize, range=value_range)
writer.add_image(name, grid, count)
elif mode == 'scalar':
if isinstance(input_tensor, list) and isinstance(name, list):
assert len(input_tensor) == len(name)
for n, t in zip(name, input_tensor):
writer.add_scalar(n, t, count)
else:
writer.add_scalar(name, input_tensor, count)
else:
raise RuntimeError('tensorboardX: this mode is not yet implemented')
|
11585612
|
from maneuvers.strikes.double_touch import DoubleTouch
from maneuvers.dribbling.carry_and_flick import CarryAndFlick
from maneuvers.maneuver import Maneuver
from maneuvers.strikes.aerial_strike import AerialStrike, FastAerialStrike
from maneuvers.strikes.close_shot import CloseShot
from maneuvers.strikes.dodge_strike import DodgeStrike
from maneuvers.strikes.ground_strike import GroundStrike
from maneuvers.strikes.mirror_strike import MirrorStrike
from maneuvers.strikes.strike import Strike
from rlutilities.linear_algebra import vec3, dot
from rlutilities.simulation import Car, Ball
from tools.game_info import GameInfo
from tools.intercept import Intercept
from tools.vector_math import distance, ground_distance, align
def direct_shot(info: GameInfo, car: Car, target: vec3) -> Strike:
dodge_shot = DodgeStrike(car, info, target)
ground_shot = GroundStrike(car, info, target)
if car.boost > 40: # TODO
# aerial_strike = AerialStrike(car, info, target)
fast_aerial = FastAerialStrike(car, info, target)
better_aerial_strike = min([fast_aerial], key=lambda strike: strike.intercept.time)
if (
better_aerial_strike.intercept.time < dodge_shot.intercept.time
and abs(better_aerial_strike.intercept.position[1] - info.their_goal.center[1]) > 500
):
if ground_distance(better_aerial_strike.intercept, info.their_goal.center) < 8000:
return DoubleTouch(better_aerial_strike)
return better_aerial_strike
if (
dodge_shot.intercept.time < ground_shot.intercept.time - 0.1
or ground_distance(dodge_shot.intercept, target) < 2000
or distance(ground_shot.intercept.ball.velocity, car.velocity) < 500
or is_opponent_close(info, 300)
):
if (
distance(dodge_shot.intercept.ground_pos, target) < 4000
and abs(dodge_shot.intercept.ground_pos[0]) < 2000
):
return CloseShot(car, info, target)
return dodge_shot
return ground_shot
def any_shot(info: GameInfo, car: Car, target: vec3, intercept: Intercept, allow_dribble=False) -> Maneuver:
ball = intercept.ball
if (
allow_dribble
and (ball.position[2] > 100 or abs(ball.velocity[2]) > 250 or distance(car, info.ball) < 300)
and abs(ball.velocity[2]) < 700
and ground_distance(car, ball) < 1500
and ground_distance(ball, info.my_goal.center) > 1000
and ground_distance(ball, info.their_goal.center) > 1000
and not is_opponent_close(info, info.ball.position[2] * 2 + 1000)
):
return CarryAndFlick(car, info, target)
direct = direct_shot(info, car, target)
if not isinstance(direct, GroundStrike) and intercept.time < car.time + 4.0:
alignment = align(car.position, ball, target)
if alignment < -0.3 and abs(ball.position[1] - target[1]) > 3000:
return MirrorStrike(car, info, target)
return direct
def is_opponent_close(info: GameInfo, dist: float) -> bool:
for opponent in info.get_opponents():
if ground_distance(opponent.position + opponent.velocity * 0.5, info.ball) < dist:
return True
return False
|
11585622
|
from datetime import datetime
from django.test import TestCase
from simple_history.signals import (
post_create_historical_record,
pre_create_historical_record,
)
from ..models import Poll
today = datetime(2021, 1, 1, 10, 0)
class PrePostCreateHistoricalRecordSignalTest(TestCase):
def setUp(self):
self.signal_was_called = False
self.signal_instance = None
self.signal_history_instance = None
self.signal_sender = None
def test_pre_create_historical_record_signal(self):
def handler(sender, instance, **kwargs):
self.signal_was_called = True
self.signal_instance = instance
self.signal_history_instance = kwargs["history_instance"]
self.signal_sender = sender
pre_create_historical_record.connect(handler)
p = Poll(question="what's up?", pub_date=today)
p.save()
self.assertTrue(self.signal_was_called)
self.assertEqual(self.signal_instance, p)
self.assertIsNotNone(self.signal_history_instance)
self.assertEqual(self.signal_sender, p.history.first().__class__)
def test_post_create_historical_record_signal(self):
def handler(sender, instance, history_instance, **kwargs):
self.signal_was_called = True
self.signal_instance = instance
self.signal_history_instance = history_instance
self.signal_sender = sender
post_create_historical_record.connect(handler)
p = Poll(question="what's up?", pub_date=today)
p.save()
self.assertTrue(self.signal_was_called)
self.assertEqual(self.signal_instance, p)
self.assertIsNotNone(self.signal_history_instance)
self.assertEqual(self.signal_sender, p.history.first().__class__)
|
11585638
|
class BinTree:
'''
Root: integer value
Left: BinTree
Right: BinTree
if left and right are both None, the tree is a leaf (an ending node)
'''
def __init__(self, root, left, right):
self.root = root
self.left = left
self.right = right
'''
Converts the Binary Tree into a list
The first element is None.
Each element in the list after the first is a root in the tree.
The children of the element at index i are at indices (2i, 2i+1)
The tree given in the README would become [None, 4, 2, 7, 1, 3, 6, 9]
The children of (4) are located at (2, 3) or (2*1, 2*1+1)
The children of (2) are located at (4, 5) or (2*2, 2*2+1)
The children of (7) are located at (6, 7) or (2*3, 2*3+1)
If no child root exist, that place is replaced with None
Used only for testing purposes
'''
def convertToLst(self):
lst = [None]
queue = [self]
lst.append(self.root)
        # Breadth-first traversal of the tree
while len(queue) > 0:
v = queue.pop(0)
i = lst.index(v.root)
if len(lst) < 2*i:
while (len(lst) < 2*i):
lst.append(None)
if v.left:
queue.append(v.left)
lst.append(v.left.root)
else:
lst.append(None)
if v.right:
queue.append(v.right)
lst.append(v.right.root)
else:
lst.append(None)
return lst
'''
Solution to Challenge 4
Directly modifies self
Reverses the subtrees
Switches the subtrees s.t. self.left = self.right, self.right = self.left
'''
def reverse(self):
reversedLeft = None
reversedRight = None
if self.left:
reversedLeft = self.left.reverse()
if self.right:
reversedRight = self.right.reverse()
self.left = reversedRight
self.right = reversedLeft
return self
leaf1 = BinTree(1, None, None)
leaf2 = BinTree(3, None, None)
leaf3 = BinTree(6, None, None)
leaf4 = BinTree(9, None, None)
leftTree = BinTree(2, leaf1, leaf2)
rightTree = BinTree(7, leaf3, leaf4)
tree = BinTree(4, leftTree, rightTree)
print(tree.convertToLst())
tree.reverse()
print(tree.convertToLst())
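# For the tree built above, the two printed lists begin with:
#   [None, 4, 2, 7, 1, 3, 6, 9, ...]  before reverse
#   [None, 4, 7, 2, 9, 6, 3, 1, ...]  after reverse (subtrees swapped recursively)
# (the trailing entries are the None placeholders appended for leaf children)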
|
11585666
|
from torch.utils.data import DataLoader
from ..util import barit, DirectoryDataset, LoadedDataset
from ..hdr.io import imread
from .opts import fetch_opts
def _custom_get_item(self, index):
if self.train:
img, target = self.train_data[index], self.train_labels[index]
else:
img, target = self.test_data[index], self.test_labels[index]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def torchvision_dataset(transform=None, target_transform=None, train=True, subset=None):
"""Creates a dataset from torchvision, configured using Command Line Arguments.
Args:
transform (callable, optional): A function that transforms an image (default None).
target_transform (callable, optional): A function that transforms a label (default None).
train (bool, optional): Training set or validation - if applicable (default True).
subset (string, optional): Specifies the subset of the relevant
categories, if any of them was split (default, None).
Relevant Command Line Arguments:
- **dataset**: `--data`, `--torchvision_dataset`.
Note:
Settings are automatically acquired from a call to :func:`dlt.config.parse`
from the built-in ones. If :func:`dlt.config.parse` was not called in the
main script, this function will call it.
Warning:
Unlike the torchvision datasets, this function returns a dataset that
        uses NumPy arrays instead of PIL Images.
"""
opts = fetch_opts(['dataset'], subset)
if opts.torchvision_dataset is None:
if subset is not None:
apnd = '_' + subset
else:
apnd = ''
raise ValueError('No value given for --torchvision_dataset{0}.'.format(apnd))
if opts.torchvision_dataset == 'mnist':
from torchvision.datasets import MNIST
MNIST.__getitem__ = _custom_get_item
ret_dataset = MNIST(opts.data, train=train, download=True, transform=transform,
target_transform=target_transform)
# Add channel dimension and make numpy for consistency
if train:
ret_dataset.train_data = ret_dataset.train_data.unsqueeze(3).numpy()
ret_dataset.train_labels = ret_dataset.train_labels.numpy()
else:
ret_dataset.test_data = ret_dataset.test_data.unsqueeze(3).numpy()
ret_dataset.test_labels = ret_dataset.test_labels.numpy()
elif opts.torchvision_dataset == 'fashionmnist':
from torchvision.datasets import FashionMNIST
FashionMNIST.__getitem__ = _custom_get_item
ret_dataset = FashionMNIST(opts.data, train=train, download=True, transform=transform,
target_transform=target_transform)
if train:
ret_dataset.train_data = ret_dataset.train_data.unsqueeze(3).numpy()
ret_dataset.train_labels = ret_dataset.train_labels.numpy()
else:
ret_dataset.test_data = ret_dataset.test_data.unsqueeze(3).numpy()
ret_dataset.test_labels = ret_dataset.test_labels.numpy()
elif opts.torchvision_dataset == 'cifar10':
from torchvision.datasets import CIFAR10
CIFAR10.__getitem__ = _custom_get_item
ret_dataset = CIFAR10(opts.data, train=train, download=True, transform=transform,
target_transform=target_transform)
elif opts.torchvision_dataset == 'cifar100':
from torchvision.datasets import CIFAR100
CIFAR100.__getitem__ = _custom_get_item
        ret_dataset = CIFAR100(opts.data, train=train, download=True, transform=transform,
                               target_transform=target_transform)
    else:
        # fail loudly instead of returning an undefined ret_dataset
        raise ValueError('Unknown --torchvision_dataset value: {0}'.format(opts.torchvision_dataset))
    return ret_dataset
def directory_dataset(load_fn=imread, preprocess=None, subset=None):
"""Creates a :class:`dlt.util.DirectoryDataset`, configured using Command Line Arguments.
Args:
load_fn (callable, optional): Function that loads the data files
(default :func:`dlt.hdr.imread`).
preprocess (callable, optional): A function that takes a single data
point from the dataset to preprocess on the fly (default None).
subset (string, optional): Specifies the subset of the relevant
categories, if any of them was split (default, None).
Relevant Command Line Arguments:
- **dataset**: `--data`, `--load_all`, `--extensions`.
- **dataloader**: `--num_threads`.
Note:
Settings are automatically acquired from a call to :func:`dlt.config.parse`
from the built-in ones. If :func:`dlt.config.parse` was not called in the
main script, this function will call it.
"""
opts = fetch_opts(['dataset', 'dataloader'], subset)
if opts.load_all:
dummy_set = DirectoryDataset(opts.data, extensions=opts.extensions, load_fn=load_fn)
dummy_loader = DataLoader(dummy_set, batch_size=1, num_workers=opts.num_threads, pin_memory=False)
loaded_set = [batch[0].clone().numpy() for batch in barit(dummy_loader, start='Loading')]
ret_dataset = LoadedDataset(loaded_set, preprocess=preprocess)
print('Done loading from {0}'.format(opts.data))
else:
ret_dataset = DirectoryDataset(opts.data, load_fn=load_fn, preprocess=preprocess, extensions=opts.extensions)
print('Created dataset from {0}'.format(opts.data))
return ret_dataset
def loader(dataset, preprocess=None, subset=None, worker_init_fn=None):
"""Creates a torch DataLoader using the dataset, configured using Command Line Arguments.
Args:
dataset (Dataset): A torch compatible dataset.
preprocess (callable, optional): A function that takes a single data
point from the dataset to preprocess on the fly (default None).
subset (string, optional): Specifies the subset of the relevant
categories, if any of them was split (default, None).
Relevant Command Line Arguments:
- **dataloader**: `--batch_size`, `--num_threads`, `--pin_memory`,
`--shuffle`, `--drop_last`.
Note:
Settings are automatically acquired from a call to :func:`dlt.config.parse`
from the built-in ones. If :func:`dlt.config.parse` was not called in the
main script, this function will call it.
"""
opts = fetch_opts(['dataloader'], subset)
return DataLoader(LoadedDataset(dataset,preprocess),
batch_size=opts.batch_size, num_workers=opts.num_threads,
pin_memory=opts.pin_memory, shuffle=opts.shuffle,
drop_last=opts.drop_last, worker_init_fn=worker_init_fn)
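# Hedged usage sketch (comments only; relies on settings supplied through
# dlt.config.parse, and my_preprocess is a hypothetical function):
#   dataset = directory_dataset(preprocess=my_preprocess)
#   train_loader = loader(dataset)
#   for batch in train_loader:
#       ...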
|
11585667
|
class AlternationConverter(object,IValueConverter):
"""
Converts an integer to and from an object by applying the integer as an index to a list of objects.
AlternationConverter()
"""
def Convert(self,o,targetType,parameter,culture):
"""
Convert(self: AlternationConverter,o: object,targetType: Type,parameter: object,culture: CultureInfo) -> object
Converts an integer to an object in the System.Windows.Controls.AlternationConverter.Values list.
o: The integer to use to find an object in the System.Windows.Controls.AlternationConverter.Values
property.
targetType: The type of the binding target property.
parameter: The converter parameter to use.
culture: The culture to use in the converter.
Returns: The object that is in the position of o modulo the number of items in
System.Windows.Controls.AlternationConverter.Values.
"""
pass
def ConvertBack(self,o,targetType,parameter,culture):
"""
ConvertBack(self: AlternationConverter,o: object,targetType: Type,parameter: object,culture: CultureInfo) -> object
Converts an object in the System.Windows.Controls.AlternationConverter.Values list to an integer.
o: The object to find in the System.Windows.Controls.AlternationConverter.Values property.
targetType: The type of the binding target property.
parameter: The converter parameter to use.
culture: The culture to use in the converter.
Returns: The index of o if it is in System.Windows.Controls.AlternationConverter.Values, or -1 if o does
not exist in System.Windows.Controls.AlternationConverter.Values.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
Values=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a list of objects that the System.Windows.Controls.AlternationConverter returns when an integer is passed to the System.Windows.Controls.AlternationConverter.Convert(System.Object,System.Type,System.Object,System.Globalization.CultureInfo) method.
Get: Values(self: AlternationConverter) -> IList
"""
|
11585718
|
import tensorflow as tf
from games.connect_4 import Connect4GameSpec
connect_4_game_spec = Connect4GameSpec()
def create_convolutional_network():
input_layer = tf.placeholder("float",
(None,) + connect_4_game_spec.board_dimensions() + (1,))
CONVOLUTIONS_LAYER_1 = 64
CONVOLUTIONS_LAYER_2 = 64
CONVOLUTIONS_LAYER_3 = 64
CONVOLUTIONS_LAYER_4 = 64
CONVOLUTIONS_LAYER_5 = 64
FLAT_SIZE = 7 * 6 * CONVOLUTIONS_LAYER_5  # 7x6 board; SAME padding keeps the spatial size
convolution_weights_1 = tf.Variable(tf.truncated_normal([3, 3, 1, CONVOLUTIONS_LAYER_1], stddev=0.01))
convolution_bias_1 = tf.Variable(tf.constant(0.01, shape=[CONVOLUTIONS_LAYER_1]))
convolution_weights_2 = tf.Variable(
tf.truncated_normal([3, 3, CONVOLUTIONS_LAYER_1, CONVOLUTIONS_LAYER_2], stddev=0.01))
convolution_bias_2 = tf.Variable(tf.constant(0.01, shape=[CONVOLUTIONS_LAYER_2]))
convolution_weights_3 = tf.Variable(
tf.truncated_normal([3, 3, CONVOLUTIONS_LAYER_2, CONVOLUTIONS_LAYER_3], stddev=0.01))
convolution_bias_3 = tf.Variable(tf.constant(0.01, shape=[CONVOLUTIONS_LAYER_3]))
convolution_weights_4 = tf.Variable(
tf.truncated_normal([3, 3, CONVOLUTIONS_LAYER_3, CONVOLUTIONS_LAYER_4], stddev=0.01))
convolution_bias_4 = tf.Variable(tf.constant(0.01, shape=[CONVOLUTIONS_LAYER_4]))
convolution_weights_5 = tf.Variable(
tf.truncated_normal([3, 3, CONVOLUTIONS_LAYER_4, CONVOLUTIONS_LAYER_5], stddev=0.01))
convolution_bias_5 = tf.Variable(tf.constant(0.01, shape=[CONVOLUTIONS_LAYER_5]))
# feed_forward_weights_1 = tf.Variable(tf.truncated_normal([FLAT_SIZE, FLAT_HIDDEN_NODES], stddev=0.01))
# feed_forward_bias_1 = tf.Variable(tf.constant(0.01, shape=[FLAT_HIDDEN_NODES]))
feed_forward_weights_2 = tf.Variable(
tf.truncated_normal([FLAT_SIZE, connect_4_game_spec.outputs()], stddev=0.01))
feed_forward_bias_2 = tf.Variable(tf.constant(0.01, shape=[connect_4_game_spec.outputs()]))
hidden_convolutional_layer_1 = tf.nn.relu(
tf.nn.conv2d(input_layer, convolution_weights_1, strides=[1, 1, 1, 1], padding="SAME") + convolution_bias_1)
hidden_convolutional_layer_2 = tf.nn.relu(
tf.nn.conv2d(hidden_convolutional_layer_1, convolution_weights_2, strides=[1, 1, 1, 1],
padding="SAME") + convolution_bias_2)
hidden_convolutional_layer_3 = tf.nn.relu(
tf.nn.conv2d(hidden_convolutional_layer_2, convolution_weights_3, strides=[1, 1, 1, 1],
padding="SAME") + convolution_bias_3)
hidden_convolutional_layer_4 = tf.nn.relu(
tf.nn.conv2d(hidden_convolutional_layer_3, convolution_weights_4, strides=[1, 1, 1, 1],
padding="SAME") + convolution_bias_4)
hidden_convolutional_layer_5 = tf.nn.relu(
tf.nn.conv2d(hidden_convolutional_layer_4, convolution_weights_5, strides=[1, 1, 1, 1],
padding="SAME") + convolution_bias_5)
hidden_convolutional_layer_5_flat = tf.reshape(hidden_convolutional_layer_5, [-1, FLAT_SIZE])
# final_hidden_activations = tf.nn.relu(
#     tf.matmul(hidden_convolutional_layer_5_flat, feed_forward_weights_1) + feed_forward_bias_1)
output_layer = tf.nn.softmax(tf.matmul(hidden_convolutional_layer_5_flat, feed_forward_weights_2) + feed_forward_bias_2)
return input_layer, output_layer, [convolution_weights_1, convolution_bias_1,
convolution_weights_2, convolution_bias_2,
convolution_weights_3, convolution_bias_3,
convolution_weights_4, convolution_bias_4,
convolution_weights_5, convolution_bias_5,
# feed_forward_weights_1, feed_forward_bias_1,
feed_forward_weights_2, feed_forward_bias_2]
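# Hedged usage sketch (not part of the original file): one TF1-style forward
# pass through the network above; the all-zero board is a placeholder input.
if __name__ == '__main__':
    import numpy as np
    input_layer, output_layer, variables = create_convolutional_network()
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        board = np.zeros((1,) + connect_4_game_spec.board_dimensions() + (1,))
        move_probabilities = session.run(output_layer, feed_dict={input_layer: board})
        print(move_probabilities.shape)  # (1, connect_4_game_spec.outputs())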
|
11585720
|
import cv2
import threading
from copy import deepcopy
from screen import convert_screen_to_monitor, grab
from typing import Union
from dataclasses import dataclass
import numpy as np
from logger import Logger
import time
import os
from config import Config
from utils.misc import cut_roi, load_template, list_files_in_folder, alpha_to_mask, roi_center, color_filter, mask_by_roi
from functools import cache
templates_lock = threading.Lock()
@dataclass
class Template:
name: str = None
img_bgra: np.ndarray = None
img_bgr: np.ndarray = None
img_gray: np.ndarray = None
alpha_mask: np.ndarray = None
@dataclass
class TemplateMatch:
name: str = None
score: float = -1.0
center: tuple[int, int] = None
center_monitor: tuple[int, int] = None
region: list[float] = None
region_monitor: list[float] = None
valid: bool = False
TEMPLATE_PATHS = [
"assets\\templates",
"assets\\npc",
"assets\\shop",
"assets\\item_properties",
"assets\\chests",
"assets\\gamble",
]
@cache
def _templates() -> dict[Template]:
paths = []
templates = {}
for path in TEMPLATE_PATHS:
paths += list_files_in_folder(path)
for file_path in paths:
file_name: str = os.path.basename(file_path)
if file_name.lower().endswith('.png'):
key = file_name[:-4].upper()
template_img = load_template(file_path)
templates[key] = Template(
name = key,
img_bgra = template_img,
img_bgr = cv2.cvtColor(template_img, cv2.COLOR_BGRA2BGR),
img_gray = cv2.cvtColor(template_img, cv2.COLOR_BGRA2GRAY),
alpha_mask = alpha_to_mask(template_img)
)
return templates
def get_template(key):
with templates_lock:
return _templates()[key].img_bgr
def _process_template_refs(ref: Union[str, np.ndarray, list[str]]) -> list[Template]:
templates = []
if type(ref) != list:
ref = [ref]
for i in ref:
# if the reference is a string, then it's a reference to a named template asset
if type(i) == str:
templates.append(_templates()[i.upper()])
# if the reference is an image, append new Template class object
elif type(i) == np.ndarray:
templates.append(Template(
img_bgr = i,
img_gray = cv2.cvtColor(i, cv2.COLOR_BGR2GRAY),
alpha_mask = alpha_to_mask(i)
))
return templates
def _single_template_match(template: Template, inp_img: np.ndarray = None, roi: list = None, color_match: list = None, use_grayscale: bool = False) -> TemplateMatch:
inp_img = inp_img if inp_img is not None else grab()
template_match = TemplateMatch()
# crop image to roi
if roi is None:
# if no roi is provided roi = full inp_img
roi = [0, 0, inp_img.shape[1], inp_img.shape[0]]
rx, ry, rw, rh = roi
img = inp_img[ry:ry + rh, rx:rx + rw]
# filter for desired color or make grayscale
if color_match:
template_img = color_filter(template.img_bgr, color_match)[1]
img = color_filter(img, color_match)[1]
elif use_grayscale:
template_img = template.img_gray
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
template_img = template.img_bgr
if not (img.shape[0] > template_img.shape[0] and img.shape[1] > template_img.shape[1]):
Logger.error(f"Image shape and template shape are incompatible: {template.name}. Image: {img.shape}, Template: {template_img.shape}")
else:
res = cv2.matchTemplate(img, template_img, cv2.TM_CCOEFF_NORMED, mask = template.alpha_mask)
np.nan_to_num(res, copy=False, nan=0.0, posinf=0.0, neginf=0.0)
_, max_val, _, max_pos = cv2.minMaxLoc(res)
# save rectangle corresponding to matched region
rec_x = int((max_pos[0] + rx))
rec_y = int((max_pos[1] + ry))
rec_w = int(template_img.shape[1])
rec_h = int(template_img.shape[0])
template_match.region = [rec_x, rec_y, rec_w, rec_h]
template_match.region_monitor = [*convert_screen_to_monitor((rec_x, rec_y)), rec_w, rec_h]
template_match.center = roi_center(template_match.region)
template_match.center_monitor = convert_screen_to_monitor(template_match.center)
template_match.name = template.name
template_match.score = max_val
template_match.valid = True
return template_match
def search(
ref: Union[str, np.ndarray, list[str]],
inp_img: np.ndarray,
threshold: float = 0.68,
roi: list[float] = None,
use_grayscale: bool = False,
color_match: list = None,
best_match: bool = False
) -> TemplateMatch:
"""
Search for a template in an image
:param ref: Either the key of an already loaded template, a list of such keys, or an image to use as the template
:param inp_img: Image in which the template will be searched
:param threshold: Threshold which determines if a template is found or not
:param roi: Region of Interest of the inp_img to restrict search area. Format [left, top, width, height]
:param use_grayscale: Use grayscale template matching for speed up
:param color_match: Pass a color to be used by misc.color_filter to filter both image of interest and template image (format Config().colors["color"])
:param best_match: If list input, will search for list of templates by best match. Default behavior is first match.
:return: Returns a TemplateMatch object with a valid flag
"""
templates = _process_template_refs(ref)
matches = []
for template in templates:
match = _single_template_match(template, inp_img, roi, color_match, use_grayscale)
if match.score >= threshold:
if not best_match:
return match
else:
matches.append(match)
if matches:
matches = sorted(matches, key=lambda obj: obj.score, reverse=True)
return matches[0]
return TemplateMatch()
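# Usage sketch (an assumption, not from the original module; "WAYPOINT_MENU"
# is a hypothetical template key):
# match = search("WAYPOINT_MENU", grab(), threshold=0.8, use_grayscale=True)
# if match.valid:
#     print(match.name, match.score, match.center_monitor)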
def search_and_wait(
ref: Union[str, list[str]],
roi: list[float] = None,
timeout: float = 30,
threshold: float = 0.68,
use_grayscale: bool = False,
color_match: list = None,
best_match: bool = False,
suppress_debug: bool = False,
) -> TemplateMatch:
"""
Helper function that will loop and keep searching for a template
:param timeout: After this amount of time the search will stop and return an invalid TemplateMatch
:Other params are the same as for TemplateFinder.search()
:returns a TemplateMatch object
"""
if not suppress_debug:
Logger.debug(f"Waiting for templates: {ref}")
start = time.time()
template_match = TemplateMatch()
while (time_remains := time.time() - start < timeout):
img = grab()
is_loading_black_roi = np.average(img[:, 0:Config().ui_roi["loading_left_black"][2]]) < 1.0
if not is_loading_black_roi or "LOADING" in ref:
template_match = search(ref, img, roi=roi, threshold=threshold, use_grayscale=use_grayscale, color_match=color_match, best_match=best_match)
if template_match.valid:
break
if not time_remains:
Logger.debug(f"Could not find desired templates")
else:
Logger.debug(f"Found match: {template_match.name} ({template_match.score*100:.1f}% confidence)")
return template_match
def search_all(
ref: Union[str, np.ndarray, list[str]],
inp_img: np.ndarray,
threshold: float = 0.68,
roi: list[float] = None,
use_grayscale: bool = False,
color_match: list = None,
) -> list[TemplateMatch]:
"""
Returns a list of all templates scoring above set threshold on the screen
:Other params are the same as for TemplateFinder.search()
:return: Returns a list of TemplateMatch objects
"""
templates = _process_template_refs(ref)
matches = []
img = inp_img
while True:
any_found = False
for template in templates:
match = _single_template_match(template, img, roi, color_match, use_grayscale)
if (ind_found := match.score >= threshold):
matches.append(match)
img = mask_by_roi(img, match.region, "inverse")
any_found |= ind_found
if not any_found:
break
return matches
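# Usage sketch (an assumption, not from the original module; "POTION" is a
# hypothetical template key): find every on-screen occurrence of a template.
# all_potions = search_all("POTION", grab(), threshold=0.8)
# for m in all_potions:
#     print(m.center, m.score)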
# Testing: Have whatever you want to find on the screen
if __name__ == "__main__":
import keyboard
import os
from screen import start_detecting_window, stop_detecting_window
from utils.misc import wait
import template_finder
start_detecting_window()
wait(0.1)
print("\n== Hotkeys ==")
print("F11: Start")
print("F12: Exit")
print("Down arrow: decrease template matching threshold")
print("Up arrow: increase template matching threshold")
print("Left arrow: decrease template index")
print("Right arrow: increase template index")
print("F9: toggle all vs. individual template(s)")
print("F10: save visible template(s)")
keyboard.add_hotkey('f12', lambda: Logger.info('Force Exit (f12)') or stop_detecting_window() or os._exit(1))
keyboard.wait("f11")
# enter the template names you are trying to detect here
_template_list = ["SHENK_0","SHENK_1","SHENK_10","SHENK_11","SHENK_12","SHENK_13","SHENK_15","SHENK_16","SHENK_17","SHENK_18","SHENK_19","SHENK_2","SHENK_20","SHENK_3","SHENK_4","SHENK_6","SHENK_7","SHENK_8","SHENK_9","SHENK_DEATH_0","SHENK_DEATH_1","SHENK_DEATH_2","SHENK_DEATH_3","SHENK_DEATH_4","SHENK_V2_3","SHENK_V2_4","SHENK_V2_6","SHENK_V2_7","SHENK_V2_8"]
_current_template_idx = -1
_last_stored_idx = 0
_current_threshold = 0.5
_visible_templates = []
def _save_visible_templates():
if not os.path.exists("info_screenshots"):
os.system("mkdir info_screenshots")
for match in _visible_templates:
cv2.imwrite(match['filename'], match['img'])
Logger.info(f"{match['filename']} saved")
def _toggle_templates():
global _current_template_idx
_current_template_idx = -1 if _current_template_idx != -1 else _last_stored_idx
if _current_template_idx == -1:
Logger.info(f"Searching for templates: {_template_list}")
else:
Logger.info(f"Searching for template: {_template_list[_current_template_idx]}")
def _incr_template_idx(direction: int = 1):
global _current_template_idx, _last_stored_idx
if \
(-1 < _current_template_idx < len(_template_list) - 1) or \
(_current_template_idx == -1 and direction > 0) or \
(_current_template_idx == len(_template_list) - 1 and direction < 0):
_current_template_idx += direction
_last_stored_idx = _current_template_idx
if _current_template_idx == -1:
Logger.info(f"Searching for templates: {_template_list}")
else:
Logger.info(f"Searching for template: {_template_list[_current_template_idx]}")
def _incr_threshold(direction: int = 1):
global _current_threshold
if (_current_threshold < 1 and direction > 0) or (_current_threshold > 0 and direction < 0):
_current_threshold = round(_current_threshold + direction, 2)
Logger.info(f"_current_threshold = {_current_threshold}")
keyboard.add_hotkey('down', lambda: _incr_threshold(-0.05))
keyboard.add_hotkey('up', lambda: _incr_threshold(0.05))
keyboard.add_hotkey('left', lambda: _incr_template_idx(-1))
keyboard.add_hotkey('right', lambda: _incr_template_idx(1))
keyboard.add_hotkey('f9', lambda: _toggle_templates())
keyboard.add_hotkey('f10', lambda: _save_visible_templates())
while 1:
_visible_templates = []
img = grab()
display_img = img.copy()
if _current_template_idx < 0:
templates = _template_list
else:
templates = [_template_list[_current_template_idx]]
for key in templates:
template_match = template_finder.search(key, img, threshold=_current_threshold)
if template_match.valid:
x, y = template_match.center
cv2.putText(display_img, str(template_match.name), template_match.center, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
cv2.circle(display_img, template_match.center, 7, (255, 0, 0), thickness=5)
cv2.rectangle(display_img, template_match.region[:2], (template_match.region[0] + template_match.region[2], template_match.region[1] + template_match.region[3]), (0, 0, 255), 1)
print(f"Name: {template_match.name} Pos: {template_match.center}, Dist: {625-x, 360-y}, Score: {template_match.score}")
match = {
'filename': f"./info_screenshots/{key.lower()}.png",
'img': cut_roi(img, template_match.region)
}
_visible_templates.append(match)
cv2.imshow('test', display_img)
key = cv2.waitKey(3000)
|
11585732
|
import uclasm
nodelist, channels, adjs = \
uclasm.load_edgelist(
"example_data_files/example_edgelist.csv",
src_col=0,
dst_col=1,
channel_col=2,
header=0)
nodes = nodelist.node
labels = nodelist.label
# Use the same graph data for both template and world graphs
tmplt = uclasm.Graph(nodes, channels, adjs, labels=labels)
world = uclasm.Graph(nodes, channels, adjs, labels=labels)
|
11585754
|
from unittest import TestCase
import os
from vcr import VCR
import pytest
from click.testing import CliRunner
import pyicloud_ipd
from icloudpd.base import main
from icloudpd.authentication import authenticate, TwoStepAuthRequiredError
import inspect
vcr = VCR(decode_compressed_response=True)
class AuthenticationTestCase(TestCase):
@pytest.fixture(autouse=True)
def inject_fixtures(self, caplog):
self._caplog = caplog
def test_failed_auth(self):
with vcr.use_cassette("tests/vcr_cassettes/failed_auth.yml"):
with self.assertRaises(
pyicloud_ipd.exceptions.PyiCloudFailedLoginException
) as context:
authenticate(
"bad_username",
"bad_password",
client_id="EC5646DE-9423-11E8-BF21-14109FE0B321",
)
self.assertTrue("Invalid email/password combination." in str(context.exception))
def test_2sa_required(self):
with vcr.use_cassette("tests/vcr_cassettes/auth_requires_2sa.yml"):
with self.assertRaises(TwoStepAuthRequiredError) as context:
# To re-record this HTTP request,
# delete ./tests/vcr_cassettes/auth_requires_2sa.yml,
# put your actual credentials in here, run the test,
# and then replace with dummy credentials.
authenticate(
"<EMAIL>",
"<PASSWORD>",
raise_error_on_2sa=True,
client_id="EC5646DE-9423-11E8-BF21-14109FE0B321",
)
self.assertTrue(
"Two-step/two-factor authentication is required!"
in str(context.exception)
)
def test_successful_auth(self):
with vcr.use_cassette("tests/vcr_cassettes/successful_auth.yml"):
authenticate(
"<EMAIL>",
"<PASSWORD>",
client_id="EC5646DE-9423-11E8-BF21-14109FE0B321",
)
def test_password_prompt(self):
base_dir = os.path.normpath(f"tests/fixtures/Photos/{inspect.stack()[0][3]}")
if not os.path.exists(base_dir):
os.makedirs(base_dir)
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
runner = CliRunner(env={
"CLIENT_ID": "DE309E26-942E-11E8-92F5-14109FE0B321"
})
result = runner.invoke(
main,
[
"--username",
"<EMAIL>",
"--recent",
"0",
"--no-progress-bar",
"-d",
base_dir,
],
input="<PASSWORD>",
)
self.assertIn("DEBUG Authenticating...", self._caplog.text)
self.assertIn(
"DEBUG Looking up all photos and videos from album All Photos...",
self._caplog.text
)
self.assertIn(
"INFO All photos have been downloaded!", self._caplog.text
)
assert result.exit_code == 0
|
11585776
|
import numpy as np
# arr_outcomes = np.load('../processed_data/arr_outcomes.npy', allow_pickle=True)
"""Use 8:1:1 split"""
p_train = 0.80
p_val = 0.10
p_test = 0.10
n = 11988 # original 12000 patients, remove 12 outliers
n_train = round(n*p_train)
n_val = round(n*p_val)
n_test = n - (n_train+n_val)
Nsplits = 5
for j in range(Nsplits):
p = np.random.permutation(n)
idx_train = p[:n_train]
idx_val = p[n_train:n_train+n_val]
idx_test = p[n_train+n_val:]
np.save('../splits/phy12_split_subset'+str(j+1)+'.npy', (idx_train, idx_val, idx_test))
print('split IDs saved')
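# Optional sanity check (a sketch, not in the original script): reload the last
# split and verify the three index sets are disjoint and cover all n patients.
idx_train, idx_val, idx_test = np.load(
    '../splits/phy12_split_subset' + str(Nsplits) + '.npy', allow_pickle=True)
assert set(idx_train).isdisjoint(idx_val) and set(idx_train).isdisjoint(idx_test)
assert len(idx_train) + len(idx_val) + len(idx_test) == n
print('split sanity check passed')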
|
11585791
|
import time
from metadrive import TopDownMetaDriveEnvV2
if __name__ == '__main__':
env = TopDownMetaDriveEnvV2(dict(environment_num=10, frame_stack=10, frame_skip=3))
o = env.reset()
start = time.time()
action = [0.0, 0.1]
print(o.shape)
for s in range(10000):
o, r, d, i = env.step(action)
if d:
env.reset()
if (s + 1) % 100 == 0:
print(
"(TopDownEnv) Finish {}/10000 simulation steps. Time elapse: {:.4f}. Average FPS: {:.4f}".format(
s + 1,
time.time() - start, (s + 1) / (time.time() - start)
)
)
print(f"(TopDownEnv) Total Time Elapse: {time.time() - start}")
|
11585794
|
import io
import os
import re
import sys
from jinja2 import Environment, PackageLoader, select_autoescape
from primehub import PrimeHub
from primehub.cli import create_sdk, main as cli_main
from primehub.utils.decorators import find_actions, find_action_method
env = Environment(
loader=PackageLoader("primehub.extras"),
autoescape=select_autoescape()
)
def get_example(command, role=''):
try:
if role:
return env.get_template('examples/{}/{}.md'.format(role, command)).render()
else:
return env.get_template('examples/{}.md'.format(command)).render()
except BaseException:
pass
return "TBD: please write example for [{}]".format(command)
def get_doc_path():
import primehub
p = os.path.abspath(os.path.dirname(primehub.__file__) + "/../docs")
return p
def create_cli_doc_path(name, role=''):
if role:
doc_path = os.path.join(get_doc_path(), 'CLI', role, name + ".md")
else:
doc_path = os.path.join(get_doc_path(), 'CLI', name + ".md")
os.makedirs(os.path.dirname(doc_path), exist_ok=True)
return doc_path
def generate_command_document(*args, **kwargs):
if kwargs['role']:
kwargs['role_title'] = f'<{kwargs["role"].upper()}> '
kwargs['role'] = f'{kwargs["role"]} '
return env.get_template('cli.tpl.md').render(*args, **kwargs)
def generate_help_for_command(sdk: PrimeHub, name, role=''):
sdk.stderr = io.StringIO()
sdk.stdout = io.StringIO()
if role:
sys.argv = ['primehub', role, name, '-h']
else:
sys.argv = ['primehub', name, '-h']
try:
cli_main(sdk=sdk)
except SystemExit:
pass
command_help = sdk.stderr.getvalue()
actions = find_actions(sdk.commands[name])
attach_template_information_to_action(actions, name, sdk)
document = generate_command_document(command=name, command_help=command_help, role=role,
actions=actions, examples=get_example(name, role))
print("Generate doc", name)
p = create_cli_doc_path(name, role)
with open(p, "w") as fh:
fh.write(document)
def attach_template_information_to_action(actions, name, sdk):
for action in actions:
explain = extract_description_from_docstring(action, name, sdk)
def explained(x):
if x in explain:
return "{}: {}".format(x, explain[x])
return x
# arguments
arg_list = []
for x in action['arguments']:
if x[2] is True:
# skip **kwargs
continue
arg_list.append(x[0])
action['required_arguments_string'] = " ".join(["<%s>" % x for x in arg_list])
action['required_arguments'] = [explained(x) for x in arg_list]
# optionals
opt_list = []
for x in action['optionals']:
opt_list.append(x[0])
action['optional_arguments'] = [explained(x) for x in opt_list]
def extract_description_from_docstring(action, name, sdk):
output = dict()
method_name = find_action_method(sdk.commands[name], action['name'])
doc_string = getattr(sdk.commands[name], method_name).__doc__
param_description = re.findall(r':param ([^:]+):(.+)', str(doc_string))
for k, v in param_description:
output[k.strip()] = v.strip()
return output
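# Illustrative note (not in the original module): the regex above maps
# reST-style docstring lines such as
#     :param name: the display name
#     :param group: the group to attach
# to {'name': 'the display name', 'group': 'the group to attach'}.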
def main():
sdk = create_sdk()
for k, v in sdk.commands.items():
if k == 'devlab':
continue
if k == 'version':
continue
if k == 'admin':
continue
generate_help_for_command(sdk, k)
for k, v in sdk.admin_commands.items():
generate_help_for_command(sdk, k, 'admin')
if __name__ == '__main__':
main()
|
11585797
|
from __future__ import absolute_import
from itertools import chain
from six.moves import range, reduce
import os
import re
import numpy as np
import tensorflow as tf
import codecs
import sys
from tqdm import tqdm
import pickle
import logging
class Dataset():
def __init__(self, data='data/tasks_1-20_v1-2/en/', ts_num=1):
self._data = get_train_test(data,ts_num)
self.len_train = len(self._data['train']['S'])
self.len_val = len(self._data['val']['S'])
self.len_test = len(self._data['test']['S'])
def get_minibatches(self,n, minibatch_size, shuffle=False):
idx_list = np.arange(0, n, minibatch_size)
if shuffle:
np.random.shuffle(idx_list)
minibatches = []
for idx in idx_list:
minibatches.append(np.arange(idx, min(idx + minibatch_size, n)))
return minibatches
def gen_examples(self,batch_size,tip):
"""
Divide examples into batches of size `batch_size`.
"""
minibatches = self.get_minibatches(len(self._data[tip]['S']), batch_size)
all_ex = []
for minibatch in minibatches:
mb_x1 = [self._data[tip]['S'][t] for t in minibatch]
mb_x2 = [self._data[tip]['Q'][t] for t in minibatch]
mb_y = [self._data[tip]['A'][t] for t in minibatch]
all_ex.append((np.array(mb_x1), np.array(mb_x2), np.array(mb_y)))
return all_ex
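# Usage sketch (illustrative, not part of the original class): batching the
# training split into minibatches of 32 examples.
# d = Dataset(data='data/tasks_1-20_v1-2/en/', ts_num=1)
# for mb_S, mb_Q, mb_A in d.gen_examples(32, 'train'):
#     pass  # feed (stories, queries, answers) to the model here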
def get_train_test(which_task='data/tasks_1-20_v1-2/en/',task_num=1):
train, val, test = load_task(which_task,task_num)
data = train + test + val
vocab = sorted(reduce(lambda x, y: x | y, (set(list(chain.from_iterable(s)) + q + a) for s, q, a in data)))
word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
max_story_size = max(map(len, (s for s, _, _ in data)))
mean_story_size = int(np.mean([ len(s) for s, _, _ in data ]))
sentence_size = max(map(len, chain.from_iterable(s for s, _, _ in data)))
query_size = max(map(len, (q for _, q, _ in data)))
if (task_num==3):
max_story_size = min(130, max_story_size)
else:
max_story_size = min(70, max_story_size)
vocab_size = len(word_idx) + 1  # +1 for nil word
sentence_size = max(query_size, sentence_size)  # for the position
sentence_size += 1
logging.info("Longest sentence length: " + str(sentence_size))
logging.info("Longest story length: " + str(max_story_size))
logging.info("Average story length: " + str(mean_story_size))
logging.info("Training samples: " + str(len(train)))
logging.info("Validation samples: " + str(len(val)))
logging.info("Test samples: " + str(len(test)))
logging.info("Vocab size: " + str(vocab_size))
S, Q, A = vectorize_data(train, word_idx, sentence_size, max_story_size)
valS, valQ, valA = vectorize_data(val, word_idx, sentence_size, max_story_size)
testS, testQ, testA = vectorize_data(test, word_idx, sentence_size, max_story_size)
return {'train':{'S':S, 'Q':np.expand_dims(Q, axis=1), 'A':A},
'val':{'S':valS, 'Q':np.expand_dims(valQ, axis=1), 'A':valA},
'test':{'S':testS, 'Q':np.expand_dims(testQ, axis=1), 'A':testA},
'vocab':vocab,
'vocab_size':vocab_size,
'sent_len':sentence_size,
'sent_numb':max_story_size,
'word_idx':word_idx,
'len_training':len(train)}
def load_task(data_dir, task_id, only_supporting=False):
'''Load the nth task. There are 20 tasks in total.
Returns a tuple containing the training and testing data for the task.
'''
assert task_id > 0 and task_id < 21
files = os.listdir(data_dir)
files = [os.path.join(data_dir, f) for f in files]
s = 'qa{}_'.format(task_id)
train_file = [f for f in files if s in f and 'train' in f][0]
val_file = [f for f in files if s in f and 'valid.txt' in f][0]
test_file = [f for f in files if s in f and 'test' in f][0]
train_data = get_stories(train_file, only_supporting)
val_data = get_stories(val_file, only_supporting)
test_data = get_stories(test_file, only_supporting)
return train_data, val_data, test_data
def tokenize(sent):
'''Return the tokens of a sentence including punctuation.
>>> tokenize('Bob dropped the apple. Where is the apple?')
['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']
'''
return [x.strip() for x in re.split(r'(\W+)', sent) if x.strip()]
def parse_stories(lines, only_supporting=False):
'''Parse stories provided in the bAbI tasks format
If only_supporting is true, only the sentences that support the answer are kept.
'''
data = []
story = []
for line in lines:
line = str.lower(line)
nid, line = line.split(' ', 1)
nid = int(nid)
if nid == 1:
story = []
if '\t' in line: # question
q, a, supporting = line.split('\t')
q = tokenize(q)
#a = tokenize(a)
# answer is one vocab word even if it's actually multiple words
a = [a]
substory = None
# remove question marks
if q[-1] == "?":
q = q[:-1]
if only_supporting:
# Only select the related substory
supporting = map(int, supporting.split())
substory = [story[i - 1] for i in supporting]
else:
# Provide all the substories
substory = [x for x in story if x]
data.append((substory, q, a))
story.append('')
else: # regular sentence
# remove periods
sent = tokenize(line)
if sent[-1] == ".":
sent = sent[:-1]
story.append(sent)
return data
def get_stories(f, only_supporting=False):
'''Given a file name, read the file, retrieve the stories, and then convert the sentences into a single story.
If max_length is supplied, any stories longer than max_length tokens will be discarded.
'''
with open(f) as f:
return parse_stories(f.readlines(), only_supporting=only_supporting)
def vectorize_data(data, word_idx, sentence_size, memory_size):
"""
Vectorize stories and queries.
If a sentence length < sentence_size, the sentence will be padded with 0's.
If a story length < memory_size, the story will be padded with empty memories.
Empty memories are 1-D arrays of length sentence_size filled with 0's.
The answer array is returned as a one-hot encoding.
"""
S = []
Q = []
A = []
for story, query, answer in data:
ss = []
for i, sentence in enumerate(story, 1):
ls = max(0, sentence_size - len(sentence))
ss.append([word_idx[w] for w in sentence] + [0] * ls)
# take only the most recent sentences that fit in memory
ss = ss[::-1][:memory_size][::-1]
# Make the last word of each sentence the time 'word' which
# corresponds to vector of lookup table
#for i in range(len(ss)):
# ss[i][-1] = len(word_idx) - memory_size - i + len(ss)
# pad to memory_size
lm = max(0, memory_size - len(ss))
for _ in range(lm):
ss.append([0] * sentence_size)
lq = max(0, sentence_size - len(query))
q = [word_idx[w] for w in query] + [0] * lq
# y = np.zeros(len(word_idx) + 1) # 0 is reserved for nil word
# print(answer)
# for a in answer:
# y[word_idx[a]] = 1
S.append(ss)
Q.append(q)
A.append(word_idx[answer[0]])
return np.array(S), np.array(Q), np.array(A)
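# Worked toy example (illustrative, not part of the original module): with
# word_idx = {'mary': 1, 'went': 2, 'home': 3, 'where': 4, 'is': 5},
# sentence_size=4 and memory_size=2, the story [['mary', 'went', 'home']]
# vectorizes to S = [[[1, 2, 3, 0], [0, 0, 0, 0]]] (one real sentence plus one
# empty memory) and the query ['where', 'is', 'mary'] to Q = [[4, 5, 1, 0]].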
def gen_embeddings(word_dict, dim, in_file=None, init=None):
"""
Generate an initial embedding matrix for `word_dict`.
If an embedding file is not given or a word is not in the embedding file,
a randomly initialized vector will be used.
"""
num_words = max(word_dict.values()) + 1
# embeddings = np.zeros((num_words, dim))
embeddings = np.random.standard_normal(size=(num_words, dim))
logging.info('Embeddings: %d x %d' % (num_words, dim))
if in_file is not None:
logging.info('Loading embedding file: %s' % in_file)
pre_trained = 0
for line in open(in_file).readlines():
sp = line.split()
assert len(sp) == dim + 1
if sp[0] in word_dict:
pre_trained += 1
embeddings[word_dict[sp[0]]] = [float(x) for x in sp[1:]]
logging.info('Pre-trained: %d (%.2f%%)' % (pre_trained, pre_trained * 100.0 / num_words))
return embeddings
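# Usage sketch (an assumption; the GloVe file path is hypothetical):
# emb = gen_embeddings(word_idx, 50, in_file='data/glove.6B.50d.txt')
# emb.shape == (len(word_idx) + 1, 50)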
def variable_summaries(var, name):
"""Attach a lot of summaries to a Tensor."""
with tf.name_scope('summaries'):
# mean = tf.reduce_mean(var)
# tf.scalar_summary('mean/' + name, mean)
# with tf.name_scope('stddev'):
# stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
# tf.scalar_summary('stddev/' + name, stddev)
# tf.scalar_summary('max/' + name, tf.reduce_max(var))
# tf.scalar_summary('min/' + name, tf.reduce_min(var))
tf.summary.histogram(name, var)
|
11585802
|
import numpy as np
import scipy
import scipy.sparse as sp
from igraph import Graph, VertexCover
def __reset_diagonal(A, sparse):
'''
input: matrix
ouput: matrix object with diagonals set to 0
'''
if sparse:
A = A - sp.dia_matrix((A.diagonal()[np.newaxis, :], [0]), shape=A.shape)
else:
A = A.copy()
np.fill_diagonal(A, 0)
return A
def __get_diagonal(A, sparse):
'''
input: Matrix
output: vector with the diagonal entries
'''
if sparse:
return A.diagonal()
else:
return np.diag(A)
def __get_matrix(vc, sparse):
'''
inputs: List of lists (vertexCover) object
output: Node x Node matrix with the cell values indicating the number of clusters
each pair of nodes shares
'''
n = len(vc) # number of nodes
nc = max([max(i) for i in vc if i]) + 1 # number of clusters
create_zero_matrix = sp.csr_matrix if sparse else np.zeros
A = create_zero_matrix((n,n), dtype='int')
for i in range(nc):
# Create a Clique from Membership
v = np.matrix([ (i in m)*1 for m in vc])
if sparse:
v = sp.csr_matrix(v)
Ai = v.T*v
A = A+Ai
# DO NOT ZERO THE DIAGONALS HERE, __get_omega_e depends on them.
return A.tocsr() if sparse else A
def __get_omega_u(A1, A2, sparse):
'''
inputs: Two __get_matrix results
outputs: un-adjusted omega score
'''
n = A1.shape[0]
M = n*(n-1)/2.0
notA = __reset_diagonal((A1 != A2), sparse)
rv = n*(n-1) - notA.sum()
return rv/(2*M)
def __get_omega_e(A1, A2, sparse):
'''
inputs: Two __get_matrix results
outputs: expected omega score
'''
n = A1.shape[0]
M = n*(n-1)/2.0
k = max(max((__get_diagonal(A1, sparse))), max(__get_diagonal(A2, sparse)))
# The 0th iteration is done with a negation since it is a sparse matrix
t_not0_1 = __reset_diagonal((A1 != 0), sparse)
t_not0_2 = __reset_diagonal((A2 != 0), sparse)
rv = n*(n-1) - t_not0_1.sum()
rv *= n*(n-1) - t_not0_2.sum()
for i in range(1, k+1):
t_i_1 = __reset_diagonal((A1 == i), sparse)
t_i_2 = __reset_diagonal((A2 == i), sparse)
rv += t_i_1.sum()*t_i_2.sum()
rv /= (2*M)**2
return rv
def omega_index(cover_membership_a, cover_membership_b, sparse=True):
'''
Uses the Omega Index metrics to compare two covers of a given domain, e.g. a Graph.
@param cover_membership_a : A list of vertex to membership list.
Example - a = [[0,1],[1],[0,2]]
@param cover_membership_b : A list of vertex to membership list.
@returns: Best match = 1, No match = 0
'''
A1 = __get_matrix(cover_membership_a, sparse)
A2 = __get_matrix(cover_membership_b, sparse)
omega_u = __get_omega_u(A1, A2, sparse)
omega_e = __get_omega_e(A1, A2, sparse)
return (omega_u - omega_e)/(1-omega_e)
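if __name__ == '__main__':
    # Small self-check (a sketch, not in the original module): comparing a
    # cover against itself should give a perfect score of 1.0.
    a = [[0, 1], [1], [0, 2]]
    print(omega_index(a, a, sparse=False))  # expected: 1.0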
|
11585823
|
import json
import unittest
from subprocess import check_output
import tempfile
import fixtures
class SkinferScriptTest(unittest.TestCase):
script = 'skinfer'
def test_end_to_end_simple_run(self):
# given:
_, filename = tempfile.mkstemp()
with open(filename, 'w') as f:
f.write('{"one": "two"}')
expected = {
"$schema": "http://json-schema.org/draft-04/schema",
"required": [
"one"
],
"type": "object",
"properties": {
"one": {
"type": "string"
}
}
}
# when:
output = check_output([self.script, filename])
# then:
self.assertEqual(expected, json.loads(output))
def test_run_with_json_samples_in_separate_files(self):
# given:
sample1 = fixtures.get_sample_path('minimal-1.json')
sample2 = fixtures.get_sample_path('sample2-yelp.json')
# when:
output = check_output([self.script, sample1, sample2])
# then:
data = json.loads(output)
self.assertIsNotNone(data)
self.assertIn('required', data)
self.assertIn('properties', data)
def test_run_with_jsonlines_samples(self):
# given:
infile = fixtures.get_sample_path('jsonlines.jsonl')
# when:
output = check_output([self.script, '--jsonlines', infile])
# then:
data = json.loads(output)
self.assertIsNotNone(data)
self.assertIn('required', data)
self.assertIn('properties', data)
def test_run_with_jsonlines_samples_omitting_option(self):
# given:
infile = fixtures.get_sample_path('jsonlines.jsonl')
# when:
output = check_output([self.script, infile])
# then:
data = json.loads(output)
self.assertIsNotNone(data)
self.assertIn('required', data)
self.assertIn('properties', data)
|
11585830
|
import datetime
import uuid
from typing import Optional, Union
from unittest import TestCase
import jsons
from jsons import (
SerializationError,
DeserializationError,
UnfulfilledArgumentError,
)
class TestUnion(TestCase):
def test_dump_optional_primitive(self):
class C:
def __init__(self, x: Optional[str]):
self.x = x
expected = {'x': '42'}
dumped = jsons.dump(C('42'))
self.assertDictEqual(expected, dumped)
def test_dump_optional_uuid(self):
class C:
def __init__(self, x: Optional[uuid.UUID]):
self.x = x
expected = {'x': '00000000-0000-0000-0000-000000000000'}
dumped = jsons.dump(C(uuid.UUID(int=0)))
self.assertDictEqual(expected, dumped)
def test_dump_optional_class_primitive(self):
class C:
def __init__(self, x: Optional[int]):
self.x = x
expected = {'x': 42}
dumped = jsons.dump(C(42))
self.assertDictEqual(expected, dumped)
expected2 = {'x': None}
dumped2 = jsons.dump(C(None))
self.assertDictEqual(expected2, dumped2)
def test_dump_optional_class_uuid(self):
class C:
def __init__(self, x: Optional[uuid.UUID]):
self.x = x
expected = {'x': '00000000-0000-0000-0000-000000000000'}
dumped = jsons.dump(C(uuid.UUID(int=0)))
self.assertDictEqual(expected, dumped)
expected2 = {'x': None}
dumped2 = jsons.dump(C(None))
self.assertDictEqual(expected2, dumped2)
def test_dump_optional(self):
dumped = jsons.dump(None, Optional[int])
self.assertEqual(None, dumped)
def test_dump_union(self):
class A:
def __init__(self, x: int):
self.x = x
class B:
def __init__(self, y: int):
self.y = y
dumped = jsons.dump(A(1), Union[B, A])
expected = {'x': 1}
self.assertDictEqual(expected, dumped)
dumped2 = jsons.dump(A(1), Union[B, A], strict=True)
expected2 = {'x': 1}
self.assertDictEqual(expected2, dumped2)
with self.assertRaises(SerializationError):
jsons.dump(A(1), Union[B], strict=True)
def test_fail(self):
with self.assertRaises(SerializationError) as err:
jsons.dump('nope', Union[int, float])
self.assertIn('str', str(err.exception))
self.assertIn('int', str(err.exception))
self.assertIn('float', str(err.exception))
def test_load_union(self):
class A:
def __init__(self, x):
self.x = x
class B:
def __init__(self, x: Optional[int]):
self.x = x
class C:
def __init__(self, x: Union[datetime.datetime, A]):
self.x = x
# Test loading with None value without type hint.
self.assertEqual(None, jsons.load({'x': None}, A).x)
# Test Optional with a value.
self.assertEqual(1, jsons.load({'x': 1}, B).x)
# Test Optional with None value.
self.assertEqual(None, jsons.load({'x': None}, B).x)
# Test Optional without value.
self.assertEqual(None, jsons.load({}, B).x)
# Test Union with a value.
self.assertEqual(1, jsons.load({'x': {'x': 1}}, C).x.x)
# Test Union with invalid value.
with self.assertRaises(DeserializationError):
jsons.load({'x': 'no match in the union'}, C).x
def test_load_none(self):
class C:
def __init__(self, x: int, y: Optional[int]):
self.x = x
self.y = y
with self.assertRaises(UnfulfilledArgumentError):
jsons.load({}, cls=C)
with self.assertRaises(UnfulfilledArgumentError):
jsons.load({'y': 1}, cls=C)
jsons.load({'x': 1, 'y': None}, cls=None) # Should not raise.
jsons.load({'x': 1, 'y': None}, cls=None, strict=True) # Should not raise.
jsons.load({'x': 1}, cls=None, strict=True) # Should not raise.
jsons.load(None) # Should not raise.
def test_load_optional(self):
class TestOptionalInt:
def __init__(self, value: Optional[int]):
self.value = value
# This seems fine.
loaded1 = jsons.load({'value': 42}, cls=TestOptionalInt)
self.assertEqual(42, loaded1.value)
# Strings are parsed if possible.
loaded2 = jsons.load({'value': '42'}, cls=TestOptionalInt)
self.assertEqual(42, loaded2.value)
# No value or None will result in None.
loaded3 = jsons.load({}, cls=TestOptionalInt)
loaded4 = jsons.load({'value': None}, cls=TestOptionalInt)
self.assertEqual(None, loaded3.value)
self.assertEqual(None, loaded4.value)
# Now this will fail.
with self.assertRaises(DeserializationError):
jsons.load({'value': 'not good'}, cls=TestOptionalInt)
|
11585843
|
import io
import sys
from setuptools import setup, find_packages
from shutil import rmtree
if sys.argv[:2] == ["setup.py", "bdist_wheel"]:
# Remove previous build dir when creating a wheel build,
# since if files have been removed from the project,
# they'll still be cached in the build dir and end up
# as part of the build, which is really neat!
try:
rmtree("build")
except:
pass
long_description = (
io.open('README.rst', encoding='utf-8').read()
+ '\n\n'
+ io.open('CHANGES.rst', encoding='utf-8').read()
)
setup(
name='cartridge_braintree',
version='1.2.3.dev0',
description="Braintree Payments processing for Mezzanine/Cartridge",
long_description=long_description,
author="<NAME>",
author_email="<EMAIL>",
license="BSD",
url="https://github.com/henri-hulski/cartridge_braintree",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: 1.8",
"Framework :: Django :: 1.9",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Software Development :: Libraries :: Application Frameworks",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='django mezzanine cartridge payment',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'braintree',
'cartridge >= 0.13',
'django >= 1.11.29, < 1.12',
],
extras_require=dict(countries_utf8_sorting=['pyuca'],),
)
|
11585849
|
import pybithumb
con_key = "81dd5f25e5daa70b2fff603901d2c09c"
sec_key = "82333efegeg9eg3e77c573weg34af17a"
bithumb = pybithumb.Bithumb(con_key, sec_key)
krw = bithumb.get_balance("BTC")[2]
orderbook = pybithumb.get_orderbook("BTC")
asks = orderbook['asks']
sell_price = asks[0]['price']
unit = krw/float(sell_price)
order = bithumb.buy_market_order("BTC", unit)
print(order)
|
11585856
|
import rospy
import rospkg
import argparse
import bisect
import numpy as np
import pylab as pl
import time
import logging
import pickle
import os
import progressbar
from planner_comparison.plan_scoring import *
from planner_comparison.RosbagInterface import *
# Deep motion planner
from deep_motion_planner.tensorflow_wrapper import TensorflowWrapper
from deep_motion_planner.deep_motion_planner import *
import deep_motion_planner.util as dmp_util
def parse_args():
"""
Parse input arguments.
"""
parser = argparse.ArgumentParser(description='Compare different motion planners')
parser.add_argument('-p', '--logPath', help='path to the logfile that should be analyzed', type=str, default="")
parser.add_argument('-m', '--modelPath', help='path to the deep model', type=str, default=(rospkg.RosPack().get_path('deep_motion_planner'))+'/models/')
parser.add_argument('-f', '--protobufFile', help='name of the protobuf file that comprises the model structure', type=str, default="model.pb")
args = parser.parse_args()
return args
def save_data(data, storage_path):
answer = raw_input('Do you want to save the data? ')
if answer.lower() == 'y' or answer.lower() == 'yes':
filename = raw_input('Please enter the desired filename: ')
pickle.dump(data, open(storage_path + "/" + filename, 'wb'))
def plot_velocities(ax_trans, ax_rot, velocities, color='b', linestyle='-', label=None):
t_vec = [rt.to_sec() for rt in velocities.times]
ax_trans.plot(t_vec, [v.linear.x for v in velocities.msgs], color=color, linestyle=linestyle, label=label)
ax_rot.plot(t_vec, [v.angular.z for v in velocities.msgs], color=color, linestyle=linestyle, label=label)
def plot_trajectory(ax, loc_msgs, color='r', linestyle='-'):
"""
Plot trajectory of the robot in Euclidean coordinates
"""
x_vec = [l.feedback.base_position.pose.position.x for l in loc_msgs.msgs]
y_vec = [l.feedback.base_position.pose.position.y for l in loc_msgs.msgs]
ax.plot(x_vec, y_vec, color=color, linestyle=linestyle)
ax.set_xlabel('x [m]')
ax.set_ylabel('y [m]')
def get_closest_msg_in_past(t_desired, time_msg_list):
idx_des = bisect.bisect(time_msg_list.times, t_desired)-1
return time_msg_list.msgs[idx_des]
def compute_deep_plan(tensorflow_wrapper, scan_ranges, relative_target):
"""
Compute the result of the deep motion planner for a specific timestamp
"""
cropped_scans = dmp_util.adjust_laser_scans_to_model(scan_ranges, 1, 1080, max_range = 10.0)
# Compute target in range and yaw format
goal = np.array(relative_target)
angle = np.arctan2(goal[1],goal[0])
norm = np.minimum(np.linalg.norm(goal[0:2], ord=2), 10.0)
data = np.array([angle, norm, goal[2]])
input_data = cropped_scans.tolist() + data.tolist()
linear_x, angular_z = tensorflow_wrapper.inference(input_data)
cmd = Twist()
cmd.linear.x = linear_x
cmd.angular.z = angular_z
return cmd
################## Setup #####################
pl.close('all')
args = parse_args()
plot_velocities_switch = True
plot_trajectory_switch = False
plot_errors_switch = True
run_comparison = True
logging.basicConfig(level=logging.INFO) # INFO: 20 | DEBUG: 10
data_storage = {}
##############################################
logging.info('Loading the data ...')
rosbag_if = RosbagInterface(args.logPath)
msg_container = rosbag_if.msg_container
if run_comparison:
# Compute deep plans for timestamps
time_vec = msg_container['vel_cmd'].times
vel_cmd_deep = TimeMsgContainer()
with TensorflowWrapper(args.modelPath, args.protobufFile, False) as tf_wrapper:
print('Evaluation progress:')
p_bar = progressbar.ProgressBar(widgets=[progressbar.Percentage(), progressbar.Bar(), progressbar.ETA()],
maxval=len(time_vec)).start()
t_start = time.time()
cnt = 0
for t in time_vec:
# Get appropriate messages
current_pos = get_closest_msg_in_past(t, msg_container['loc'])
current_scan = get_closest_msg_in_past(t, msg_container['scan'])
current_goal = get_closest_msg_in_past(t, msg_container['goal'])
# Compute relative target (in robot frame)
relative_target = dmp_util.compute_relative_target_raw(current_pos.pose, current_goal)
vel_cmd_deep.times.append(t)
vel_cmd_deep.msgs.append(compute_deep_plan(tf_wrapper, scan_ranges=current_scan.ranges, relative_target=relative_target))
cnt += 1
p_bar.update(cnt)
p_bar.finish()
print("Avg. model query time was {0} ms".format((time.time()-t_start) * 1000.0 / len(time_vec)))
data_storage['vel_cmd_deep'] = vel_cmd_deep
# Run evaluation
vel_trans_diff = np.zeros([len(vel_cmd_deep),1])
vel_rot_diff = np.zeros([len(vel_cmd_deep),1])
cnt = 0
for v_ros, v_deep in zip(msg_container['vel_cmd'].msgs, vel_cmd_deep.msgs):
vel_trans_diff[cnt] = np.abs(v_ros.linear.x - v_deep.linear.x)
vel_rot_diff[cnt] = np.abs(v_ros.angular.z - v_deep.angular.z)
cnt += 1
mean_trans_error = np.mean(vel_trans_diff)
std_trans_error = np.std(vel_trans_diff)
mean_rot_error = np.mean(vel_rot_diff)
std_rot_error = np.std(vel_rot_diff)
data_storage['vel_trans_diff'] = vel_trans_diff
data_storage['vel_rot_diff'] = vel_rot_diff
data_storage['trans_error'] = (mean_trans_error, std_trans_error)
data_storage['rot_error'] = (mean_rot_error, std_rot_error)
save_data(data_storage, '../data/')
print("Translational velocities: ")
print("\tMean error: {0}".format(mean_trans_error))
print("\tStandard dev: {0}".format(std_trans_error))
print("\nRotational velocities: ")
print("\tMean error: {0}".format(mean_rot_error))
print("\tStandard dev: {0}".format(std_rot_error))
# Plotting
if plot_velocities_switch:
pl.figure('Velocity Command Comparison')
ax_trans = pl.subplot(211)
ax_rot = pl.subplot(212)
plot_velocities(ax_trans, ax_rot, msg_container['vel_cmd'], color='r', linestyle='-', label='ros')
plot_velocities(ax_trans, ax_rot, vel_cmd_deep, color='g', linestyle='-', label='deep')
ax_trans.set_ylim([-1., 1.])
ax_trans.set_ylabel('trans_vel [m/s]')
ax_trans.grid('on')
ax_rot.set_ylim([-2., 2.])
ax_rot.set_ylabel('rot_vel [rad/s]')
ax_rot.set_xlabel('time [s]')
ax_rot.grid('on')
# Trajectory
if plot_trajectory_switch:
pl.figure('Robot trajectory')
ax = pl.subplot(111)
plot_trajectory(ax, msg_container['loc'])
ax.grid('on')
if plot_errors_switch:
pl.figure('Error plots')
ax = pl.subplot(211)
cl = 'r'
ax.plot([t.to_sec() for t in time_vec], vel_trans_diff, color=cl)
ax.plot([time_vec[0].to_sec(), time_vec[-1].to_sec()], [mean_trans_error, mean_trans_error], color=cl, linestyle='--', lw=0.5)
ax.fill_between([time_vec[0].to_sec(), time_vec[-1].to_sec()], [mean_trans_error-std_trans_error, mean_trans_error-std_trans_error],
[mean_trans_error+std_trans_error, mean_trans_error+std_trans_error], alpha=0.4, facecolor=cl)
ax.set_ylabel('Diff vel_trans [m/s]')
ax.grid('on')
ax = pl.subplot(212)
cl = 'b'
ax.plot([t.to_sec() for t in time_vec], vel_rot_diff, color=cl)
ax.plot([time_vec[0].to_sec(), time_vec[-1].to_sec()], [mean_rot_error, mean_rot_error], color=cl, linestyle='--', lw=0.5)
ax.fill_between([time_vec[0].to_sec(), time_vec[-1].to_sec()], [mean_rot_error-std_rot_error, mean_rot_error-std_rot_error],
[mean_rot_error+std_rot_error, mean_rot_error+std_rot_error], alpha=0.4, facecolor=cl)
ax.set_ylabel('Diff vel_rot [rad/s]')
ax.set_xlabel('time [s]')
ax.grid('on')
pl.show(block=False)
|