hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c334cd6809fae5b8cf2760630ff5474a4bc0f6b | 7,348 | py | Python | prepare.py | obitto/relation-network | 2cbea587c9d43d6e02dba8ddd79e9ae18eca5356 | [
"MIT"
] | null | null | null | prepare.py | obitto/relation-network | 2cbea587c9d43d6e02dba8ddd79e9ae18eca5356 | [
"MIT"
] | null | null | null | prepare.py | obitto/relation-network | 2cbea587c9d43d6e02dba8ddd79e9ae18eca5356 | [
"MIT"
] | null | null | null | import numpy as np
import json
from nltk.tokenize import word_tokenize
from PIL import Image
import os
import h5py
from random import shuffle
class ClevrDataset(object):
    """Dataset wrapper for the CLEVR visual question answering task.

    Loads questions/answers from the CLEVR v1.0 JSON files, builds (or
    loads) a word vocabulary, caches resized images, and serves padded,
    index-encoded samples in batches.
    """
    def __init__(self, config, dataset_name = 'train' , shuffle = False, load_vocab = False):
        """
        Args:
            config: unused here; kept for a uniform dataset constructor interface.
            dataset_name: one of 'train', 'val', 'test'; selects the question file.
            shuffle: if True, shuffle the question list once after loading.
            load_vocab: if True, read the vocabulary from disk instead of
                building (and saving) it from the loaded questions.
        """
        if dataset_name == 'train':
            self.question_path = 'data/CLEVR_v1.0/questions/CLEVR_train_questions.json'
        elif dataset_name == 'val':
            self.question_path = 'data/CLEVR_v1.0/questions/CLEVR_val_questions.json'
        elif dataset_name == 'test':
            self.question_path = 'data/CLEVR_v1.0/questions/CLEVR_test_questions.json'
        # hard coded answer index (28 answer classes)
        self.answerSet = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
                          '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '10': 10,
                          'cyan': 11, 'brown': 12, 'metal': 13, 'cube': 14, 'purple': 15, 'green': 16,
                          'large': 17, 'cylinder': 18, 'no': 19, 'blue': 20, 'yellow': 21, 'sphere': 22,
                          'red': 23, 'rubber': 24, 'yes': 25, 'gray': 26, 'small': 27}
        self.questions = self.load_QA_data(self.question_path)
        if load_vocab == False:
            self.build_vocab()
            self.save_vocab()
        else:
            self.load_vocab()
        # max sentence length used for padding
        self.max_length = 50
        self.images = self.load_image(self.questions, dataset_name)
        self.counter = 0
        # ImageNet channel statistics used for normalization.
        self.mean = np.array([0.485, 0.456, 0.406]).reshape(1, 1, 1, 3)
        # BUGFIX: blue-channel std is 0.225 (was mistyped as 0.224).
        self.std = np.array([0.229, 0.224, 0.225]).reshape(1, 1, 1, 3)
        if shuffle:
            # BUGFIX: the `shuffle` parameter shadows random.shuffle, so the
            # old call `shuffle(self.questions)` raised TypeError. Use the
            # module explicitly instead.
            import random
            random.shuffle(self.questions)

    def tokenize(self, sentence, token_to_discard = ['?','.',',']):
        """
        Tokenize the sentence with NLTK and drop the requested tokens
        (punctuation by default).
        """
        tokens = word_tokenize(sentence)
        return [token for token in tokens if token not in token_to_discard]

    def build_vocab(self, min_count = 1, token_to_discard = []):
        """
        Build word2idx and idx2word vocabulary from the loaded questions.
        Also replaces each raw question string with its token list in place.

        Args:
            min_count : minimum occurrence of a word to be included.
            token_to_discard: tokens to drop from the input sentences.
        """
        token_count = {}
        self.word2idx = {}
        tokens = [self.tokenize(sentence[0], token_to_discard) for sentence in self.questions]
        for i in range(len(tokens)):
            self.questions[i] = [tokens[i], self.questions[i][1], self.questions[i][2]]
            for word in tokens[i]:
                token_count[word] = token_count.get(word, 0) + 1
        # add special tokens first so they get fixed indices
        self.word2idx['unk'] = 0
        self.word2idx['pad'] = 1
        # extract words appearing at or above the threshold
        for word in token_count.keys():
            if token_count[word] >= min_count:
                self.word2idx[word] = len(self.word2idx)
        # create idx-to-word dictionary
        self.idx2word = {v: k for k, v in self.word2idx.items()}

    def save_vocab(self, file = 'model/CLEVR_vocab.json'):
        """Persist the word-to-index vocabulary as JSON."""
        with open(file, 'w') as fp:
            json.dump(self.word2idx, fp)

    def load_vocab(self, file = 'model/CLEVR_vocab.json'):
        """Load the word-to-index vocabulary and rebuild idx2word."""
        # BUGFIX: the file must be opened for reading; mode 'w' truncated the
        # saved vocabulary and made json.load fail.
        with open(file, 'r') as fp:
            self.word2idx = json.load(fp)
        self.idx2word = {v: k for k, v in self.word2idx.items()}

    def pad(self, question):
        """
        Pad the index-encoded question to self.max_length with the 'pad'
        index (1). Sequences longer than max_length are only warned about,
        not truncated (preserving the original behavior).
        """
        if (len(question) > self.max_length):
            print("max length exceeded: ", len(question))
        while(len(question) < self.max_length):
            question.append(1)
        return question

    def convert2idx(self, sentence):
        """
        Convert a token list into vocabulary indices so it can be fed into
        the LSTM encoder; unknown words map to 'unk' (0).
        """
        return [self.word2idx.get(word, 0) for word in sentence]

    def load_QA_data(self, path, max_sample = None):
        """
        Load (question, answer, image_filename) triples from the JSON file,
        optionally limited to the first max_sample entries.
        """
        with open(path) as f:
            data = json.load(f)
        if max_sample != None:
            questions = [(sample['question'], sample['answer'], sample['image_filename']) for sample in data['questions'][:max_sample]]
        else:
            questions = [(sample['question'], sample['answer'], sample['image_filename']) for sample in data['questions']]
        return questions

    def toOneHot(self, answer):
        """One-hot encode an answer token via the fixed answerSet mapping."""
        idx = self.answerSet[answer]
        one_hot = np.zeros((1, len(self.answerSet)))
        one_hot[0][idx] = 1.0
        return one_hot

    def load_image(self, questions, path):
        """
        Load each referenced image once, resize it to 80x80 RGB and cache it
        as a numpy array keyed by filename.
        """
        prefix = 'data/CLEVR_v1.0/images/'
        images = {}
        for question in questions:
            if question[2] not in images:
                # BUGFIX: open the image belonging to THIS question; the old
                # code opened questions[0][2] (always the first image) while
                # keying the cache on question[2].
                im = Image.open(prefix + path + '/' + question[2])
                im = im.convert('RGB')
                im = im.resize((80, 80))
                images[question[2]] = np.array(im)
        return images

    def create_sample_tuple(self, question, answer, image):
        """
        Create one training sample: padded question indices, one-hot answer,
        preprocessed image, original sequence length, and a small random
        rotation angle in [-0.05, 0.05) for augmentation.
        """
        sample = {
            'question': self.pad(self.convert2idx(question)),
            'answer': self.toOneHot(answer),
            'image': image,
            'seq_length': len(question),
            'rotate': (np.random.random_sample() - 0.5) / 10
        }
        return sample

    def next_batch(self, batch_size):
        """
        Return a batch of data samples, wrapping around to the start of the
        question list at the end of an epoch.
        """
        if (self.counter + batch_size) < len(self.questions):
            batch = [self.create_sample_tuple(q[0], q[1], self.prepocess(self.images[q[2]])) for q in self.questions[self.counter:self.counter + batch_size]]
            self.counter += batch_size
        else:
            batch = [self.create_sample_tuple(q[0], q[1], self.prepocess(self.images[q[2]])) for q in self.questions[self.counter:]]
            self.counter = self.counter + batch_size - len(self.questions)
            batch.extend([self.create_sample_tuple(q[0], q[1], self.prepocess(self.images[q[2]])) for q in self.questions[:self.counter]])
        return batch

    def prepocess(self, image):
        """Scale pixel values to [0,1] then normalize with the stored mean/std."""
        return (image / 255.0 - self.mean) / self.std
class SClevrDataset(object):
    """
    Dataset for the sort-of-clevr task. Images and QA pairs come from one
    pre-generated JSON file ('data/<name>.json').
    """
    def __init__(self, config, name = 'train', shuffle = True):
        """
        Args:
            config: unused here; kept for a uniform dataset constructor interface.
            name: dataset split name; selects 'data/<name>.json'.
            shuffle: whether to shuffle questions on load and at epoch ends.
        """
        self.path = 'data/' + name + '.json'
        self.name = name
        self.counter = 0
        # BUGFIX: remember the flag. The old code compared the *imported
        # function* random.shuffle against True inside load_data/next_batch,
        # which is always False, so shuffling silently never happened.
        self.shuffle = shuffle
        self.load_data()

    def load_data(self):
        """
        Load images and question/answer pairs from the JSON file, shuffling
        the questions when requested.
        """
        import random
        self.images = {}
        with open(self.path) as f:
            data = json.load(f)
        self.questions = data['qa']
        # shuffle it if necessary
        if self.shuffle:
            random.shuffle(self.questions)
        for image in data['image']:
            self.images[image['id']] = np.array(image['image'])

    def get_data(self, question):
        """
        Preprocessing and data augmentation for one QA record: scales the
        image to [0,1], converts question/answer to float32 arrays, and draws
        a small random rotation angle in [-0.05, 0.05).
        """
        idx = question['id']
        img = self.images[idx] / 255.0
        q = np.array(question['question'], dtype = np.float32)
        a = np.array(question['answer'], dtype = np.float32)
        sample = {
            'question': q,
            'answer': a,
            'image': np.expand_dims(img, axis = 0),
            'rotate': (np.random.random_sample() - 0.5) / 10
        }
        return sample

    def next_batch(self, batch_size):
        """
        Return a batch of data samples, reshuffling (when enabled) and
        wrapping around at the end of an epoch.
        """
        import random
        if (self.counter + batch_size) < len(self.questions):
            batch = [self.get_data(q) for q in self.questions[self.counter:(self.counter + batch_size)]]
            self.counter += batch_size
        else:
            batch = [self.get_data(q) for q in self.questions[self.counter:]]
            self.counter = self.counter + batch_size - len(self.questions)
            if self.shuffle:
                random.shuffle(self.questions)
            batch.extend([self.get_data(q) for q in self.questions[:self.counter]])
        return batch
| 30.87395 | 174 | 0.664807 | import numpy as np
import json
from nltk.tokenize import word_tokenize
from PIL import Image
import os
import h5py
from random import shuffle
class ClevrDataset(object):
    # Dataset for the CLEVR VQA task: loads questions/answers, a word
    # vocabulary and resized images, then serves index-encoded batches.
    def __init__(self, config, dataset_name = 'train' , shuffle = False, load_vocab = False):
        # dataset_name selects which CLEVR question file to read; config is unused here.
        if dataset_name == 'train':
            self.question_path = 'data/CLEVR_v1.0/questions/CLEVR_train_questions.json'
        elif dataset_name == 'val':
            self.question_path = 'data/CLEVR_v1.0/questions/CLEVR_val_questions.json'
        elif dataset_name == 'test':
            self.question_path = 'data/CLEVR_v1.0/questions/CLEVR_test_questions.json'
        # Hard-coded mapping from answer token to class index (28 classes).
        self.answerSet = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
            '5': 5, '6': 6, '7': 7, '8': 8,'9':9, '10': 10,
            'cyan':11, 'brown': 12, 'metal':13, 'cube': 14, 'purple': 15, 'green': 16,
            'large':17, 'cylinder': 18, 'no': 19, 'blue': 20, 'yellow': 21, 'sphere': 22,
            'red': 23, 'rubber': 24, 'yes': 25, 'gray': 26, 'small': 27}
        self.questions = self.load_QA_data(self.question_path)
        if load_vocab == False:
            self.build_vocab()
            self.save_vocab()
        else:
            self.load_vocab()
        # Questions are padded toward this length.
        self.max_length = 50
        self.images = self.load_image(self.questions, dataset_name)
        self.counter = 0
        # ImageNet-style channel statistics for normalization.
        # NOTE(review): the blue-channel std here is 0.224; ImageNet uses 0.225 — confirm.
        self.mean = np.array([0.485, 0.456, 0.406]).reshape(1, 1, 1, 3)
        self.std = np.array([0.229, 0.224, 0.224]).reshape(1, 1, 1, 3)
        if shuffle == True:
            # NOTE(review): the parameter `shuffle` shadows random.shuffle,
            # so this call raises TypeError when shuffle=True — confirm intent.
            shuffle(self.questions)
    def tokenize(self, sentence, token_to_discard = ['?','.',',']):
        # Tokenize with NLTK and drop the requested (punctuation) tokens.
        tokens = word_tokenize(sentence)
        res = []
        for token in tokens:
            if token not in token_to_discard:
                res.append(token)
        return res
    def build_vocab(self, min_count = 1, token_to_discard = []):
        # Build word2idx/idx2word from the loaded questions; indices 0/1 are
        # reserved for 'unk'/'pad'. Also tokenizes each question in place.
        token_count = {}
        self.word2idx = {}
        tokens = [self.tokenize(sentence[0], token_to_discard) for sentence in self.questions]
        for i in range(len(tokens)):
            self.questions[i] = [tokens[i], self.questions[i][1], self.questions[i][2]]
            for word in tokens[i]:
                if word not in token_count:
                    token_count[word] = 1
                else:
                    token_count[word] = token_count[word] + 1
        self.word2idx['unk'] = 0
        self.word2idx['pad'] = 1
        # Keep only words occurring at least min_count times.
        for word in token_count.keys():
            if token_count[word] >= min_count:
                self.word2idx[word] = len(self.word2idx)
        self.idx2word = {v: k for k, v in self.word2idx.items()}
    def save_vocab(self, file = 'model/CLEVR_vocab.json'):
        # Persist the word-to-index vocabulary as JSON.
        with open(file, 'w') as fp:
            json.dump(self.word2idx, fp)
    def load_vocab(self, file = 'model/CLEVR_vocab.json'):
        # NOTE(review): mode 'w' truncates the file before reading; this
        # should almost certainly be 'r' — json.load on a 'w' handle fails.
        with open(file, 'w') as fp:
            self.word2idx = json.load(fp)
        self.idx2word = {v: k for k, v in self.word2idx.items()}
    def pad(self, question):
        # Right-pad an index sequence with the 'pad' index (1); sequences
        # longer than max_length are only warned about, not truncated.
        if (len(question) > self.max_length):
            print("max lenght exceeded: ", len(question))
        while(len(question) < self.max_length):
            question.append(1)
        return question
    def convert2idx(self, sentence):
        # Map tokens to vocabulary indices; unknown words map to 'unk' (0).
        idx = []
        for word in sentence:
            if word in self.word2idx.keys():
                idx.append(self.word2idx[word])
            else:
                idx.append(0)
        return idx
    def load_QA_data(self, path, max_sample = None):
        # Read (question, answer, image_filename) triples from the JSON file,
        # optionally limited to the first max_sample entries.
        with open(path) as f:
            data = json.load(f)
        if max_sample != None:
            questions = [(sample['question'], sample['answer'], sample['image_filename']) for sample in data['questions'][:max_sample]]
        else:
            questions = [(sample['question'], sample['answer'], sample['image_filename']) for sample in data['questions']]
        return questions
    def toOneHot(self, answer):
        # One-hot encode an answer token via the fixed answerSet mapping.
        idx = self.answerSet[answer]
        one_hot = np.zeros((1, len(self.answerSet)))
        one_hot[0][idx] = 1.0
        return one_hot
    def load_image(self, questions, path):
        # Cache each referenced image as an 80x80 RGB numpy array keyed by filename.
        prefix = 'data/CLEVR_v1.0/images/'
        images = {}
        for question in questions:
            if question[2] not in images:
                # NOTE(review): questions[0][2] always opens the FIRST image
                # while the cache is keyed on question[2]; presumably a bug —
                # should be question[2]. Confirm against the other copy.
                im = Image.open(prefix + path + '/' + questions[0][2])
                im = im.convert('RGB')
                im = im.resize((80, 80))
                im = np.array(im)
                images[question[2]] = im
        return images
    def create_sample_tuple(self, question, answer, image):
        # Bundle one sample: padded question indices, one-hot answer,
        # preprocessed image, original length and a random rotation angle.
        sample = {
            'question': self.pad(self.convert2idx(question)),
            'answer': self.toOneHot(answer),
            'image': image,
            'seq_length': len(question),
            'rotate': ( np.random.random_sample()- 0.5) / 10
        }
        return sample
    def next_batch(self, batch_size):
        # Serve the next batch_size samples, wrapping around at epoch end.
        if (self.counter + batch_size) < len(self.questions):
            batch = [self.create_sample_tuple(question[0], question[1], self.prepocess(self.images[question[2]])) for question in self.questions[self.counter:self.counter+batch_size]]
            self.counter += batch_size
        else:
            batch = [self.create_sample_tuple(question[0], question[1], self.prepocess(self.images[question[2]])) for question in self.questions[self.counter:]]
            self.counter = self.counter + batch_size - len(self.questions)
            batch.extend([self.create_sample_tuple(question[0], question[1], self.prepocess(self.images[question[2]])) for question in self.questions[:self.counter]])
        return batch
    def prepocess(self, image):
        # Scale to [0,1] then normalize with the stored mean/std.
        return (image / 255.0 - self.mean) / self.std
class SClevrDataset(object):
    # Dataset for the Sort-of-CLEVR task: images and QA pairs come from one
    # pre-generated JSON file ('data/<name>.json').
    def __init__(self, config, name = 'train',shuffle = True):
        # config is unused here; name selects the split file.
        self.path = 'data/' + name +'.json'
        self.name = name
        self.counter = 0
        self.load_data()
    def load_data(self):
        # Load images and question/answer pairs from the JSON file.
        self.images = {}
        with open(self.path) as f:
            data = json.load(f)
        self.questions = data['qa']
        # NOTE(review): `shuffle` here resolves to random.shuffle (module-level
        # import), not the constructor flag, so this comparison is always
        # False and the questions are never shuffled — confirm intent.
        if shuffle == True:
            shuffle(self.questions)
        for image in data['image']:
            self.images[image['id']] = np.array(image['image'])
    def get_data(self, question):
        # Preprocess one QA record: scale the image to [0,1], convert the
        # question/answer to float32 arrays, draw a random rotation angle.
        idx = question['id']
        img = self.images[idx]/255.0
        q = np.array(question['question'], dtype = np.float32)
        a = np.array(question['answer'], dtype = np.float32)
        sample = {
            'question': q,
            'answer': a,
            'image': np.expand_dims(img, axis = 0),
            'rotate': ( np.random.random_sample()- 0.5) / 10
        }
        return sample
    def next_batch(self, batch_size):
        # Serve the next batch_size samples, wrapping around at epoch end.
        if (self.counter + batch_size) < len(self.questions):
            batch = [self.get_data(question) for question in self.questions[self.counter:(self.counter + batch_size)]]
            self.counter += batch_size
        else:
            batch = [self.get_data(question) for question in self.questions[self.counter:]]
            self.counter = self.counter + batch_size - len(self.questions)
            # NOTE(review): same name-shadowing issue as load_data — this
            # branch never actually reshuffles.
            if shuffle == True:
                shuffle(self.questions)
            batch.extend([self.get_data(question) for question in self.questions[:self.counter]])
        return batch
| true | true |
1c334ddc98738db0e05cabf20e610f1a0a62f889 | 21,417 | py | Python | train.py | kyuhyoung/yolact | 98fc78e963264d2ec18cf1b85de7a328abcd6e96 | [
"MIT"
] | 3 | 2020-09-29T00:04:00.000Z | 2021-06-23T07:54:57.000Z | train.py | kyuhyoung/yolact | 98fc78e963264d2ec18cf1b85de7a328abcd6e96 | [
"MIT"
] | 1 | 2020-08-19T16:35:26.000Z | 2020-08-19T17:22:07.000Z | train.py | kyuhyoung/yolact | 98fc78e963264d2ec18cf1b85de7a328abcd6e96 | [
"MIT"
] | 3 | 2020-03-11T17:03:03.000Z | 2021-07-11T02:56:19.000Z | from data import *
from utils.augmentations import SSDAugmentation, BaseTransform
from utils.functions import MovingAverage, SavePath
from utils.logger import Log
from utils import timer
from layers.modules import MultiBoxLoss
from yolact import Yolact
import os
import sys
import time
import math, random
from pathlib import Path
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.utils.data as data
import numpy as np
import argparse
import datetime
# Oof
import eval as eval_script
def str2bool(v):
    """Interpret a command-line string as a boolean flag (case-insensitive)."""
    truthy = ("yes", "true", "t", "1")
    return v.lower() in truthy
# Command-line interface for the training script. Defaults of None mean
# "read the value from the active config" (see replace() below).
parser = argparse.ArgumentParser(
    description='Yolact Training Script')
parser.add_argument('--batch_size', default=8, type=int,
                    help='Batch size for training')
parser.add_argument('--resume', default=None, type=str,
                    help='Checkpoint state_dict file to resume training from. If this is "interrupt"'\
                         ', the model will resume training from the interrupt file.')
parser.add_argument('--start_iter', default=-1, type=int,
                    help='Resume training at this iter. If this is -1, the iteration will be'\
                         'determined from the file name.')
parser.add_argument('--num_workers', default=4, type=int,
                    help='Number of workers used in dataloading')
parser.add_argument('--cuda', default=True, type=str2bool,
                    help='Use CUDA to train model')
parser.add_argument('--lr', '--learning_rate', default=None, type=float,
                    help='Initial learning rate. Leave as None to read this from the config.')
parser.add_argument('--momentum', default=None, type=float,
                    help='Momentum for SGD. Leave as None to read this from the config.')
parser.add_argument('--decay', '--weight_decay', default=None, type=float,
                    help='Weight decay for SGD. Leave as None to read this from the config.')
parser.add_argument('--gamma', default=None, type=float,
                    help='For each lr step, what to multiply the lr by. Leave as None to read this from the config.')
parser.add_argument('--save_folder', default='weights/',
                    help='Directory for saving checkpoint models.')
parser.add_argument('--log_folder', default='logs/',
                    help='Directory for saving logs.')
parser.add_argument('--config', default=None,
                    help='The config object to use.')
parser.add_argument('--save_interval', default=10000, type=int,
                    help='The number of iterations between saving the model.')
parser.add_argument('--validation_size', default=5000, type=int,
                    help='The number of images to use for validation.')
parser.add_argument('--validation_epoch', default=2, type=int,
                    help='Output validation information every n iterations. If -1, do no validation.')
parser.add_argument('--keep_latest', dest='keep_latest', action='store_true',
                    help='Only keep the latest checkpoint instead of each one.')
parser.add_argument('--keep_latest_interval', default=100000, type=int,
                    help='When --keep_latest is on, don\'t delete the latest file at these intervals. This should be a multiple of save_interval or 0.')
parser.add_argument('--dataset', default=None, type=str,
                    help='If specified, override the dataset specified in the config with this one (example: coco2017_dataset).')
parser.add_argument('--no_log', dest='log', action='store_false',
                    help='Don\'t log per iteration information into log_folder.')
parser.add_argument('--log_gpu', dest='log_gpu', action='store_true',
                    help='Include GPU information in the logs. Nvidia-smi tends to be slow, so set this with caution.')
parser.add_argument('--no_interrupt', dest='interrupt', action='store_false',
                    help='Don\'t save an interrupt when KeyboardInterrupt is caught.')
parser.add_argument('--batch_alloc', default=None, type=str,
                    help='If using multiple GPUS, you can set this to be a comma separated list detailing which GPUs should get what local batch size (It should add up to your total batch size).')
parser.add_argument('--no_autoscale', dest='autoscale', action='store_false',
                    help='YOLACT will automatically scale the lr and the number of iterations depending on the batch size. Set this if you want to disable that.')

parser.set_defaults(keep_latest=False, log=True, log_gpu=False, interrupt=True, autoscale=True)
args = parser.parse_args()

# Apply config/dataset overrides before any config values are read below.
if args.config is not None:
    set_cfg(args.config)

if args.dataset is not None:
    set_dataset(args.dataset)

# Linear-scaling rule: lr scales with batch size, schedule shrinks with it.
# NOTE(review): factor is a float here, so `//=` leaves cfg.max_iter and
# cfg.lr_steps as floats — confirm downstream comparisons tolerate that.
if args.autoscale and args.batch_size != 8:
    factor = args.batch_size / 8
    print('Scaling parameters by %.2f to account for a batch size of %d.' % (factor, args.batch_size))

    cfg.lr *= factor
    cfg.max_iter //= factor
    cfg.lr_steps = [x // factor for x in cfg.lr_steps]

# Update training parameters from the config if necessary
def replace(name):
    if getattr(args, name) == None: setattr(args, name, getattr(cfg, name))
replace('lr')
replace('decay')
replace('gamma')
replace('momentum')

# This is managed by set_lr
cur_lr = args.lr

if torch.cuda.device_count() == 0:
    print('No GPUs detected. Exiting...')
    exit(-1)

# Small per-GPU batches make batch-norm statistics unreliable.
if args.batch_size // torch.cuda.device_count() < 6:
    print('Per-GPU batch size is less than the recommended limit for batch norm. Disabling batch norm.')
    cfg.freeze_bn = True

# Single-letter keys of the loss components; also fixes the print order.
loss_types = ['B', 'C', 'M', 'P', 'D', 'E', 'S', 'I']

if torch.cuda.is_available():
    if args.cuda:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    if not args.cuda:
        print("WARNING: It looks like you have a CUDA device, but aren't " +
              "using CUDA.\nRun with --cuda for optimal training speed.")
        torch.set_default_tensor_type('torch.FloatTensor')
else:
    torch.set_default_tensor_type('torch.FloatTensor')
class NetLoss(nn.Module):
    """
    Bundles the network forward pass and the loss computation into a single
    module so DataParallel can run both on each GPU without an extra gather.
    """

    def __init__(self, net:Yolact, criterion:MultiBoxLoss):
        super().__init__()
        self.net = net
        self.criterion = criterion

    def forward(self, images, targets, masks, num_crowds):
        # Run the detector, then score its predictions against the targets.
        predictions = self.net(images)
        loss_dict = self.criterion(self.net, predictions, targets, masks, num_crowds)
        return loss_dict
class CustomDataParallel(nn.DataParallel):
    """
    This is a custom version of DataParallel that works better with our training data.
    It should also be faster than the general case.
    """

    def scatter(self, inputs, kwargs, device_ids):
        # More like scatter and data prep at the same time. The point is we prep the data in such a way
        # that no scatter is necessary, and there's no need to shuffle stuff around different GPUs.
        # inputs[0] is the raw `datum` from the data loader; prepare_data
        # moves each sub-batch to its target device per --batch_alloc.
        devices = ['cuda:' + str(x) for x in device_ids]
        splits = prepare_data(inputs[0], devices, allocation=args.batch_alloc)

        # One entry per device: that device's slice of every split list.
        return [[split[device_idx] for split in splits] for device_idx in range(len(devices))], \
            [kwargs] * len(devices)

    def gather(self, outputs, output_device):
        # Stack each per-GPU loss component onto the output device so the
        # caller can .mean() over the GPU dimension.
        out = {}

        for k in outputs[0]:
            out[k] = torch.stack([output[k].to(output_device) for output in outputs])

        return out
def train():
    """Main training loop.

    Builds the dataset, network, optimizer and criterion, then iterates over
    batches until cfg.max_iter, adjusting the learning rate on schedule,
    saving weights every --save_interval iterations and (optionally)
    computing validation mAP every --validation_epoch epochs. Ctrl+C saves
    an interrupt checkpoint before exiting.
    """
    if not os.path.exists(args.save_folder):
        os.mkdir(args.save_folder)

    dataset = COCODetection(image_path=cfg.dataset.train_images,
                            info_file=cfg.dataset.train_info,
                            transform=SSDAugmentation(MEANS))

    if args.validation_epoch > 0:
        setup_eval()
        val_dataset = COCODetection(image_path=cfg.dataset.valid_images,
                                    info_file=cfg.dataset.valid_info,
                                    transform=BaseTransform(MEANS))

    # Parallel wraps the underlying module, but when saving and loading we don't want that
    yolact_net = Yolact()
    net = yolact_net
    net.train()

    if args.log:
        log = Log(cfg.name, args.log_folder, dict(args._get_kwargs()),
            overwrite=(args.resume is None), log_gpu_stats=args.log_gpu)

    # I don't use the timer during training (I use a different timing method).
    # Apparently there's a race condition with multiple GPUs, so disable it just to be safe.
    timer.disable_all()

    # Both of these can set args.resume to None, so do them before the check
    if args.resume == 'interrupt':
        args.resume = SavePath.get_interrupt(args.save_folder)
    elif args.resume == 'latest':
        args.resume = SavePath.get_latest(args.save_folder, cfg.name)

    if args.resume is not None:
        print('Resuming training, loading {}...'.format(args.resume))
        yolact_net.load_weights(args.resume)

        if args.start_iter == -1:
            args.start_iter = SavePath.from_str(args.resume).iteration
    else:
        print('Initializing weights...')
        yolact_net.init_weights(backbone_path=args.save_folder + cfg.backbone.path)

    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,
                          weight_decay=args.decay)
    criterion = MultiBoxLoss(num_classes=cfg.num_classes,
                             pos_threshold=cfg.positive_iou_threshold,
                             neg_threshold=cfg.negative_iou_threshold,
                             negpos_ratio=cfg.ohem_negpos_ratio)

    if args.batch_alloc is not None:
        args.batch_alloc = [int(x) for x in args.batch_alloc.split(',')]
        if sum(args.batch_alloc) != args.batch_size:
            print('Error: Batch allocation (%s) does not sum to batch size (%s).' % (args.batch_alloc, args.batch_size))
            exit(-1)

    net = CustomDataParallel(NetLoss(net, criterion))
    if args.cuda:
        net = net.cuda()

    # Initialize everything with one dummy forward pass.
    if not cfg.freeze_bn: yolact_net.freeze_bn() # Freeze bn so we don't kill our means
    yolact_net(torch.zeros(1, 3, cfg.max_size, cfg.max_size).cuda())
    if not cfg.freeze_bn: yolact_net.freeze_bn(True)

    # loss counters
    loc_loss = 0
    conf_loss = 0
    iteration = max(args.start_iter, 0)
    last_time = time.time()

    epoch_size = len(dataset) // args.batch_size
    num_epochs = math.ceil(cfg.max_iter / epoch_size)

    # Which learning rate adjustment step are we on? lr' = lr * gamma ^ step_index
    step_index = 0

    data_loader = data.DataLoader(dataset, args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True, collate_fn=detection_collate,
                                  pin_memory=True)

    save_path = lambda epoch, iteration: SavePath(cfg.name, epoch, iteration).get_path(root=args.save_folder)
    time_avg = MovingAverage()

    global loss_types # Forms the print order
    loss_avgs = { k: MovingAverage(100) for k in loss_types }

    print('Begin training!')
    print()
    # try-except so you can use ctrl+c to save early and stop training
    try:
        for epoch in range(num_epochs):
            # Resume from start_iter
            if (epoch+1)*epoch_size < iteration:
                continue

            for datum in data_loader:
                # Stop if we've reached an epoch if we're resuming from start_iter
                if iteration == (epoch+1)*epoch_size:
                    break

                # Stop at the configured number of iterations even if mid-epoch
                if iteration == cfg.max_iter:
                    break

                # Change a config setting if we've reached the specified iteration
                changed = False
                for change in cfg.delayed_settings:
                    if iteration >= change[0]:
                        changed = True
                        cfg.replace(change[1])

                        # Reset the loss averages because things might have changed.
                        # BUGFIX: iterate the dict's values — iterating the dict
                        # itself yields string keys, and str has no reset().
                        for avg in loss_avgs.values():
                            avg.reset()

                # If a config setting was changed, remove it from the list so we don't keep checking
                if changed:
                    cfg.delayed_settings = [x for x in cfg.delayed_settings if x[0] > iteration]

                # Warm up by linearly interpolating the learning rate from some smaller value
                if cfg.lr_warmup_until > 0 and iteration <= cfg.lr_warmup_until:
                    set_lr(optimizer, (args.lr - cfg.lr_warmup_init) * (iteration / cfg.lr_warmup_until) + cfg.lr_warmup_init)

                # Adjust the learning rate at the given iterations, but also if we resume from past that iteration
                while step_index < len(cfg.lr_steps) and iteration >= cfg.lr_steps[step_index]:
                    step_index += 1
                    set_lr(optimizer, args.lr * (args.gamma ** step_index))

                # Zero the grad to get ready to compute gradients
                optimizer.zero_grad()

                # Forward Pass + Compute loss at the same time (see CustomDataParallel and NetLoss)
                losses = net(datum)

                losses = { k: (v).mean() for k,v in losses.items() } # Mean here because Dataparallel
                loss = sum([losses[k] for k in losses])
                # no_inf_mean removes some components from the loss, so make sure to backward through all of it
                # all_loss = sum([v.mean() for v in losses.values()])

                # Backprop
                loss.backward() # Do this to free up vram even if loss is not finite
                if torch.isfinite(loss).item():
                    optimizer.step()

                # Add the loss to the moving average for bookkeeping
                for k in losses:
                    loss_avgs[k].add(losses[k].item())

                cur_time = time.time()
                elapsed = cur_time - last_time
                last_time = cur_time

                # Exclude graph setup from the timing information
                if iteration != args.start_iter:
                    time_avg.add(elapsed)

                if iteration % 10 == 0:
                    eta_str = str(datetime.timedelta(seconds=(cfg.max_iter-iteration) * time_avg.get_avg())).split('.')[0]

                    total = sum([loss_avgs[k].get_avg() for k in losses])
                    loss_labels = sum([[k, loss_avgs[k].get_avg()] for k in loss_types if k in losses], [])

                    print(('[%3d] %7d ||' + (' %s: %.3f |' * len(losses)) + ' T: %.3f || ETA: %s || timer: %.3f')
                            % tuple([epoch, iteration] + loss_labels + [total, eta_str, elapsed]), flush=True)

                if args.log:
                    precision = 5
                    loss_info = {k: round(losses[k].item(), precision) for k in losses}
                    # BUGFIX: log the total loss, not the last component left
                    # over in the stale loop variable `k`.
                    loss_info['T'] = round(loss.item(), precision)

                    if args.log_gpu:
                        log.log_gpu_stats = (iteration % 10 == 0) # nvidia-smi is sloooow

                    log.log('train', loss=loss_info, epoch=epoch, iter=iteration,
                        lr=round(cur_lr, 10), elapsed=elapsed)

                    log.log_gpu_stats = args.log_gpu

                iteration += 1

                if iteration % args.save_interval == 0 and iteration != args.start_iter:
                    if args.keep_latest:
                        latest = SavePath.get_latest(args.save_folder, cfg.name)

                    print('Saving state, iter:', iteration)
                    yolact_net.save_weights(save_path(epoch, iteration))

                    if args.keep_latest and latest is not None:
                        if args.keep_latest_interval <= 0 or iteration % args.keep_latest_interval != args.save_interval:
                            print('Deleting old save...')
                            os.remove(latest)

            # This is done per epoch
            if args.validation_epoch > 0:
                if epoch % args.validation_epoch == 0 and epoch > 0:
                    compute_validation_map(epoch, iteration, yolact_net, val_dataset, log if args.log else None)

        # Compute validation mAP after training is finished
        compute_validation_map(epoch, iteration, yolact_net, val_dataset, log if args.log else None)
    except KeyboardInterrupt:
        if args.interrupt:
            print('Stopping early. Saving network...')

            # Delete previous copy of the interrupted network so we don't spam the weights folder
            SavePath.remove_interrupt(args.save_folder)

            yolact_net.save_weights(save_path(epoch, repr(iteration) + '_interrupt'))
        exit()

    yolact_net.save_weights(save_path(epoch, iteration))
def set_lr(optimizer, new_lr):
    """Apply new_lr to every param group and record it in the global cur_lr."""
    global cur_lr
    for group in optimizer.param_groups:
        group['lr'] = new_lr
    cur_lr = new_lr
def gradinator(x):
    """Mark tensor x as not requiring gradients, in place, and return it."""
    x.requires_grad_(False)
    return x
def prepare_data(datum, devices:list=None, allocation:list=None):
    """Split one collated batch across devices.

    Args:
        datum: (images, (targets, masks, num_crowds)) as produced by the
            collate function.
        devices: list of device strings; defaults to a single GPU or CPU.
        allocation: per-device sub-batch sizes; defaults to an even split
            with the remainder going to the last device.

    Returns:
        (split_images, split_targets, split_masks, split_numcrowds), each a
        list with one entry per device; images are stacked into one tensor
        per device, the rest stay as per-sample lists.
    """
    with torch.no_grad():
        if devices is None:
            devices = ['cuda:0'] if args.cuda else ['cpu']
        if allocation is None:
            allocation = [args.batch_size // len(devices)] * (len(devices) - 1)
            allocation.append(args.batch_size - sum(allocation)) # The rest might need more/less

        images, (targets, masks, num_crowds) = datum

        # Move each sample to its assigned device and disable gradients.
        cur_idx = 0
        for device, alloc in zip(devices, allocation):
            for _ in range(alloc):
                images[cur_idx]  = gradinator(images[cur_idx].to(device))
                targets[cur_idx] = gradinator(targets[cur_idx].to(device))
                masks[cur_idx]   = gradinator(masks[cur_idx].to(device))
                cur_idx += 1

        if cfg.preserve_aspect_ratio:
            # Choose a random size from the batch
            _, h, w = images[random.randint(0, len(images)-1)].size()

            # Resize every sample in the batch to that common size.
            for idx, (image, target, mask, num_crowd) in enumerate(zip(images, targets, masks, num_crowds)):
                images[idx], targets[idx], masks[idx], num_crowds[idx] \
                    = enforce_size(image, target, mask, num_crowd, w, h)

        cur_idx = 0
        split_images, split_targets, split_masks, split_numcrowds \
            = [[None for alloc in allocation] for _ in range(4)]

        for device_idx, alloc in enumerate(allocation):
            split_images[device_idx]    = torch.stack(images[cur_idx:cur_idx+alloc], dim=0)
            split_targets[device_idx]   = targets[cur_idx:cur_idx+alloc]
            split_masks[device_idx]     = masks[cur_idx:cur_idx+alloc]
            split_numcrowds[device_idx] = num_crowds[cur_idx:cur_idx+alloc]

            cur_idx += alloc

        return split_images, split_targets, split_masks, split_numcrowds
def no_inf_mean(x:torch.Tensor):
    """
    Mean of a vector, ignoring non-finite entries.
    If every entry is non-finite, fall back to the plain mean (i.e., inf).
    """
    finite_vals = [v for v in x if torch.isfinite(v)]
    if not finite_vals:
        return x.mean()
    return sum(finite_vals) / len(finite_vals)
def compute_validation_loss(net, data_loader, criterion):
    """Average each loss component over up to args.validation_size samples.

    NOTE(review): `ScatterWrapper` is not defined anywhere in this file, so
    calling this function would raise NameError — it appears to be stale/dead
    code. Confirm before wiring it into the training loop.
    """
    global loss_types

    with torch.no_grad():
        losses = {}

        # Don't switch to eval mode because we want to get losses
        iterations = 0
        for datum in data_loader:
            images, targets, masks, num_crowds = prepare_data(datum)
            out = net(images)

            wrapper = ScatterWrapper(targets, masks, num_crowds)
            _losses = criterion(out, wrapper, wrapper.make_mask())

            # Accumulate the scalar mean of each loss component.
            for k, v in _losses.items():
                v = v.mean().item()
                if k in losses:
                    losses[k] += v
                else:
                    losses[k] = v

            iterations += 1
            if args.validation_size <= iterations * args.batch_size:
                break

        # Average each component over the processed iterations.
        for k in losses:
            losses[k] /= iterations

        loss_labels = sum([[k, losses[k]] for k in loss_types if k in losses], [])
        print(('Validation ||' + (' %s: %.3f |' * len(losses)) + ')') % tuple(loss_labels), flush=True)
def compute_validation_map(epoch, iteration, yolact_net, dataset, log:Log=None):
    """Run the eval script on `dataset` and (optionally) log the results.

    Temporarily switches the network to eval mode and restores train mode
    afterwards; runs under no_grad to save memory.
    """
    with torch.no_grad():
        yolact_net.eval()

        start = time.time()
        print()
        print("Computing validation mAP (this may take a while)...", flush=True)
        val_info = eval_script.evaluate(yolact_net, dataset, train_mode=True)
        end = time.time()

        if log is not None:
            log.log('val', val_info, elapsed=(end - start), epoch=epoch, iter=iteration)

        yolact_net.train()
def setup_eval():
    """Configure the shared eval script for quiet, size-limited validation runs."""
    eval_flags = ['--no_bar', '--max_images=' + str(args.validation_size)]
    eval_script.parse_args(eval_flags)
# Entry point: start training when run as a script.
if __name__ == '__main__':
    train()
| 42.578529 | 196 | 0.608068 | from data import *
from utils.augmentations import SSDAugmentation, BaseTransform
from utils.functions import MovingAverage, SavePath
from utils.logger import Log
from utils import timer
from layers.modules import MultiBoxLoss
from yolact import Yolact
import os
import sys
import time
import math, random
from pathlib import Path
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.utils.data as data
import numpy as np
import argparse
import datetime
import eval as eval_script
def str2bool(v):
    """Parse a human-friendly truthy string ('yes'/'true'/'t'/'1', any case) into a bool."""
    return v.lower() in {"yes", "true", "t", "1"}
# ----- Command-line interface -----
parser = argparse.ArgumentParser(
    description='Yolact Training Script')
parser.add_argument('--batch_size', default=8, type=int,
                    help='Batch size for training')
parser.add_argument('--resume', default=None, type=str,
                    help='Checkpoint state_dict file to resume training from. If this is "interrupt"'\
                         ', the model will resume training from the interrupt file.')
parser.add_argument('--start_iter', default=-1, type=int,
                    help='Resume training at this iter. If this is -1, the iteration will be'\
                         'determined from the file name.')
parser.add_argument('--num_workers', default=4, type=int,
                    help='Number of workers used in dataloading')
parser.add_argument('--cuda', default=True, type=str2bool,
                    help='Use CUDA to train model')
parser.add_argument('--lr', '--learning_rate', default=None, type=float,
                    help='Initial learning rate. Leave as None to read this from the config.')
parser.add_argument('--momentum', default=None, type=float,
                    help='Momentum for SGD. Leave as None to read this from the config.')
parser.add_argument('--decay', '--weight_decay', default=None, type=float,
                    help='Weight decay for SGD. Leave as None to read this from the config.')
parser.add_argument('--gamma', default=None, type=float,
                    help='For each lr step, what to multiply the lr by. Leave as None to read this from the config.')
parser.add_argument('--save_folder', default='weights/',
                    help='Directory for saving checkpoint models.')
parser.add_argument('--log_folder', default='logs/',
                    help='Directory for saving logs.')
parser.add_argument('--config', default=None,
                    help='The config object to use.')
parser.add_argument('--save_interval', default=10000, type=int,
                    help='The number of iterations between saving the model.')
parser.add_argument('--validation_size', default=5000, type=int,
                    help='The number of images to use for validation.')
parser.add_argument('--validation_epoch', default=2, type=int,
                    help='Output validation information every n iterations. If -1, do no validation.')
parser.add_argument('--keep_latest', dest='keep_latest', action='store_true',
                    help='Only keep the latest checkpoint instead of each one.')
parser.add_argument('--keep_latest_interval', default=100000, type=int,
                    help='When --keep_latest is on, don\'t delete the latest file at these intervals. This should be a multiple of save_interval or 0.')
parser.add_argument('--dataset', default=None, type=str,
                    help='If specified, override the dataset specified in the config with this one (example: coco2017_dataset).')
parser.add_argument('--no_log', dest='log', action='store_false',
                    help='Don\'t log per iteration information into log_folder.')
parser.add_argument('--log_gpu', dest='log_gpu', action='store_true',
                    help='Include GPU information in the logs. Nvidia-smi tends to be slow, so set this with caution.')
parser.add_argument('--no_interrupt', dest='interrupt', action='store_false',
                    help='Don\'t save an interrupt when KeyboardInterrupt is caught.')
parser.add_argument('--batch_alloc', default=None, type=str,
                    help='If using multiple GPUS, you can set this to be a comma separated list detailing which GPUs should get what local batch size (It should add up to your total batch size).')
parser.add_argument('--no_autoscale', dest='autoscale', action='store_false',
                    help='YOLACT will automatically scale the lr and the number of iterations depending on the batch size. Set this if you want to disable that.')
parser.set_defaults(keep_latest=False, log=True, log_gpu=False, interrupt=True, autoscale=True)
args = parser.parse_args()

# Apply command-line overrides to the global config.
if args.config is not None:
    set_cfg(args.config)

if args.dataset is not None:
    set_dataset(args.dataset)

# Linearly scale the lr / iteration schedule with batch size (reference size is 8).
if args.autoscale and args.batch_size != 8:
    factor = args.batch_size / 8
    print('Scaling parameters by %.2f to account for a batch size of %d.' % (factor, args.batch_size))

    cfg.lr *= factor
    cfg.max_iter //= factor
    cfg.lr_steps = [x // factor for x in cfg.lr_steps]

# Update training parameters from the config if necessary
def replace(name):
    # Fill any still-None CLI value from the config attribute of the same name.
    if getattr(args, name) == None: setattr(args, name, getattr(cfg, name))
replace('lr')
replace('decay')
replace('gamma')
replace('momentum')

# This is managed by set_lr
cur_lr = args.lr

if torch.cuda.device_count() == 0:
    print('No GPUs detected. Exiting...')
    exit(-1)

if args.batch_size // torch.cuda.device_count() < 6:
    print('Per-GPU batch size is less than the recommended limit for batch norm. Disabling batch norm.')
    cfg.freeze_bn = True

# One letter per loss term, used to keep the console printout in a fixed order.
loss_types = ['B', 'C', 'M', 'P', 'D', 'E', 'S', 'I']

# Default all new tensors onto the GPU when CUDA is requested and available.
if torch.cuda.is_available():
    if args.cuda:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    if not args.cuda:
        print("WARNING: It looks like you have a CUDA device, but aren't " +
              "using CUDA.\nRun with --cuda for optimal training speed.")
        torch.set_default_tensor_type('torch.FloatTensor')
else:
    torch.set_default_tensor_type('torch.FloatTensor')
class NetLoss(nn.Module):
    """Bundles the network forward pass with the loss computation.

    Wrapping both in one module lets DataParallel evaluate the loss on each
    GPU replica together with the forward pass.
    """

    def __init__(self, net: Yolact, criterion: MultiBoxLoss):
        super().__init__()

        self.net = net
        self.criterion = criterion

    def forward(self, images, targets, masks, num_crowds):
        predictions = self.net(images)
        return self.criterion(self.net, predictions, targets, masks, num_crowds)
class CustomDataParallel(nn.DataParallel):
    """DataParallel that scatters YOLACT's (image, target, mask, crowd) batches.

    The stock scatter cannot split our variable-length annotation lists, so
    this delegates to prepare_data and regroups its per-argument lists into
    per-device argument lists.
    """

    def scatter(self, inputs, kwargs, device_ids):
        devices = ['cuda:' + str(idx) for idx in device_ids]
        # prepare_data returns one list per argument; transpose to per-device.
        splits = prepare_data(inputs[0], devices, allocation=args.batch_alloc)

        per_device_args = [[split[i] for split in splits] for i in range(len(devices))]
        return per_device_args, [kwargs] * len(devices)

    def gather(self, outputs, output_device):
        # Stack each loss term from every replica onto the output device.
        return {key: torch.stack([replica[key].to(output_device) for replica in outputs])
                for key in outputs[0]}
def train():
    """Main YOLACT training loop.

    Builds the dataset/model/optimizer from the global `args`/`cfg`, then runs
    SGD until cfg.max_iter, periodically saving checkpoints and (optionally)
    computing validation mAP. A KeyboardInterrupt saves an '*_interrupt'
    checkpoint before exiting.

    Fixes vs. the original:
      - the logged total loss used the stale loop variable `k` (logging only
        the last individual loss term); it now logs the actual total `loss`.
      - the end-of-training mAP computation is skipped when validation is
        disabled, so the final weight save is no longer lost to a NameError
        on `val_dataset`.
    """
    if not os.path.exists(args.save_folder):
        os.mkdir(args.save_folder)

    dataset = COCODetection(image_path=cfg.dataset.train_images,
                            info_file=cfg.dataset.train_info,
                            transform=SSDAugmentation(MEANS))

    if args.validation_epoch > 0:
        setup_eval()
        val_dataset = COCODetection(image_path=cfg.dataset.valid_images,
                                    info_file=cfg.dataset.valid_info,
                                    transform=BaseTransform(MEANS))

    # Parallel wraps the underlying module, but when saving and loading we don't want that
    yolact_net = Yolact()
    net = yolact_net
    net.train()

    if args.log:
        log = Log(cfg.name, args.log_folder, dict(args._get_kwargs()),
                  overwrite=(args.resume is None), log_gpu_stats=args.log_gpu)

    # Apparently there's a race condition with multiple GPUs, so disable timing just to be safe.
    timer.disable_all()

    # Resolve special --resume values into concrete checkpoint paths.
    if args.resume == 'interrupt':
        args.resume = SavePath.get_interrupt(args.save_folder)
    elif args.resume == 'latest':
        args.resume = SavePath.get_latest(args.save_folder, cfg.name)

    if args.resume is not None:
        print('Resuming training, loading {}...'.format(args.resume))
        yolact_net.load_weights(args.resume)

        if args.start_iter == -1:
            args.start_iter = SavePath.from_str(args.resume).iteration
    else:
        print('Initializing weights...')
        yolact_net.init_weights(backbone_path=args.save_folder + cfg.backbone.path)

    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,
                          weight_decay=args.decay)
    criterion = MultiBoxLoss(num_classes=cfg.num_classes,
                             pos_threshold=cfg.positive_iou_threshold,
                             neg_threshold=cfg.negative_iou_threshold,
                             negpos_ratio=cfg.ohem_negpos_ratio)

    if args.batch_alloc is not None:
        args.batch_alloc = [int(x) for x in args.batch_alloc.split(',')]
        if sum(args.batch_alloc) != args.batch_size:
            print('Error: Batch allocation (%s) does not sum to batch size (%s).' % (args.batch_alloc, args.batch_size))
            exit(-1)

    net = CustomDataParallel(NetLoss(net, criterion))
    if args.cuda:
        net = net.cuda()

    # A dummy forward pass materializes any lazily-created state; keep the
    # batch-norm running means untouched while doing it.
    if not cfg.freeze_bn: yolact_net.freeze_bn()
    yolact_net(torch.zeros(1, 3, cfg.max_size, cfg.max_size).cuda())
    if not cfg.freeze_bn: yolact_net.freeze_bn(True)

    # loss counters
    loc_loss = 0
    conf_loss = 0
    iteration = max(args.start_iter, 0)
    last_time = time.time()

    epoch_size = len(dataset) // args.batch_size
    num_epochs = math.ceil(cfg.max_iter / epoch_size)

    # Which learning rate adjustment step are we on? lr' = lr * gamma ^ step_index
    step_index = 0

    data_loader = data.DataLoader(dataset, args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True, collate_fn=detection_collate,
                                  pin_memory=True)

    save_path = lambda epoch, iteration: SavePath(cfg.name, epoch, iteration).get_path(root=args.save_folder)
    time_avg = MovingAverage()

    global loss_types
    loss_avgs = {k: MovingAverage(100) for k in loss_types}

    print('Begin training!')
    print()
    try:
        for epoch in range(num_epochs):
            # Skip epochs that a resumed iteration counter has already passed.
            if (epoch + 1) * epoch_size < iteration:
                continue

            for datum in data_loader:
                # Stop at the end of the epoch / of training.
                if iteration == (epoch + 1) * epoch_size:
                    break
                if iteration == cfg.max_iter:
                    break

                # Apply any config settings whose activation was delayed
                # until this iteration.
                changed = False
                for change in cfg.delayed_settings:
                    if iteration >= change[0]:
                        changed = True
                        cfg.replace(change[1])

                        # Reset the loss averages because things might have changed.
                        for avg in loss_avgs:
                            avg.reset()

                # If a config setting was changed, remove it from the list so we don't keep checking.
                if changed:
                    cfg.delayed_settings = [x for x in cfg.delayed_settings if x[0] > iteration]

                # Warm up the learning rate, then decay it at the configured steps.
                if cfg.lr_warmup_until > 0 and iteration <= cfg.lr_warmup_until:
                    set_lr(optimizer, (args.lr - cfg.lr_warmup_init) * (iteration / cfg.lr_warmup_until) + cfg.lr_warmup_init)

                while step_index < len(cfg.lr_steps) and iteration >= cfg.lr_steps[step_index]:
                    step_index += 1
                    set_lr(optimizer, args.lr * (args.gamma ** step_index))

                # Forward + backward; the DataParallel wrapper returns per-GPU losses.
                optimizer.zero_grad()
                losses = net(datum)
                losses = {k: v.mean() for k, v in losses.items()}  # mean over GPU replicas
                loss = sum([losses[k] for k in losses])

                # Backprop even for non-finite losses to free graph memory,
                # but only step the optimizer when the loss is finite.
                loss.backward()
                if torch.isfinite(loss).item():
                    optimizer.step()

                # Bookkeeping for the console ETA / loss display.
                for k in losses:
                    loss_avgs[k].add(losses[k].item())

                cur_time = time.time()
                elapsed = cur_time - last_time
                last_time = cur_time

                # Exclude graph setup from the timing average.
                if iteration != args.start_iter:
                    time_avg.add(elapsed)

                if iteration % 10 == 0:
                    eta_str = str(datetime.timedelta(seconds=(cfg.max_iter - iteration) * time_avg.get_avg())).split('.')[0]

                    total = sum([loss_avgs[k].get_avg() for k in losses])
                    loss_labels = sum([[k, loss_avgs[k].get_avg()] for k in loss_types if k in losses], [])

                    print(('[%3d] %7d ||' + (' %s: %.3f |' * len(losses)) + ' T: %.3f || ETA: %s || timer: %.3f')
                          % tuple([epoch, iteration] + loss_labels + [total, eta_str, elapsed]), flush=True)

                if args.log:
                    precision = 5
                    loss_info = {k: round(losses[k].item(), precision) for k in losses}
                    # BUG FIX: log the actual total loss. The original used
                    # `losses[k]` with the stale loop variable `k`, which only
                    # captured the last individual loss term.
                    loss_info['T'] = round(loss.item(), precision)

                    if args.log_gpu:
                        log.log_gpu_stats = (iteration % 10 == 0)  # nvidia-smi is slow

                    log.log('train', loss=loss_info, epoch=epoch, iter=iteration,
                            lr=round(cur_lr, 10), elapsed=elapsed)

                    log.log_gpu_stats = args.log_gpu

                iteration += 1

                if iteration % args.save_interval == 0 and iteration != args.start_iter:
                    if args.keep_latest:
                        latest = SavePath.get_latest(args.save_folder, cfg.name)

                    print('Saving state, iter:', iteration)
                    yolact_net.save_weights(save_path(epoch, iteration))

                    if args.keep_latest and latest is not None:
                        if args.keep_latest_interval <= 0 or iteration % args.keep_latest_interval != args.save_interval:
                            print('Deleting old save...')
                            os.remove(latest)

            # Per-epoch validation.
            if args.validation_epoch > 0:
                if epoch % args.validation_epoch == 0 and epoch > 0:
                    compute_validation_map(epoch, iteration, yolact_net, val_dataset, log if args.log else None)

        # Compute validation mAP after training is finished. Guarded so that a
        # run with validation disabled doesn't hit a NameError on val_dataset
        # (which would also skip the final weight save below).
        if args.validation_epoch > 0:
            compute_validation_map(epoch, iteration, yolact_net, val_dataset, log if args.log else None)
    except KeyboardInterrupt:
        if args.interrupt:
            print('Stopping early. Saving network...')

            # Delete any previous interrupt checkpoint so we don't accumulate them.
            SavePath.remove_interrupt(args.save_folder)

            yolact_net.save_weights(save_path(epoch, repr(iteration) + '_interrupt'))
        exit()

    yolact_net.save_weights(save_path(epoch, iteration))
def set_lr(optimizer, new_lr):
    """Set every param group's learning rate and record it in the global cur_lr."""
    for group in optimizer.param_groups:
        group['lr'] = new_lr

    global cur_lr
    cur_lr = new_lr
def gradinator(x):
    """Disable autograd tracking on `x` (in place) and return the same tensor."""
    return x.requires_grad_(False)
def prepare_data(datum, devices:list=None, allocation:list=None):
    """Move one collated batch onto the target device(s).

    Returns four parallel lists, one entry per device: stacked image tensors,
    target lists, mask lists and crowd counts. When `allocation` is None the
    batch is split evenly, with the last device absorbing the remainder.
    """
    with torch.no_grad():
        if devices is None:
            devices = ['cuda:0'] if args.cuda else ['cpu']
        if allocation is None:
            allocation = [args.batch_size // len(devices)] * (len(devices) - 1)
            allocation.append(args.batch_size - sum(allocation)) # The rest might need more/less

        images, (targets, masks, num_crowds) = datum

        # Push each sample to its assigned device without tracking gradients.
        cur_idx = 0
        for device, alloc in zip(devices, allocation):
            for _ in range(alloc):
                images[cur_idx] = gradinator(images[cur_idx].to(device))
                targets[cur_idx] = gradinator(targets[cur_idx].to(device))
                masks[cur_idx] = gradinator(masks[cur_idx].to(device))
                cur_idx += 1

        if cfg.preserve_aspect_ratio:
            # Choose a random size from the batch
            _, h, w = images[random.randint(0, len(images)-1)].size()

            for idx, (image, target, mask, num_crowd) in enumerate(zip(images, targets, masks, num_crowds)):
                images[idx], targets[idx], masks[idx], num_crowds[idx] \
                    = enforce_size(image, target, mask, num_crowd, w, h)

        # Re-chunk the per-sample lists into per-device groups.
        cur_idx = 0
        split_images, split_targets, split_masks, split_numcrowds \
            = [[None for alloc in allocation] for _ in range(4)]

        for device_idx, alloc in enumerate(allocation):
            split_images[device_idx] = torch.stack(images[cur_idx:cur_idx+alloc], dim=0)
            split_targets[device_idx] = targets[cur_idx:cur_idx+alloc]
            split_masks[device_idx] = masks[cur_idx:cur_idx+alloc]
            split_numcrowds[device_idx] = num_crowds[cur_idx:cur_idx+alloc]

            cur_idx += alloc

        return split_images, split_targets, split_masks, split_numcrowds
def no_inf_mean(x: torch.Tensor):
    """Mean of a 1-d tensor ignoring non-finite (inf/nan) entries.

    Falls back to the plain mean (which will itself be non-finite) when
    every element is non-finite, matching the normal-mean behaviour.
    """
    finite_vals = [v for v in x if torch.isfinite(v)]

    if not finite_vals:
        return x.mean()
    return sum(finite_vals) / len(finite_vals)
def compute_validation_loss(net, data_loader, criterion):
    """Average each loss term over the validation set and print the result.

    The network deliberately stays in train mode so the criterion receives
    the training-style outputs it expects.

    NOTE(review): ScatterWrapper is not defined in this file — confirm this
    helper is still wired up before relying on it.
    """
    global loss_types

    with torch.no_grad():
        loss_sums = {}
        num_batches = 0

        for datum in data_loader:
            images, targets, masks, num_crowds = prepare_data(datum)
            preds = net(images)

            wrapper = ScatterWrapper(targets, masks, num_crowds)
            batch_losses = criterion(preds, wrapper, wrapper.make_mask())

            for name, value in batch_losses.items():
                loss_sums[name] = loss_sums.get(name, 0) + value.mean().item()

            num_batches += 1
            # Stop once we've seen at least validation_size images.
            if args.validation_size <= num_batches * args.batch_size:
                break

        averages = {name: total / num_batches for name, total in loss_sums.items()}

        labels = sum([[name, averages[name]] for name in loss_types if name in averages], [])
        print(('Validation ||' + (' %s: %.3f |' * len(averages)) + ')') % tuple(labels), flush=True)
def compute_validation_map(epoch, iteration, yolact_net, dataset, log:Log=None):
    """Run the eval script on `dataset` and optionally log the resulting mAP."""
    with torch.no_grad():
        yolact_net.eval()

        start_time = time.time()
        print()
        print("Computing validation mAP (this may take a while)...", flush=True)
        val_info = eval_script.evaluate(yolact_net, dataset, train_mode=True)
        elapsed = time.time() - start_time

        if log is not None:
            log.log('val', val_info, elapsed=elapsed, epoch=epoch, iter=iteration)

        # Put the network back into training mode before returning.
        yolact_net.train()
def setup_eval():
    """Configure the shared eval script for quiet, size-limited validation runs."""
    eval_flags = ['--no_bar', '--max_images=' + str(args.validation_size)]
    eval_script.parse_args(eval_flags)
# Entry point: start training when run as a script.
if __name__ == '__main__':
    train()
| true | true |
1c334e49a1724d0025acbd6ef5ed3baac910efb5 | 3,269 | py | Python | xfeltor/load.py | HenrikJantti/xFELTOR | 321fe73ce28fa590baedabc0aa13c5ba50a32dff | [
"MIT"
] | null | null | null | xfeltor/load.py | HenrikJantti/xFELTOR | 321fe73ce28fa590baedabc0aa13c5ba50a32dff | [
"MIT"
] | 1 | 2022-01-26T11:53:42.000Z | 2022-01-26T11:53:42.000Z | xfeltor/load.py | HenrikJantti/xFELTOR | 321fe73ce28fa590baedabc0aa13c5ba50a32dff | [
"MIT"
] | null | null | null | import xarray as xr
import numpy as np
from typing import Union
import json
def open_feltordataset(
    datapath: str = "./*.nc",
    chunks: Union[int, dict, None] = None,
    restart_indices: bool = False,
    probes: bool = False,
    **kwargs: dict,
) -> xr.Dataset:
    """Load FELTOR output into one xarray Dataset.

    Can load either a single output file or multiple coherent files for
    restarted simulations.

    Parameters
    ----------
    datapath : str, optional
        Path (or glob pattern) of one or more *.nc files to open.
    chunks : int or dict, optional
        Dictionary with keys given by dimension names and values given by
        chunk sizes. By default, chunks will be chosen to load entire input
        files into memory at once. This has a major impact on performance:
        http://xarray.pydata.org/en/stable/user-guide/dask.html#chunking-and-performance
    restart_indices : bool, optional
        If True, duplicate time steps from restarted runs are kept.
    probes : bool, optional
        If True, indicates that the dataset contains probes and associates
        values of the x and y position for each probe with the corresponding
        probe_id. Also changes the combine option to "by_coords".
    **kwargs : dict, optional
        Passed down to `xarray.open_mfdataset`, which in turn passes extra
        kwargs down to `xarray.open_dataset`.

    Returns
    -------
    xr.Dataset
        The combined dataset, with duplicate restart time steps dropped
        (unless ``restart_indices``) and the FELTOR input file exposed as
        individual attributes.
    """
    if chunks is None:
        chunks = {}

    combine_opt = "by_coords" if probes else "nested"

    ds = xr.open_mfdataset(
        datapath,
        chunks=chunks,
        combine=combine_opt,
        concat_dim="time",
        decode_times=False,
        join="outer",
        **kwargs,
    )

    if restart_indices:
        return ds

    _, index = np.unique(ds["time"], return_index=True)

    # Store the (JSON) inputfile data as individual attributes on the dataset.
    input_variables = json.loads(ds.attrs["inputfile"])
    for i in input_variables:
        ds.attrs[i] = input_variables[i]

    if probes:
        x = np.unique(ds.px.values)
        y = np.unique(ds.py.values)
        ds = ds.assign_coords(dict(probe_x=x, probe_y=y))

        # The probe signals arrive flattened along a "probes" dimension;
        # reshape each one onto the (probe_y, probe_x, probe_time) grid.
        # (Was four copy-pasted stanzas, one per variable.)
        n_time = ds.probe_time.values.size
        for var in ("electrons_prb", "ions_prb", "potential_prb", "vorticity_prb"):
            reshaped_prb = np.reshape(ds[var].values, (y.size, x.size, n_time))
            ds = ds.assign({var: (["probe_y", "probe_x", "probe_time"], reshaped_prb)})

        ds = ds.drop_dims("probes")

    return ds.isel(time=index)
| 33.701031 | 99 | 0.61762 | import xarray as xr
import numpy as np
from typing import Union
import json
def open_feltordataset(
    datapath: str = "./*.nc",
    chunks: Union[int, dict, None] = None,
    restart_indices: bool = False,
    probes: bool = False,
    **kwargs: dict,
) -> xr.Dataset:
    """Load FELTOR output into one xarray Dataset.

    Can load either a single output file or multiple coherent files for
    restarted simulations. Duplicate time steps from restarted runs are
    dropped unless ``restart_indices`` is True; the FELTOR input file is
    exposed as individual attributes; with ``probes`` the flattened probe
    signals are reshaped onto a (probe_y, probe_x, probe_time) grid.
    Extra ``kwargs`` go to `xarray.open_mfdataset`.
    """
    if chunks is None:
        chunks = {}

    combine_opt = "by_coords" if probes else "nested"

    ds = xr.open_mfdataset(
        datapath,
        chunks=chunks,
        combine=combine_opt,
        concat_dim="time",
        decode_times=False,
        join="outer",
        **kwargs,
    )

    if restart_indices:
        return ds

    _, index = np.unique(ds["time"], return_index=True)

    # Store the (JSON) inputfile data as individual attributes on the dataset.
    input_variables = json.loads(ds.attrs["inputfile"])
    for i in input_variables:
        ds.attrs[i] = input_variables[i]

    if probes:
        x = np.unique(ds.px.values)
        y = np.unique(ds.py.values)
        ds = ds.assign_coords(dict(probe_x=x, probe_y=y))

        # The probe signals arrive flattened along a "probes" dimension;
        # reshape each one onto the (probe_y, probe_x, probe_time) grid.
        # (Was four copy-pasted stanzas, one per variable.)
        n_time = ds.probe_time.values.size
        for var in ("electrons_prb", "ions_prb", "potential_prb", "vorticity_prb"):
            reshaped_prb = np.reshape(ds[var].values, (y.size, x.size, n_time))
            ds = ds.assign({var: (["probe_y", "probe_x", "probe_time"], reshaped_prb)})

        ds = ds.drop_dims("probes")

    return ds.isel(time=index)
| true | true |
1c334fb05c5a8d5c3556b5f5c031aaf7ad9fdb70 | 14,472 | py | Python | flax/linen/normalization.py | AI-App/FLAX | 25dd368d9f35d276fe0999f9468c71a284eb3d96 | [
"Apache-2.0"
] | null | null | null | flax/linen/normalization.py | AI-App/FLAX | 25dd368d9f35d276fe0999f9468c71a284eb3d96 | [
"Apache-2.0"
] | 10 | 2022-01-19T11:32:54.000Z | 2022-02-09T23:47:20.000Z | flax/linen/normalization.py | slowy07/flax | fb4f2a455d4cae383f11d77707bf0d0f18a45e70 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Normalization modules for Flax."""
from typing import (Any, Callable, Optional, Tuple, Iterable, Union)
from jax import lax
from jax.nn import initializers
import jax.numpy as jnp
from flax.linen.module import Module, compact, merge_param
# Loose type aliases used throughout this module.
PRNGKey = Any  # presumably a jax PRNG key; only used in initializer signatures
Array = Any
Shape = Tuple[int]
Dtype = Any  # this could be a real type?
Axes = Union[int, Iterable[int]]  # a single axis index or a collection of them
def _canonicalize_axes(rank: int, axes: Axes) -> Iterable[int]:
"""Returns a tuple of deduplicated, sorted, and positive axes."""
if not isinstance(axes, Iterable):
axes = (axes,)
return tuple(set([rank + axis if axis < 0 else axis for axis in axes]))
def _compute_stats(x: Array, axes: Axes,
                   axis_name: Optional[str] = None,
                   axis_index_groups: Any = None):
  """Compute the mean and variance of ``x`` over ``axes``.

  Implementation details preserved from the original:
  - computes in float32 precision (important for half-precision inputs),
  - uses the single-pass identity Var = E[x^2] - E[x]^2,
  - clips negative round-off variances to zero to avoid downstream NaNs,
  - merges statistics across a named parallel axis (and optional
    subgroups) with a single fused ``lax.pmean`` call.
  """
  x32 = jnp.asarray(x, jnp.float32)
  mean = jnp.mean(x32, axes)
  mean_of_squares = jnp.mean(lax.square(x32), axes)

  if axis_name is not None:
    # Concatenating both statistics makes this one collective instead of two.
    stacked = jnp.concatenate([mean, mean_of_squares])
    reduced = lax.pmean(stacked, axis_name=axis_name,
                        axis_index_groups=axis_index_groups)
    mean, mean_of_squares = jnp.split(reduced, 2)

  # E[x^2] - E[x]^2 can dip below zero from floating point round-off; clamp.
  variance = jnp.maximum(0., mean_of_squares - lax.square(mean))
  return mean, variance
def _normalize(mdl: Module, x: Array, mean: Array, var: Array,
               reduction_axes: Axes, feature_axes: Axes,
               dtype: Dtype, param_dtype: Dtype,
               epsilon: float,
               use_bias: bool, use_scale: bool,
               bias_init: Callable[[PRNGKey, Shape, Dtype], Array],
               scale_init: Callable[[PRNGKey, Shape, Dtype], Array]):
  """Normalizes the input of a normalization layer and optionally applies a learned scale and bias.

  A separate bias and scale is learned for each feature as specified by feature_axes.
  """
  reduction_axes = _canonicalize_axes(x.ndim, reduction_axes)
  feature_axes = _canonicalize_axes(x.ndim, feature_axes)
  # Reshape the statistics so they broadcast against x: reduced axes become 1.
  stats_shape = list(x.shape)
  for axis in reduction_axes:
    stats_shape[axis] = 1
  mean = mean.reshape(stats_shape)
  var = var.reshape(stats_shape)
  # feature_shape broadcasts against x; reduced_feature_shape is the compact
  # shape the scale/bias parameters are actually stored in.
  feature_shape = [1] * x.ndim
  reduced_feature_shape = []
  for ax in feature_axes:
    feature_shape[ax] = x.shape[ax]
    reduced_feature_shape.append(x.shape[ax])
  y = x - mean
  mul = lax.rsqrt(var + epsilon)
  if use_scale:
    # Fold the learned scale into the multiplier before applying it.
    scale = mdl.param('scale', scale_init, reduced_feature_shape,
                      param_dtype).reshape(feature_shape)
    mul *= scale
  y *= mul
  if use_bias:
    bias = mdl.param('bias', bias_init, reduced_feature_shape,
                     param_dtype).reshape(feature_shape)
    y += bias
  return jnp.asarray(y, dtype)
class BatchNorm(Module):
  """BatchNorm Module.

  Usage Note:
  If we define a model with BatchNorm, for example::

    BN = nn.BatchNorm(use_running_average=False, momentum=0.9, epsilon=1e-5,
                      dtype=jnp.float32)

  The initialized variables dict will contain in addition to a 'params'
  collection a separate 'batch_stats' collection that will contain all the
  running statistics for all the BatchNorm layers in a model::

    vars_initialized = BN.init(key, x)  # {'params': ..., 'batch_stats': ...}

  We then update the batch_stats during training by specifying that the
  `batch_stats` collection is mutable in the `apply` method for our module.::

    vars_in = {'params': params, 'batch_stats': old_batch_stats}
    y, mutated_vars = BN.apply(vars_in, x, mutable=['batch_stats'])
    new_batch_stats = mutated_vars['batch_stats']

  During eval we would define BN with `use_running_average=True` and use the
  batch_stats collection from training to set the statistics. In this case
  we are not mutating the batch statistics collection, and needn't mark it
  mutable::

    vars_in = {'params': params, 'batch_stats': training_batch_stats}
    y = BN.apply(vars_in, x)

  Attributes:
    use_running_average: if True, the statistics stored in batch_stats
      will be used instead of computing the batch statistics on the input.
    axis: the feature or non-batch axis of the input.
    momentum: decay rate for the exponential moving average of
      the batch statistics.
    epsilon: a small float added to variance to avoid dividing by zero.
    dtype: the dtype of the computation (default: float32).
    param_dtype: the dtype passed to parameter initializers (default: float32).
    use_bias: if True, bias (beta) is added.
    use_scale: if True, multiply by scale (gamma).
      When the next layer is linear (also e.g. nn.relu), this can be disabled
      since the scaling will be done by the next layer.
    bias_init: initializer for bias, by default, zero.
    scale_init: initializer for scale, by default, one.
    axis_name: the axis name used to combine batch statistics from multiple
      devices. See `jax.pmap` for a description of axis names (default: None).
    axis_index_groups: groups of axis indices within that named axis
      representing subsets of devices to reduce over (default: None). For
      example, `[[0, 1], [2, 3]]` would independently batch-normalize over
      the examples on the first two and last two devices. See `jax.lax.psum`
      for more details.
  """
  use_running_average: Optional[bool] = None
  axis: int = -1
  momentum: float = 0.99
  epsilon: float = 1e-5
  dtype: Dtype = jnp.float32
  param_dtype: Dtype = jnp.float32
  use_bias: bool = True
  use_scale: bool = True
  bias_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.zeros
  scale_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.ones
  axis_name: Optional[str] = None
  axis_index_groups: Any = None

  @compact
  def __call__(self, x, use_running_average: Optional[bool] = None):
    """Normalizes the input using batch statistics.

    NOTE:
    During initialization (when parameters are mutable) the running average
    of the batch statistics will not be updated. Therefore, the inputs
    fed during initialization don't need to match that of the actual input
    distribution and the reduction axis (set with `axis_name`) does not have
    to exist.

    Args:
      x: the input to be normalized.
      use_running_average: if true, the statistics stored in batch_stats
        will be used instead of computing the batch statistics on the input.

    Returns:
      Normalized inputs (the same shape as inputs).
    """
    # The call-time argument overrides the constructor attribute (exactly
    # one of the two must be set).
    use_running_average = merge_param(
        'use_running_average', self.use_running_average, use_running_average)
    feature_axes = _canonicalize_axes(x.ndim, self.axis)
    reduction_axes = tuple(i for i in range(x.ndim) if i not in feature_axes)
    feature_shape = [x.shape[ax] for ax in feature_axes]

    # see NOTE above on initialization behavior
    initializing = self.is_mutable_collection('params')

    ra_mean = self.variable('batch_stats', 'mean',
                            lambda s: jnp.zeros(s, jnp.float32),
                            feature_shape)
    ra_var = self.variable('batch_stats', 'var',
                           lambda s: jnp.ones(s, jnp.float32),
                           feature_shape)

    if use_running_average:
      mean, var = ra_mean.value, ra_var.value
    else:
      mean, var = _compute_stats(
          x, reduction_axes,
          axis_name=self.axis_name if not initializing else None,
          axis_index_groups=self.axis_index_groups)

      if not initializing:
        # Exponential moving average update of the stored statistics.
        ra_mean.value = self.momentum * ra_mean.value + (1 - self.momentum) * mean
        ra_var.value = self.momentum * ra_var.value + (1 - self.momentum) * var

    return _normalize(
        self, x, mean, var, reduction_axes, feature_axes,
        self.dtype, self.param_dtype, self.epsilon,
        self.use_bias, self.use_scale,
        self.bias_init, self.scale_init)
class LayerNorm(Module):
  """Layer normalization (https://arxiv.org/abs/1607.06450).

  Operates on the last axis of the input data. Each example in a batch is
  normalized independently — unlike batch normalization, no statistics are
  shared across the batch — so each example's activations end up with mean
  close to 0 and standard deviation close to 1.

  Attributes:
    epsilon: A small float added to variance to avoid dividing by zero.
    dtype: the dtype of the computation (default: float32).
    param_dtype: the dtype passed to parameter initializers (default: float32).
    use_bias: If True, bias (beta) is added.
    use_scale: If True, multiply by scale (gamma). When the next layer is linear
      (also e.g. nn.relu), this can be disabled since the scaling will be done
      by the next layer.
    bias_init: Initializer for bias, by default, zero.
    scale_init: Initializer for scale, by default, one.
  """
  epsilon: float = 1e-6
  dtype: Any = jnp.float32
  param_dtype: Dtype = jnp.float32
  use_bias: bool = True
  use_scale: bool = True
  bias_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.zeros
  scale_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.ones

  @compact
  def __call__(self, x):
    """Applies layer normalization on the input.

    Args:
      x: the inputs

    Returns:
      Normalized inputs (the same shape as inputs).
    """
    # Both the statistics and the learned scale/bias live on the last axis.
    axes = (-1,)
    # TODO: support axis_name for model parallelism?
    mean, var = _compute_stats(x, axes, None, None)

    return _normalize(
        self, x, mean, var, axes, axes,
        self.dtype, self.param_dtype, self.epsilon,
        self.use_bias, self.use_scale,
        self.bias_init, self.scale_init)
class GroupNorm(Module):
  """Group normalization (arxiv.org/abs/1803.08494).

  This op is similar to batch normalization, but statistics are shared across
  equally-sized groups of channels and not shared across batch dimension.
  Thus, group normalization does not depend on the batch composition and does
  not require maintaining internal state for storing statistics.
  The user should either specify the total number of channel groups or the
  number of channels per group.

  Attributes:
    num_groups: the total number of channel groups. The default value of 32 is
      proposed by the original group normalization paper.
    group_size: the number of channels in a group.
    epsilon: A small float added to variance to avoid dividing by zero.
    dtype: the dtype of the computation (default: float32).
    param_dtype: the dtype passed to parameter initializers (default: float32).
    use_bias: If True, bias (beta) is added.
    use_scale: If True, multiply by scale (gamma). When the next layer is linear
      (also e.g. nn.relu), this can be disabled since the scaling will be done
      by the next layer.
    bias_init: Initializer for bias, by default, zero.
    scale_init: Initializer for scale, by default, one.
  """
  num_groups: Optional[int] = 32
  group_size: Optional[int] = None
  epsilon: float = 1e-6
  dtype: Any = jnp.float32
  param_dtype: Dtype = jnp.float32
  use_bias: bool = True
  use_scale: bool = True
  bias_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.zeros
  scale_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.ones

  @compact
  def __call__(self, x):
    """Applies group normalization to the input (arxiv.org/abs/1803.08494).

    Args:
      x: the input of shape N...C, where N is a batch dimension and C is a
        channels dimensions. `...` represents an arbitrary number of extra
        dimensions that are used to accumulate statistics over.

    Returns:
      Normalized inputs (the same shape as inputs).
    """
    reduction_axes = list(range(1, x.ndim - 1)) + [-1]
    feature_axes = (-1,)

    # Exactly one of num_groups / group_size must be provided.
    if ((self.num_groups is None and self.group_size is None) or
        (self.num_groups is not None and self.group_size is not None)):
      raise ValueError('Either `num_groups` or `group_size` should be '
                       'specified, but not both of them.')
    num_groups = self.num_groups

    channels = x.shape[-1]
    if self.group_size is not None:
      if channels % self.group_size != 0:
        raise ValueError('Number of channels ({}) is not multiple of the '
                         'group size ({}).'.format(channels, self.group_size))
      num_groups = channels // self.group_size

    if num_groups <= 0 or channels % num_groups != 0:
      raise ValueError('Number of groups ({}) does not divide the number'
                       ' of channels ({}).'.format(num_groups, channels))

    group_size = x.shape[-1] // num_groups
    group_shape = x.shape[:-1] + (num_groups, group_size)

    def broadcast_stat(stat):
      # Stats come back per (batch, group); repeat them across the channels
      # of each group and flatten back onto the channel axis.
      stat = jnp.broadcast_to(stat[..., None], (x.shape[0], num_groups, group_size))
      return stat.reshape((x.shape[0], num_groups * group_size))

    # TODO suport axis_name for model parallelism?
    mean, var = _compute_stats(x.reshape(group_shape), reduction_axes, None, None)
    mean = broadcast_stat(mean)
    var = broadcast_stat(var)

    return _normalize(
        self, x, mean, var, reduction_axes[:-1], feature_axes,
        self.dtype, self.param_dtype, self.epsilon,
        self.use_bias, self.use_scale,
        self.bias_init, self.scale_init)
| 39.867769 | 100 | 0.6894 |
from typing import (Any, Callable, Optional, Tuple, Iterable, Union)
from jax import lax
from jax.nn import initializers
import jax.numpy as jnp
from flax.linen.module import Module, compact, merge_param
PRNGKey = Any
Array = Any
Shape = Tuple[int]
Dtype = Any
Axes = Union[int, Iterable[int]]
def _canonicalize_axes(rank: int, axes: Axes) -> Iterable[int]:
  """Returns a tuple of deduplicated, non-negative axis indices.

  Args:
    rank: rank of the array the axes refer to.
    axes: one axis or an iterable of axes; negative values count from the end.
  """
  if not isinstance(axes, Iterable):
    axes = (axes,)
  canonical = set()
  for axis in axes:
    canonical.add(rank + axis if axis < 0 else axis)
  return tuple(canonical)
def _compute_stats(x: Array, axes: Axes,
                   axis_name: Optional[str] = None,
                   axis_index_groups: Any = None):
  """Computes the mean and variance of ``x`` over ``axes`` in float32.

  When ``axis_name`` is given, the first and second moments are additionally
  averaged across devices; concatenating them first keeps it to a single
  ``pmean`` collective.

  Returns:
    A ``(mean, var)`` pair; the variance is clamped to be non-negative.
  """
  # Always accumulate statistics in float32.
  x = jnp.asarray(x, jnp.float32)
  first_moment = jnp.mean(x, axes)
  second_moment = jnp.mean(lax.square(x), axes)
  if axis_name is not None:
    # One collective for both moments: concatenate, pmean, then split.
    combined = jnp.concatenate([first_moment, second_moment])
    combined = lax.pmean(combined, axis_name=axis_name,
                         axis_index_groups=axis_index_groups)
    first_moment, second_moment = jnp.split(combined, 2)
  # E[x^2] - E[x]^2 can go slightly negative from rounding; clamp at zero.
  var = jnp.maximum(0., second_moment - lax.square(first_moment))
  return first_moment, var
def _normalize(mdl: Module, x: Array, mean: Array, var: Array,
               reduction_axes: Axes, feature_axes: Axes,
               dtype: Dtype, param_dtype: Dtype,
               epsilon: float,
               use_bias: bool, use_scale: bool,
               bias_init: Callable[[PRNGKey, Shape, Dtype], Array],
               scale_init: Callable[[PRNGKey, Shape, Dtype], Array]):
  """Normalizes ``x`` with the given statistics and learned scale/bias.

  Computes ``(x - mean) * scale * rsqrt(var + epsilon) + bias`` and casts the
  result to ``dtype``.  Calls ``mdl.param`` for the 'scale'/'bias' parameters,
  so it must run inside the owning module's init/apply.

  Args:
    mdl: module owning the normalization parameters.
    x: the input array.
    mean: mean statistics, ``x`` reduced over ``reduction_axes``.
    var: variance statistics, same shape as ``mean``.
    reduction_axes: axes the statistics were computed over.
    feature_axes: axes carrying the learned per-feature scale and bias.
    dtype: dtype of the returned array.
    param_dtype: dtype passed to the parameter initializers.
    epsilon: small constant added to the variance before ``rsqrt``.
    use_bias: if True, add the learned bias (beta).
    use_scale: if True, multiply by the learned scale (gamma).
    bias_init: initializer for the bias parameter.
    scale_init: initializer for the scale parameter.
  """
  reduction_axes = _canonicalize_axes(x.ndim, reduction_axes)
  feature_axes = _canonicalize_axes(x.ndim, feature_axes)
  # Reshape statistics like x but with size-1 reduction axes so they
  # broadcast against the input.
  stats_shape = list(x.shape)
  for axis in reduction_axes:
    stats_shape[axis] = 1
  mean = mean.reshape(stats_shape)
  var = var.reshape(stats_shape)
  # feature_shape broadcasts against x; reduced_feature_shape is the compact
  # shape the parameters are stored under.
  feature_shape = [1] * x.ndim
  reduced_feature_shape = []
  for ax in feature_axes:
    feature_shape[ax] = x.shape[ax]
    reduced_feature_shape.append(x.shape[ax])
  y = x - mean
  mul = lax.rsqrt(var + epsilon)
  if use_scale:
    # Fold the learned scale into the multiplier before applying it.
    scale = mdl.param('scale', scale_init, reduced_feature_shape,
                      param_dtype).reshape(feature_shape)
    mul *= scale
  y *= mul
  if use_bias:
    bias = mdl.param('bias', bias_init, reduced_feature_shape,
                     param_dtype).reshape(feature_shape)
    y += bias
  return jnp.asarray(y, dtype)
class BatchNorm(Module):
  """Batch normalization Module.

  Normalizes over all axes except ``axis`` using batch statistics, and keeps
  exponential moving averages of those statistics in the 'batch_stats'
  collection for use when ``use_running_average`` is True.

  Attributes:
    use_running_average: if True, use the stored moving statistics instead of
      computing them from the current batch (may also be passed per call).
    axis: the feature (channel) axis that keeps its own statistics.
    momentum: decay rate of the moving-average statistics.
    epsilon: small constant added to the variance to avoid division by zero.
    dtype: dtype of the computation's result.
    param_dtype: dtype passed to the parameter initializers.
    use_bias: if True, add a learned bias (beta).
    use_scale: if True, multiply by a learned scale (gamma).
    bias_init: initializer for the bias, zeros by default.
    scale_init: initializer for the scale, ones by default.
    axis_name: optional mapped-axis name for cross-device statistics.
    axis_index_groups: device groups for the cross-device reduction.
  """
  use_running_average: Optional[bool] = None
  axis: int = -1
  momentum: float = 0.99
  epsilon: float = 1e-5
  dtype: Dtype = jnp.float32
  param_dtype: Dtype = jnp.float32
  use_bias: bool = True
  use_scale: bool = True
  bias_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.zeros
  scale_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.ones
  axis_name: Optional[str] = None
  axis_index_groups: Any = None
  @compact
  def __call__(self, x, use_running_average: Optional[bool] = None):
    """Normalizes ``x`` with batch (or moving-average) statistics."""
    # Per-call argument overrides the attribute (merge_param enforces that
    # exactly one is provided).
    use_running_average = merge_param(
        'use_running_average', self.use_running_average, use_running_average)
    feature_axes = _canonicalize_axes(x.ndim, self.axis)
    reduction_axes = tuple(i for i in range(x.ndim) if i not in feature_axes)
    feature_shape = [x.shape[ax] for ax in feature_axes]
    # While initializing ('params' mutable) the cross-device collective is
    # skipped and the moving averages are left untouched.
    initializing = self.is_mutable_collection('params')
    ra_mean = self.variable('batch_stats', 'mean',
                            lambda s: jnp.zeros(s, jnp.float32),
                            feature_shape)
    ra_var = self.variable('batch_stats', 'var',
                           lambda s: jnp.ones(s, jnp.float32),
                           feature_shape)
    if use_running_average:
      mean, var = ra_mean.value, ra_var.value
    else:
      mean, var = _compute_stats(
          x, reduction_axes,
          axis_name=self.axis_name if not initializing else None,
          axis_index_groups=self.axis_index_groups)
      if not initializing:
        # Exponential moving average of the batch statistics.
        ra_mean.value = self.momentum * ra_mean.value + (1 - self.momentum) * mean
        ra_var.value = self.momentum * ra_var.value + (1 - self.momentum) * var
    return _normalize(
        self, x, mean, var, reduction_axes, feature_axes,
        self.dtype, self.param_dtype, self.epsilon,
        self.use_bias, self.use_scale,
        self.bias_init, self.scale_init)
class LayerNorm(Module):
  """Layer normalization.

  Normalizes the trailing feature axis of each example independently and
  applies a learned per-feature scale and bias.

  Attributes:
    epsilon: small constant added to the variance to avoid division by zero.
    dtype: dtype of the computation's result.
    param_dtype: dtype passed to the parameter initializers.
    use_bias: if True, add a learned bias (beta).
    use_scale: if True, multiply by a learned scale (gamma).
    bias_init: initializer for the bias, zeros by default.
    scale_init: initializer for the scale, ones by default.
  """
  epsilon: float = 1e-6
  dtype: Any = jnp.float32
  param_dtype: Dtype = jnp.float32
  use_bias: bool = True
  use_scale: bool = True
  bias_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.zeros
  scale_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.ones
  @compact
  def __call__(self, x):
    """Applies layer normalization; returns an array shaped like ``x``."""
    # Statistics and parameters both live on the trailing feature axis.
    reduction_axes = (-1,)
    feature_axes = (-1,)
    mean, var = _compute_stats(x, reduction_axes, None, None)
    return _normalize(
        self, x, mean, var, reduction_axes, feature_axes,
        self.dtype, self.param_dtype, self.epsilon,
        self.use_bias, self.use_scale,
        self.bias_init, self.scale_init)
class GroupNorm(Module):
  """Group normalization.

  Channels are split into equally-sized groups; statistics are computed per
  example over each group together with all non-batch, non-channel
  dimensions.  Exactly one of ``num_groups`` and ``group_size`` must be set.

  Attributes:
    num_groups: total number of channel groups.
    group_size: number of channels per group.
    epsilon: small constant added to the variance to avoid division by zero.
    dtype: dtype of the computation's result.
    param_dtype: dtype passed to the parameter initializers.
    use_bias: if True, add a learned bias (beta).
    use_scale: if True, multiply by a learned scale (gamma).
    bias_init: initializer for the bias, zeros by default.
    scale_init: initializer for the scale, ones by default.
  """
  num_groups: Optional[int] = 32
  group_size: Optional[int] = None
  epsilon: float = 1e-6
  dtype: Any = jnp.float32
  param_dtype: Dtype = jnp.float32
  use_bias: bool = True
  use_scale: bool = True
  bias_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.zeros
  scale_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.ones
  @compact
  def __call__(self, x):
    """Applies group normalization to ``x`` of shape N...C."""
    # Reduce over every non-batch dimension plus the (grouped) channel axis.
    reduction_axes = list(range(1, x.ndim - 1)) + [-1]
    feature_axes = (-1,)
    if ((self.num_groups is None and self.group_size is None) or
        (self.num_groups is not None and self.group_size is not None)):
      raise ValueError('Either `num_groups` or `group_size` should be '
                       'specified, but not both of them.')
    num_groups = self.num_groups
    channels = x.shape[-1]
    if self.group_size is not None:
      # group_size takes precedence: derive the group count from it.
      if channels % self.group_size != 0:
        raise ValueError('Number of channels ({}) is not multiple of the '
                         'group size ({}).'.format(channels, self.group_size))
      num_groups = channels // self.group_size
    if num_groups <= 0 or channels % num_groups != 0:
      raise ValueError('Number of groups ({}) does not divide the number'
                       ' of channels ({}).'.format(num_groups, channels))
    group_size = x.shape[-1] // num_groups
    # View the channel axis as (num_groups, group_size) while computing stats.
    group_shape = x.shape[:-1] + (num_groups, group_size)
    def broadcast_stat(stat):
      # Expand the per-group statistic back to one value per channel.
      # NOTE(review): assumes the reduced statistic has shape
      # (batch, num_groups) — confirm for inputs with extra leading dims.
      stat = jnp.broadcast_to(stat[..., None], (x.shape[0], num_groups, group_size))
      return stat.reshape((x.shape[0], num_groups * group_size))
    mean, var = _compute_stats(x.reshape(group_shape), reduction_axes, None, None)
    mean = broadcast_stat(mean)
    var = broadcast_stat(var)
    # Drop the grouped axis from reduction_axes when normalizing the
    # original (un-grouped) input.
    return _normalize(
        self, x, mean, var, reduction_axes[:-1], feature_axes,
        self.dtype, self.param_dtype, self.epsilon,
        self.use_bias, self.use_scale,
        self.bias_init, self.scale_init)
| true | true |
1c33502e4d76d3079ade89c8c30b3af81f805584 | 268 | py | Python | tests/artificial/transf_Difference/trend_ConstantTrend/cycle_5/ar_/test_artificial_32_Difference_ConstantTrend_5__20.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/artificial/transf_Difference/trend_ConstantTrend/cycle_5/ar_/test_artificial_32_Difference_ConstantTrend_5__20.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/artificial/transf_Difference/trend_ConstantTrend/cycle_5/ar_/test_artificial_32_Difference_ConstantTrend_5__20.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 5, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 0); | 38.285714 | 168 | 0.735075 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 5, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 0); | true | true |
1c3350aaa6a4bb05a67615451625477c84bb2507 | 3,667 | py | Python | repoman/depot_operations.py | jsoriano/python-repoman | 308c141ce7177238c70f78facf1fc2642cf485aa | [
"Apache-2.0"
] | 6 | 2015-08-10T09:42:55.000Z | 2021-11-08T10:26:02.000Z | repoman/depot_operations.py | jsoriano/python-repoman | 308c141ce7177238c70f78facf1fc2642cf485aa | [
"Apache-2.0"
] | 11 | 2017-08-28T17:38:24.000Z | 2019-05-31T12:49:31.000Z | repoman/depot_operations.py | jsoriano/python-repoman | 308c141ce7177238c70f78facf1fc2642cf485aa | [
"Apache-2.0"
] | 7 | 2015-02-14T16:15:41.000Z | 2021-09-29T09:53:26.000Z | #!/usr/bin/env python
#
# Copyright 2014 Tuenti Technologies S.L.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from repoman.depot import Depot
class DepotOperations(object):
    """Abstract strategy for SCM-specific depot operations.

    Concrete backends live in ``repoman.<kind>.depot_operations`` and are
    looked up dynamically by :meth:`get_depot_operations`.
    """

    # SCM kind identifier; concrete subclasses override this.
    KIND = None

    @classmethod
    def get_depot_operations(cls, repo_kind):
        """Returns a DepotOperations instance for the given SCM kind.

        :param repo_kind: name of the SCM backend (e.g. 'hg', 'git').
        :raises NotImplementedError: if no backend exists for ``repo_kind``.
        """
        try:
            # __import__ in python < 2.7 works not very well
            # TODO: migrate to python 3.0 and change this
            mod = __import__("repoman.%s.depot_operations" % (repo_kind),
                            fromlist=['DepotOperations'])
            ConcreteDepotOperations = getattr(mod, 'DepotOperations')
            return ConcreteDepotOperations()
        except (ImportError, AttributeError):
            # Fix: only a missing backend module/class means "unsupported
            # kind".  The previous bare ``except:`` also converted genuine
            # errors raised while constructing the backend (and even
            # SystemExit/KeyboardInterrupt) into NotImplementedError.
            raise NotImplementedError

    def check_changeset_availability(self, path, changesets):
        """ Check for changesets are already in the specified depot path.

        Always request all changesets from all sources. This means
        that the changesets will always be missing.

        :param path: Path to the depot.
        :param changesets: List of strings specifying the changesets.
        :returns: List of changesets missing
        """
        raise NotImplementedError

    def grab_changesets(self, path, url, changesets):
        """
        Copies changesets from the remote url to the specified path.

        :param path: target depot for the changesets.
        :param url: depot to copy the changesets from.
        :param changesets: List of changesets ids.
        :returns: True.
        """
        raise NotImplementedError

    def init_depot(self, path, parent=None, source=None):
        """
        Initializes a new depot

        :param path: path to the main depot
        :returns: Depot class corresponding to the path. False otherwise.
        """
        raise NotImplementedError

    def is_a_depot(self, path):
        """
        Check if the given path corresponds to a depot.

        :param path: path to the supposed depot
        :returns: True if a depot. False otherwise.
        """
        raise NotImplementedError

    def get_depot_from_path(self, path, parent=None):
        """
        Factory method that creates Depots from a given path

        :param path: Path of the depot
        :returns: Depot class corresponding to the path.
        """
        self._locks_cleanup(path)
        return Depot(path, parent, self)

    def _locks_cleanup(self, path):
        """
        Make sure that a clone has no unreleased locks because of some failed
        process.

        Implementation is not mandatory, but recommended in SCMs with locking
        mechanisms.

        :param path: Path of the depot
        """
        pass

    def clear_depot(self, path, parent=None):
        """
        Clear a depot just in case a previous usage let it dirty

        This should also reset configuration

        :param path: Path of the depot
        :param parent:
        """
        raise NotImplementedError

    def set_source(self, path, source):
        """
        Set the default remote source.

        :param path: Path of the depot
        :param source: Remote URI of the source repo
        """
        raise NotImplementedError
| 30.815126 | 77 | 0.641942 |
from repoman.depot import Depot
class DepotOperations(object):
    """Abstract base for SCM-specific depot operations; concrete backends
    live in ``repoman.<kind>.depot_operations``."""
    # SCM kind identifier; set by concrete subclasses.
    KIND = None
    @classmethod
    def get_depot_operations(cls, repo_kind):
        """Loads and instantiates the DepotOperations backend for
        ``repo_kind``; raises NotImplementedError when unavailable."""
        try:
            mod = __import__("repoman.%s.depot_operations" % (repo_kind),
                            fromlist=['DepotOperations'])
            ConcreteDepotOperations = getattr(mod, 'DepotOperations')
            return ConcreteDepotOperations()
        # NOTE(review): bare except also converts unrelated errors (even
        # SystemExit/KeyboardInterrupt) into NotImplementedError — consider
        # narrowing to (ImportError, AttributeError).
        except:
            raise NotImplementedError
    def check_changeset_availability(self, path, changesets):
        """Returns the subset of ``changesets`` missing from ``path``."""
        raise NotImplementedError
    def grab_changesets(self, path, url, changesets):
        """Copies ``changesets`` from the remote ``url`` into ``path``."""
        raise NotImplementedError
    def init_depot(self, path, parent=None, source=None):
        """Initializes a new depot at ``path``."""
        raise NotImplementedError
    def is_a_depot(self, path):
        """Returns True if ``path`` corresponds to a depot."""
        raise NotImplementedError
    def get_depot_from_path(self, path, parent=None):
        """Factory that wraps ``path`` in a Depot after cleaning stale locks."""
        self._locks_cleanup(path)
        return Depot(path, parent, self)
    def _locks_cleanup(self, path):
        """Optional hook: release locks left by a failed process (no-op here)."""
        pass
    def clear_depot(self, path, parent=None):
        """Resets a possibly-dirty depot, including its configuration."""
        raise NotImplementedError
    def set_source(self, path, source):
        """Sets the default remote source URI for the depot at ``path``."""
        raise NotImplementedError
| true | true |
1c3350fd75ff2f1bea44df5b72d7e2556a0ee1e7 | 441 | py | Python | rest/recording/list-get-example-2/list-get-example-2.5.x.py | azaddeveloper/api-snippets | f88b153cd7186fa70b33733b205886502db0d1f2 | [
"MIT"
] | 2 | 2017-11-23T11:31:20.000Z | 2018-01-22T04:14:02.000Z | rest/recording/list-get-example-2/list-get-example-2.5.x.py | azaddeveloper/api-snippets | f88b153cd7186fa70b33733b205886502db0d1f2 | [
"MIT"
] | null | null | null | rest/recording/list-get-example-2/list-get-example-2.5.x.py | azaddeveloper/api-snippets | f88b153cd7186fa70b33733b205886502db0d1f2 | [
"MIT"
] | 2 | 2020-05-22T23:31:21.000Z | 2021-06-10T18:33:45.000Z | # Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import TwilioRestClient
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
# NOTE(review): TwilioRestClient is the legacy (pre-6.x) helper-library
# client class — confirm which twilio package version this sample targets.
client = TwilioRestClient(account_sid, auth_token)
# A list of recording objects with the properties described above
# (filtered to recordings created on the given date).
recordings = client.recordings.list(date_created="2016-10-18")
| 40.090909 | 72 | 0.823129 |
from twilio.rest import TwilioRestClient
# Account SID and auth token from twilio.com/user/account.
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
# NOTE(review): TwilioRestClient is the legacy (pre-6.x) helper-library
# client class — confirm which twilio package version this sample targets.
client = TwilioRestClient(account_sid, auth_token)
# Fetch the recordings created on the given date.
recordings = client.recordings.list(date_created="2016-10-18")
| true | true |
1c3351362491dc874ae6c3ccf9a6c1876506b1e8 | 4,730 | py | Python | docs/check_and_draw_box.py | alexchungio/under-water-detect | 312672ccbe5e31ca21dffab26e1438ea190f3e5a | [
"Apache-2.0"
] | 1 | 2022-02-17T12:14:59.000Z | 2022-02-17T12:14:59.000Z | docs/check_and_draw_box.py | alexchungio/under-water-object-detect-2020 | 312672ccbe5e31ca21dffab26e1438ea190f3e5a | [
"Apache-2.0"
] | null | null | null | docs/check_and_draw_box.py | alexchungio/under-water-object-detect-2020 | 312672ccbe5e31ca21dffab26e1438ea190f3e5a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#------------------------------------------------------
# @ File : check_and_draw_box.py
# @ Description:
# @ Author : Alex Chung
# @ Contact : yonganzhong@outlook.com
# @ License : Copyright (c) 2017-2018
# @ Time : 2021/1/26 下午2:43
# @ Software : PyCharm
#-------------------------------------------------------
import os
import json
import os.path as osp
import numpy as np
from PIL import Image, ImageFont, ImageDraw
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
from tqdm import tqdm
def draw_box_with_pil(image, bbox, label, color_dict):
    """Draw labelled bounding boxes on a PIL image, in place.

    :param image: PIL.Image to draw on (modified in place and returned).
    :param bbox: box coordinates, reshapeable to (N, 4) as (x1, y1, x2, y2).
    :param label: sequence of N label strings, parallel to ``bbox``.
    :param color_dict: mapping from label string to a PIL color.
    :return: the same image object with boxes and label tags rendered on it.
    """
    width = image.size[0]
    height = image.size[1]  # unused below; kept for symmetry with width
    boxes = np.array(bbox, dtype=np.int32).reshape(-1, 4)
    # Font size scales with the image width so the tags stay readable.
    font = ImageFont.truetype(font=fm.findfont(fm.FontProperties()),
                              size=np.floor(1.5e-2 * width).astype(np.int32),
                              encoding="unic")
    drawer = ImageDraw.Draw(image)
    for rect, tag in zip(boxes, label):
        tag_w, tag_h = drawer.textsize(tag, font)
        # Anchor the label just above the box's top-left corner.
        anchor = np.array([rect[0], rect[1] - tag_h])
        drawer.rectangle([rect[0], rect[1], rect[2], rect[3]],
                         outline=color_dict[tag], width=2)
        drawer.rectangle([tuple(anchor), tuple(anchor + (tag_w, tag_h))],
                         fill=color_dict[tag])
        drawer.text(anchor, str(tag), fill=(255, 255, 255), font=font)
    return image
def check_bbox_boundary(images_info, annotations_info, img_dir, box_img_dir, label_tag, color_dict):
    """Validates every annotation box against its image bounds and writes a
    copy of each image with its boxes drawn into ``box_img_dir``.

    :param images_info: COCO-style image records ('file_name', 'id',
        'width', 'height').
    :param annotations_info: COCO-style annotation records ('image_id',
        'bbox' as [x, y, w, h], 'area', 'category_id').
    :param img_dir: directory containing the source images.
    :param box_img_dir: directory the annotated images are written to.
    :param label_tag: mapping category_id -> class-name string.
    :param color_dict: mapping class-name -> draw color.
    :raises AssertionError: if a clamped box still falls outside the image.
    """
    for img in tqdm(images_info):
        img_name = img['file_name']
        img_id = img['id']
        img_w, img_h = img['width'], img['height']
        # get image bbox
        bboxs = []
        labels = []
        for anns in annotations_info:
            if anns['image_id'] == img['id']:
                x1, y1, w, h = anns['bbox']
                # NOTE(review): w/h are decremented by 1 — presumably to turn
                # an inclusive pixel extent into the far coordinate; confirm
                # against the dataset's bbox convention.
                w, h = w -1, h - 1
                # Skip degenerate annotations (and log them).
                if anns['area'] < 0 or w < 0 or h < 0:
                    print(anns['area'], w, h)
                    continue
                # x1, y1, x2, y2 = x1, y1, x1 + w, y1 + h
                # restrict bbox to image area
                x1 = max(x1, 0)
                y1 = max(y1, 0)
                x2 = min(x1 + w, img_w)
                y2 = min(y1 + h, img_h)
                bboxs.append([x1, y1, x2, y2])
                labels.append(anns['category_id'])
        bboxs = np.array(bboxs, dtype=np.int32).reshape(-1, 4)
        # assert (bboxs[:, 2] >= 1).all(), "Warning, {} bbox tag error in width aspect {}".format(img_name, bboxs)
        # assert (bboxs[:, 3] >= 1).all(), "Warning, {} bbox tag error in height aspect {}".format(img_name, bboxs)
        # bboxs[:, 2:] = bboxs[:,:2] + bboxs[:, 2:]
        # Hard checks: after clamping, every box must sit inside the image.
        assert (bboxs[:, 0] >= 0).all() and (bboxs[:, 2] <= img_w).all(), \
            "Warning, {} bbox size out of range in width aspect {} {}".format(img_name, bboxs, img_w)
        assert (bboxs[:, 1] >= 0).all() and ( bboxs[:, 3] <= img_h).all(), \
            "Warning, {} bbox size out of range in height aspect {} {}".format(img_name, bboxs, img_h)
        # draw box on image
        label = [label_tag[label] for label in labels]
        image = Image.open(osp.join(img_dir, img_name))
        box_img = draw_box_with_pil(image, bboxs, label, color_dict)
        box_img.save(osp.join(box_img_dir, img_name))
def main():
    """Loads the URPC-2020 COCO-style annotation file, validates every box
    and writes annotated copies of the images (paths are hard-coded)."""
    json_path = '/media/alex/80CA308ECA308288/alex_dataset/URPC-2020/train/annotation/voc_all.json'
    img_dir = '/media/alex/80CA308ECA308288/alex_dataset/URPC-2020/train/image'
    box_img_dir = '/media/alex/80CA308ECA308288/alex_dataset/URPC-2020/train/box_image'
    with open(json_path) as f:
        all_data = json.load(f)
    images_info = all_data['images']
    # Drop fields the checker does not use; keep the rest of each record.
    annotations_info = []
    for ann in all_data['annotations']:
        ann.pop('id')
        ann.pop('iscrowd')
        annotations_info.append(ann)
    # category_id -> human-readable class name.
    label_tag = {x['id']: x['name'] for x in all_data['categories']}
    color_dict = {'echinus': 'red', 'starfish': 'green', 'holothurian': 'blue', 'scallop': 'purple'}
    os.makedirs(box_img_dir, exist_ok=True)
    check_bbox_boundary(images_info, annotations_info, img_dir=img_dir, box_img_dir=box_img_dir, label_tag=label_tag,
                        color_dict=color_dict)
    print('Done')
if __name__ == "__main__":
main()
| 35.56391 | 117 | 0.571247 |
import os
import json
import os.path as osp
import numpy as np
from PIL import Image, ImageFont, ImageDraw
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
from tqdm import tqdm
def draw_box_with_pil(image, bbox, label, color_dict):
    """Draws labelled bounding boxes on a PIL image in place and returns it.

    ``bbox`` is reshaped to (N, 4) boxes of (x1, y1, x2, y2); ``label`` is a
    parallel sequence of label strings; ``color_dict`` maps label -> color.
    """
    img_w = image.size[0]
    img_h = image.size[1]
    bbox = np.array(bbox, dtype=np.int32).reshape(-1, 4)
    # Font size scales with the image width so the tags stay readable.
    font = ImageFont.truetype(font=fm.findfont(fm.FontProperties()),
                              size=np.floor(1.5e-2 * img_w ).astype(np.int32), encoding="unic")
    draw = ImageDraw.Draw(image)
    for box, tag in zip(bbox, label):
        label_size = draw.textsize(tag, font)
        # Anchor the label just above the box's top-left corner.
        text_origin = np.array([box[0], box[1] - label_size[1]])
        # Box outline, filled label background, then the label text itself.
        draw.rectangle([box[0], box[1], box[2], box[3]], outline=color_dict[tag], width=2)
        draw.rectangle([tuple(text_origin), tuple(text_origin + label_size)], fill=color_dict[tag])
        draw.text(text_origin, str(tag), fill=(255, 255, 255), font=font)
    return image
def check_bbox_boundary(images_info, annotations_info, img_dir, box_img_dir, label_tag, color_dict):
    """Validates each annotation box against its image bounds (AssertionError
    on violations) and saves annotated image copies into ``box_img_dir``."""
    for img in tqdm(images_info):
        img_name = img['file_name']
        img_id = img['id']
        img_w, img_h = img['width'], img['height']
        bboxs = []
        labels = []
        for anns in annotations_info:
            if anns['image_id'] == img['id']:
                x1, y1, w, h = anns['bbox']
                # NOTE(review): w/h decremented by 1 — presumably an
                # inclusive-extent adjustment; confirm the bbox convention.
                w, h = w -1, h - 1
                # Skip (and log) degenerate annotations.
                if anns['area'] < 0 or w < 0 or h < 0:
                    print(anns['area'], w, h)
                    continue
                # Clamp the box to the image area.
                x1 = max(x1, 0)
                y1 = max(y1, 0)
                x2 = min(x1 + w, img_w)
                y2 = min(y1 + h, img_h)
                bboxs.append([x1, y1, x2, y2])
                labels.append(anns['category_id'])
        bboxs = np.array(bboxs, dtype=np.int32).reshape(-1, 4)
        # After clamping, every box must sit inside the image.
        assert (bboxs[:, 0] >= 0).all() and (bboxs[:, 2] <= img_w).all(), \
            "Warning, {} bbox size out of range in width aspect {} {}".format(img_name, bboxs, img_w)
        assert (bboxs[:, 1] >= 0).all() and ( bboxs[:, 3] <= img_h).all(), \
            "Warning, {} bbox size out of range in height aspect {} {}".format(img_name, bboxs, img_h)
        label = [label_tag[label] for label in labels]
        image = Image.open(osp.join(img_dir, img_name))
        box_img = draw_box_with_pil(image, bboxs, label, color_dict)
        box_img.save(osp.join(box_img_dir, img_name))
def main():
    """Loads the URPC-2020 annotation json, checks every bbox and writes
    annotated image copies (dataset paths are hard-coded)."""
    json_path = '/media/alex/80CA308ECA308288/alex_dataset/URPC-2020/train/annotation/voc_all.json'
    img_dir = '/media/alex/80CA308ECA308288/alex_dataset/URPC-2020/train/image'
    box_img_dir = '/media/alex/80CA308ECA308288/alex_dataset/URPC-2020/train/box_image'
    with open(json_path) as f:
        all_data = json.load(f)
    images_info = all_data['images']
    # Drop fields the checker does not use; keep the rest of each record.
    annotations_info = []
    for ann in all_data['annotations']:
        ann.pop('id')
        ann.pop('iscrowd')
        annotations_info.append(ann)
    # name -> id, then inverted to id -> name for drawing labels.
    category_dict = {x['name']: x['id'] for x in all_data['categories']}
    label_tag = {id:name for name, id in category_dict.items()}
    color_dict = {'echinus': 'red', 'starfish': 'green', 'holothurian': 'blue', 'scallop': 'purple'}
    os.makedirs(box_img_dir, exist_ok=True)
    check_bbox_boundary(images_info, annotations_info, img_dir=img_dir, box_img_dir=box_img_dir, label_tag=label_tag,
                        color_dict=color_dict)
    print('Done')
if __name__ == "__main__":
main()
| true | true |
1c33516987ce24441f9b98964ec2c2675d908382 | 9,566 | py | Python | sunshine_conversations_client/model/message_webhook.py | Dima2022/sunshine-conversations-python | 8085a82dc320d97f09bb0174d11dd1865a65404a | [
"Apache-2.0"
] | 4 | 2020-09-27T14:28:25.000Z | 2022-02-02T13:51:29.000Z | sunshine_conversations_client/model/message_webhook.py | Dima2022/sunshine-conversations-python | 8085a82dc320d97f09bb0174d11dd1865a65404a | [
"Apache-2.0"
] | 3 | 2021-09-30T18:18:58.000Z | 2021-12-04T07:55:23.000Z | sunshine_conversations_client/model/message_webhook.py | Dima2022/sunshine-conversations-python | 8085a82dc320d97f09bb0174d11dd1865a65404a | [
"Apache-2.0"
] | 5 | 2020-11-07T02:08:18.000Z | 2021-12-07T17:10:23.000Z | # coding: utf-8
"""
Sunshine Conversations API
The version of the OpenAPI document: 9.4.5
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from sunshine_conversations_client.configuration import Configuration
from sunshine_conversations_client.undefined import Undefined
class MessageWebhook(object):
    """Model of a message as delivered in Sunshine Conversations webhooks.

    NOTE: originally auto generated by OpenAPI Generator
    (https://openapi-generator.tech); hand-edited so that explicit-null
    tracking is kept per instance instead of in a shared class-level set.
    """

    # Maps attribute name -> OpenAPI type.
    openapi_types = {
        'id': 'str',
        'received': 'str',
        'author': 'AuthorWebhook',
        'content': 'Content',
        'source': 'Source',
        'quoted_message': 'QuotedMessage',
        'metadata': 'object',
        'deleted': 'bool'
    }

    # Maps attribute name -> JSON key in the API payload.
    attribute_map = {
        'id': 'id',
        'received': 'received',
        'author': 'author',
        'content': 'content',
        'source': 'source',
        'quoted_message': 'quotedMessage',
        'metadata': 'metadata',
        'deleted': 'deleted'
    }

    # Backward-compatible class attribute.  Bug fix: this used to be the
    # only `nulls` set, shared by every instance, so explicitly-null fields
    # of one message leaked into all other messages.  Each instance now gets
    # its own set in __init__ (which shadows this one); the class attribute
    # remains for any external code that read `MessageWebhook.nulls`.
    nulls = set()

    def __init__(self, id=None, received=None, author=None, content=None, source=None, quoted_message=Undefined(), metadata=Undefined(), deleted=Undefined(), local_vars_configuration=None):  # noqa: E501
        """MessageWebhook - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Per-instance set of field names explicitly set to null.  Created
        # before the property setters below run, since they mutate it via
        # self.nulls.add/discard.
        self.nulls = set()

        self._id = None
        self._received = None
        self._author = None
        self._content = None
        self._source = None
        self._quoted_message = None
        self._metadata = None
        self._deleted = None
        self.discriminator = None

        if id is not None:
            self.id = id
        if received is not None:
            self.received = received
        if author is not None:
            self.author = author
        if content is not None:
            self.content = content
        if source is not None:
            self.source = source
        self.quoted_message = quoted_message
        self.metadata = metadata
        self.deleted = deleted

    @property
    def id(self):
        """The unique ID of the message.

        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the unique ID of the message.

        :type: str
        """
        self._id = id

    @property
    def received(self):
        """Datetime string (`YYYY-MM-DDThh:mm:ss.SSSZ`) of when Sunshine
        Conversations received the message.

        :rtype: str
        """
        return self._received

    @received.setter
    def received(self, received):
        """Sets the received timestamp of the message.

        :type: str
        """
        self._received = received

    @property
    def author(self):
        """The author of the message.

        :rtype: AuthorWebhook
        """
        return self._author

    @author.setter
    def author(self, author):
        """Sets the author of the message.

        :type: AuthorWebhook
        """
        self._author = author

    @property
    def content(self):
        """The content of the message.

        :rtype: Content
        """
        return self._content

    @content.setter
    def content(self, content):
        """Sets the content of the message.

        :type: Content
        """
        self._content = content

    @property
    def source(self):
        """The source of the message.

        :rtype: Source
        """
        return self._source

    @source.setter
    def source(self, source):
        """Sets the source of the message.

        :type: Source
        """
        self._source = source

    @property
    def quoted_message(self):
        """The quoted message; currently only available for WhatsApp and
        Web Messenger `formResponse` messages.

        :rtype: QuotedMessage
        """
        return self._quoted_message

    @quoted_message.setter
    def quoted_message(self, quoted_message):
        """Sets the quoted message, tracking explicit nulls.

        :type: QuotedMessage
        """
        # Undefined -> field absent; None -> field explicitly null.
        if type(quoted_message) is Undefined:
            quoted_message = None
            self.nulls.discard("quoted_message")
        elif quoted_message is None:
            self.nulls.add("quoted_message")
        else:
            self.nulls.discard("quoted_message")
        self._quoted_message = quoted_message

    @property
    def metadata(self):
        """Flat object of message metadata.

        :rtype: object
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the message metadata, tracking explicit nulls.

        :type: object
        """
        # Undefined -> field absent; None -> field explicitly null.
        if type(metadata) is Undefined:
            metadata = None
            self.nulls.discard("metadata")
        elif metadata is None:
            self.nulls.add("metadata")
        else:
            self.nulls.discard("metadata")
        self._metadata = metadata

    @property
    def deleted(self):
        """True if the message serves as a placeholder for one that has been
        deleted.

        :rtype: bool
        """
        return self._deleted

    @deleted.setter
    def deleted(self, deleted):
        """Sets the deleted flag, tracking explicit nulls.

        :type: bool
        """
        # Undefined -> field absent; None -> field explicitly null.
        if type(deleted) is Undefined:
            deleted = None
            self.nulls.discard("deleted")
        elif deleted is None:
            self.nulls.add("deleted")
        else:
            self.nulls.discard("deleted")
        self._deleted = deleted

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, MessageWebhook):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, MessageWebhook):
            return True

        return self.to_dict() != other.to_dict()
| 28.640719 | 203 | 0.589275 |
import pprint
import re
import six
from sunshine_conversations_client.configuration import Configuration
from sunshine_conversations_client.undefined import Undefined
class MessageWebhook(object):
    """Message payload delivered by a webhook (OpenAPI-generated model style).

    ``openapi_types``/``attribute_map`` drive the generic :meth:`to_dict`
    serialization.  For the nullable fields (``quoted_message``, ``metadata``,
    ``deleted``) an ``Undefined()`` default means "field omitted", while an
    explicit ``None`` means "set to null" and is recorded in ``self.nulls``.
    """

    # Attribute name -> OpenAPI type name (strings, not classes).
    openapi_types = {
        'id': 'str',
        'received': 'str',
        'author': 'AuthorWebhook',
        'content': 'Content',
        'source': 'Source',
        'quoted_message': 'QuotedMessage',
        'metadata': 'object',
        'deleted': 'bool'
    }

    # Python attribute name -> wire (JSON) field name.
    attribute_map = {
        'id': 'id',
        'received': 'received',
        'author': 'author',
        'content': 'content',
        'source': 'source',
        'quoted_message': 'quotedMessage',
        'metadata': 'metadata',
        'deleted': 'deleted'
    }

    # Kept for backward compatibility only; see the per-instance set created
    # in __init__ (the bug fix below).
    nulls = set()

    def __init__(self, id=None, received=None, author=None, content=None, source=None, quoted_message=Undefined(), metadata=Undefined(), deleted=Undefined(), local_vars_configuration=None):
        """MessageWebhook - a model defined in OpenAPI."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # BUG FIX: ``nulls`` previously existed only as a class attribute, so
        # the property setters below mutated one set shared by every instance
        # of this class.  Give each instance its own tracking set before any
        # setter runs.
        self.nulls = set()

        self._id = None
        self._received = None
        self._author = None
        self._content = None
        self._source = None
        self._quoted_message = None
        self._metadata = None
        self._deleted = None
        self.discriminator = None

        # Plain optional fields: only assign when provided.
        if id is not None:
            self.id = id
        if received is not None:
            self.received = received
        if author is not None:
            self.author = author
        if content is not None:
            self.content = content
        if source is not None:
            self.source = source
        # Nullable fields: always run through the Undefined-aware setters.
        self.quoted_message = quoted_message
        self.metadata = metadata
        self.deleted = deleted

    @property
    def id(self):
        """The message id."""
        return self._id

    @id.setter
    def id(self, id):
        self._id = id

    @property
    def received(self):
        """When the message was received."""
        return self._received

    @received.setter
    def received(self, received):
        self._received = received

    @property
    def author(self):
        """The message author."""
        return self._author

    @author.setter
    def author(self, author):
        self._author = author

    @property
    def content(self):
        """The message content."""
        return self._content

    @content.setter
    def content(self, content):
        self._content = content

    @property
    def source(self):
        """The message source."""
        return self._source

    @source.setter
    def source(self, source):
        self._source = source

    @property
    def quoted_message(self):
        """The quoted message, if any (nullable)."""
        return self._quoted_message

    @quoted_message.setter
    def quoted_message(self, quoted_message):
        # Undefined -> treat as absent; explicit None -> remember the null.
        if type(quoted_message) is Undefined:
            quoted_message = None
            self.nulls.discard("quoted_message")
        elif quoted_message is None:
            self.nulls.add("quoted_message")
        else:
            self.nulls.discard("quoted_message")
        self._quoted_message = quoted_message

    @property
    def metadata(self):
        """Free-form metadata attached to the message (nullable)."""
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        if type(metadata) is Undefined:
            metadata = None
            self.nulls.discard("metadata")
        elif metadata is None:
            self.nulls.add("metadata")
        else:
            self.nulls.discard("metadata")
        self._metadata = metadata

    @property
    def deleted(self):
        """Whether the message was deleted (nullable)."""
        return self._deleted

    @deleted.setter
    def deleted(self, deleted):
        if type(deleted) is Undefined:
            deleted = None
            self.nulls.discard("deleted")
        elif deleted is None:
            self.nulls.add("deleted")
        else:
            self.nulls.discard("deleted")
        self._deleted = deleted

    def to_dict(self):
        """Return the model's properties as a dict, recursing into values
        that themselves expose ``to_dict``."""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, MessageWebhook):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        if not isinstance(other, MessageWebhook):
            return True

        return self.to_dict() != other.to_dict()
| true | true |
1c33525922cd3446f90a3cac5e50b77916646871 | 497 | py | Python | app.py | KeisukeShima/streamlit-proj | a50c57a8f0effd484b4015fd1e027746c3b35647 | [
"MIT"
] | null | null | null | app.py | KeisukeShima/streamlit-proj | a50c57a8f0effd484b4015fd1e027746c3b35647 | [
"MIT"
] | null | null | null | app.py | KeisukeShima/streamlit-proj | a50c57a8f0effd484b4015fd1e027746c3b35647 | [
"MIT"
] | null | null | null | import streamlit as st
import plotly.graph_objs as go

# Demo data: one bar per zoo animal.
animal_names = ['giraffes', 'orangutans', 'monkeys']
head_counts = [20, 14, 23]

# Build the bar figure, then set axis titles and the chart title.
figure = go.Figure(data=[go.Bar(x=animal_names, y=head_counts)])
figure.update_layout(
    xaxis=dict(
        tickangle=0,
        title_text="Animal",
        title_font={"size": 20},
        title_standoff=25,
    ),
    yaxis=dict(
        title_text="Populations",
        title_standoff=25,
    ),
    title='Title',
)

# Render inside the Streamlit page, stretched to the container width.
st.plotly_chart(figure, use_container_width=True)
| 23.666667 | 56 | 0.629779 | import streamlit as st
import plotly.graph_objs as go

# Zoo animal populations rendered as a bar chart.
bar_x = ['giraffes', 'orangutans', 'monkeys']
bar_y = [20, 14, 23]

# Axis configuration assembled up front, applied in one update_layout call.
x_axis_cfg = dict(tickangle=0, title_text="Animal",
                  title_font={"size": 20}, title_standoff=25)
y_axis_cfg = dict(title_text="Populations", title_standoff=25)

fig = go.Figure(data=[go.Bar(x=bar_x, y=bar_y)])
fig.update_layout(xaxis=x_axis_cfg, yaxis=y_axis_cfg, title='Title')

# Display in Streamlit, filling the available width.
st.plotly_chart(fig, use_container_width=True)
| true | true |
1c3352f770946400356a22ead7df951cd76e504c | 7,267 | py | Python | stream_module.py | antofuller/configaformers | 293253cd35d96c8a24c4004ba3d24fc6dc85a260 | [
"Apache-2.0"
] | 51 | 2021-11-03T19:52:07.000Z | 2021-12-14T16:56:30.000Z | stream_module.py | muddyrains/muddy-nets | 293253cd35d96c8a24c4004ba3d24fc6dc85a260 | [
"Apache-2.0"
] | null | null | null | stream_module.py | muddyrains/muddy-nets | 293253cd35d96c8a24c4004ba3d24fc6dc85a260 | [
"Apache-2.0"
] | 2 | 2021-11-04T01:46:26.000Z | 2021-11-06T08:39:35.000Z | import torch
from torch import nn
from utils import set_default
class MakeStream(nn.Module):
    """Copy an existing data stream under a new name.

    ``forward`` clones the tensor stored under ``input_name`` into
    ``output_name`` so later modules can modify the copy independently.

    NOTE: the original placed this description in a string literal *after*
    ``super().__init__()``, where it was a no-op statement, not a docstring.
    """
    def __init__(self,
                 config,
                 _streams,
                 ):
        """Configure stream names.

        :param config: dict with optional 'input_name' (default 'x') and a
            required 'output_name'.
        :param _streams: mapping stream name -> shape, used for bookkeeping.
        """
        super().__init__()
        # Configure input(s) and output(s)
        self.input_name = set_default(_look='input_name', _dict=config, _default='x')
        self.input_shape = _streams[self.input_name]

        # FIX: test membership on the dict directly instead of config.keys().
        assert 'output_name' in config, "When making a stream, 'output_name' must be given!"
        self.output_name = config['output_name']

        # Prepare streams info
        self.streams_in_module = {'inputs': [[self.input_name, self.input_shape],
                                             ],
                                  'outputs': [[self.output_name, self.input_shape],
                                              ]
                                  }

    def forward(self, _data):
        # Clone so edits to the copy do not alias the source stream.
        _data[self.output_name] = _data[self.input_name].clone()
        return _data
class MergeStreams(nn.Module):
    """Element-wise merge (add, subtract, or multiply) of two data streams."""

    def __init__(self,
                 config,
                 _streams,
                 ):
        """Read the stream wiring and the merge operation from ``config``."""
        super().__init__()
        # Stream names, all defaulting to the 'x' stream.
        self.input_name_1 = set_default(_look='input_name_1', _dict=config, _default='x')
        self.input_name_2 = set_default(_look='input_name_2', _dict=config, _default='x')
        self.output_name = set_default(_look='output_name', _dict=config, _default='x')
        self.merge_name = set_default(_look='merge_type', _dict=config, _default='add')

        self.input_shape_1 = _streams[self.input_name_1]
        self.input_shape_2 = _streams[self.input_name_2]

        assert self.merge_name in ('add', 'multiply', 'subtract'), \
            f"Merge stream operations available are: 'add', 'multiply', and 'subtract'!"

        # The output takes the higher-rank input's shape.
        first_is_longer = len(self.input_shape_1) >= len(self.input_shape_2)
        self.output_shape = self.input_shape_1 if first_is_longer else self.input_shape_2

        self.streams_in_module = {
            'inputs': [[self.input_name_1, self.input_shape_1],
                       [self.input_name_2, self.input_shape_2]],
            'outputs': [[self.output_name, self.output_shape]],
        }

    def forward(self, _data):
        """Combine the two configured streams and store the result."""
        lhs = _data[self.input_name_1]
        rhs = _data[self.input_name_2]
        if self.merge_name == 'add':
            _data[self.output_name] = lhs + rhs
        elif self.merge_name == 'subtract':
            _data[self.output_name] = lhs - rhs
        elif self.merge_name == 'multiply':
            _data[self.output_name] = lhs * rhs
        else:
            print(f'{self.merge_name} did not match any options.')
        return _data
class CutStream(nn.Module):
    """Slice a data stream along one dimension (IN TESTING).

    'start'/'end' may be ints, or keys into ``_data['input_sizes']`` that are
    resolved at ``forward`` time.  Only up to 4-D inputs are supported.
    """
    def __init__(self,
                 config,
                 _streams,
                 ):
        """Record the slice bounds, dimension, and resulting output shape."""
        super().__init__()
        # Configure input(s) and output(s)
        self.input_name = set_default(_look='input_name', _dict=config, _default='x')
        self.output_name = set_default(_look='output_name', _dict=config, _default='x')

        assert 'start' in config, "Cut_stream must be given a starting index!"
        assert 'end' in config, "Cut_stream must be given an ending index!"
        assert 'cut_dim' in config, "Cut_stream must be given a dimension which is cut!"
        self.start = config['start']
        self.end = config['end']
        self.cut_dim = config['cut_dim']
        self.input_shape = _streams[self.input_name]

        # FIX: isinstance() instead of the type(...) == int anti-pattern.
        # Static size when both bounds are ints, otherwise keep a symbolic
        # description that is resolved at forward time.
        if isinstance(self.start, int) and isinstance(self.end, int):
            cut_dim_output = self.end - self.start
        elif self.start == 0:
            cut_dim_output = self.end
        else:
            cut_dim_output = f"{self.end} - {self.start}"

        self.output_shape = self.input_shape.copy()
        self.output_shape[self.cut_dim] = cut_dim_output

        # Prepare streams info
        self.streams_in_module = {'inputs': [[self.input_name, self.input_shape],
                                             ],
                                  'outputs': [[self.output_name, self.output_shape],
                                              ]
                                  }

    def forward(self, _data):
        """Slice the input stream between the configured bounds."""
        # Non-int bounds are keys into _data['input_sizes'].
        if isinstance(self.start, int):
            start_idx = self.start
        else:
            start_idx = _data['input_sizes'][self.start]

        if isinstance(self.end, int):
            end_idx = self.end
        else:
            end_idx = _data['input_sizes'][self.end]

        if self.cut_dim == 0:
            _data[self.output_name] = _data[self.input_name][start_idx:end_idx, ...]
        elif self.cut_dim == 1:
            _data[self.output_name] = _data[self.input_name][:, start_idx:end_idx, ...]
        elif self.cut_dim == 2:
            _data[self.output_name] = _data[self.input_name][:, :, start_idx:end_idx, ...]
        elif self.cut_dim == 3:
            _data[self.output_name] = _data[self.input_name][:, :, :, start_idx:end_idx]
        else:
            print('cut_stream only supports up to 4 dimensional data')
        return _data
class CatStreams(nn.Module):
    """Concatenate several data streams along one dimension (IN TESTING)."""

    def __init__(self,
                 config,
                 _streams,
                 ):
        """Record the input stream list, concat dimension, and shapes."""
        super().__init__()
        self.output_name = set_default(_look='output_name', _dict=config, _default='x')
        assert 'input_list' in config.keys(), f"Cat_streams must be given 'input_list'!"
        assert 'cat_dim' in config.keys(), f"Cat_streams must be given 'cat_dim'"
        self.input_list = config['input_list']
        self.cat_dim = config['cat_dim']

        # Collect [name, shape] pairs and total up the concatenated dimension.
        stream_records = []
        concat_size = 0
        for stream_name in self.input_list:
            stream_shape = _streams[stream_name]
            stream_records.append([stream_name, stream_shape])
            concat_size += stream_shape[self.cat_dim]

        # Output shape: first input's shape with the concat dim replaced.
        self.output_shape = stream_records[0][1].copy()
        self.output_shape[self.cat_dim] = concat_size

        self.streams_in_module = {
            'inputs': stream_records,
            'outputs': [[self.output_name, self.output_shape]],
        }

    def forward(self, _data):
        """Concatenate every configured input stream along ``cat_dim``."""
        gathered = [_data[stream_name] for stream_name in self.input_list]
        _data[self.output_name] = torch.cat(gathered, dim=self.cat_dim)
        del gathered
        return _data
from torch import nn
from utils import set_default
class MakeStream(nn.Module):
    """Clone one data stream into a new named stream."""

    def __init__(self, config, _streams):
        """``config`` needs 'output_name'; 'input_name' defaults to 'x'."""
        super().__init__()
        self.input_name = set_default(_look='input_name', _dict=config, _default='x')
        self.input_shape = _streams[self.input_name]
        assert 'output_name' in config.keys(), f"When making a stream, 'output_name' must be given!"
        self.output_name = config['output_name']
        # Bookkeeping consumed by the surrounding framework.
        self.streams_in_module = {
            'inputs': [[self.input_name, self.input_shape]],
            'outputs': [[self.output_name, self.input_shape]],
        }

    def forward(self, _data):
        """Store an independent clone of the input stream under output_name."""
        _data[self.output_name] = _data[self.input_name].clone()
        return _data
class MergeStreams(nn.Module):
    """Merge two streams element-wise via add, subtract, or multiply."""

    def __init__(self, config, _streams):
        """Read stream names, shapes, and the merge operation."""
        super().__init__()
        self.input_name_1 = set_default(_look='input_name_1', _dict=config, _default='x')
        self.input_name_2 = set_default(_look='input_name_2', _dict=config, _default='x')
        self.output_name = set_default(_look='output_name', _dict=config, _default='x')
        self.merge_name = set_default(_look='merge_type', _dict=config, _default='add')

        self.input_shape_1 = _streams[self.input_name_1]
        self.input_shape_2 = _streams[self.input_name_2]

        assert self.merge_name in ('add', 'multiply', 'subtract'), \
            f"Merge stream operations available are: 'add', 'multiply', and 'subtract'!"

        # Output shape follows whichever input has the higher rank.
        self.output_shape = (self.input_shape_2
                             if len(self.input_shape_1) < len(self.input_shape_2)
                             else self.input_shape_1)

        self.streams_in_module = {
            'inputs': [[self.input_name_1, self.input_shape_1],
                       [self.input_name_2, self.input_shape_2]],
            'outputs': [[self.output_name, self.output_shape]],
        }

    def forward(self, _data):
        """Apply the configured element-wise merge and store the result."""
        first = _data[self.input_name_1]
        second = _data[self.input_name_2]
        if self.merge_name == 'add':
            _data[self.output_name] = first + second
        elif self.merge_name == 'subtract':
            _data[self.output_name] = first - second
        elif self.merge_name == 'multiply':
            _data[self.output_name] = first * second
        else:
            print(f'{self.merge_name} did not match any options.')
        return _data
class CutStream(nn.Module):
    """Cut (slice) a data stream along one dimension."""

    def __init__(self, config, _streams):
        """Record slice bounds, the cut dimension, and the output shape."""
        super().__init__()
        self.input_name = set_default(_look='input_name', _dict=config, _default='x')
        self.output_name = set_default(_look='output_name', _dict=config, _default='x')
        assert 'start' in config.keys(), f"Cut_stream must be given a starting index!"
        assert 'end' in config.keys(), f"Cut_stream must be given an ending index!"
        assert 'cut_dim' in config.keys(), f"Cut_stream must be given a dimension which is cut!"
        self.start = config['start']
        self.end = config['end']
        self.cut_dim = config['cut_dim']
        self.input_shape = _streams[self.input_name]

        # Static output size when both bounds are ints; otherwise a symbolic
        # description (resolved per-batch at forward time).
        if type(self.start) is int and type(self.end) is int:
            span = self.end - self.start
        elif self.start == 0:
            span = self.end
        else:
            span = f"{self.end} - {self.start}"

        self.output_shape = self.input_shape.copy()
        self.output_shape[self.cut_dim] = span

        self.streams_in_module = {
            'inputs': [[self.input_name, self.input_shape]],
            'outputs': [[self.output_name, self.output_shape]],
        }

    def forward(self, _data):
        """Slice the input stream between the configured bounds."""
        # Non-int bounds are keys into _data['input_sizes'].
        lo = self.start if type(self.start) is int else _data['input_sizes'][self.start]
        hi = self.end if type(self.end) is int else _data['input_sizes'][self.end]

        if self.cut_dim == 0:
            _data[self.output_name] = _data[self.input_name][lo:hi, ...]
        elif self.cut_dim == 1:
            _data[self.output_name] = _data[self.input_name][:, lo:hi, ...]
        elif self.cut_dim == 2:
            _data[self.output_name] = _data[self.input_name][:, :, lo:hi, ...]
        elif self.cut_dim == 3:
            _data[self.output_name] = _data[self.input_name][:, :, :, lo:hi]
        else:
            print('cut_stream only supports up to 4 dimensional data')
        return _data
class CatStreams(nn.Module):
    """Concatenate a list of data streams along one dimension."""

    def __init__(self, config, _streams):
        """``config`` must provide 'input_list' and 'cat_dim'."""
        super().__init__()
        self.output_name = set_default(_look='output_name', _dict=config, _default='x')
        assert 'input_list' in config.keys(), f"Cat_streams must be given 'input_list'!"
        assert 'cat_dim' in config.keys(), f"Cat_streams must be given 'cat_dim'"
        self.input_list = config['input_list']
        self.cat_dim = config['cat_dim']

        # Gather [name, shape] pairs and sum the sizes along the cat dim.
        records = []
        total = 0
        for name in self.input_list:
            shape = _streams[name]
            records.append([name, shape])
            total += shape[self.cat_dim]

        # Output shape is the first input's shape with the cat dim replaced.
        self.output_shape = records[0][1].copy()
        self.output_shape[self.cat_dim] = total

        self.streams_in_module = {
            'inputs': records,
            'outputs': [[self.output_name, self.output_shape]],
        }

    def forward(self, _data):
        """Concatenate the configured streams along ``cat_dim``."""
        pieces = [_data[name] for name in self.input_list]
        _data[self.output_name] = torch.cat(pieces, dim=self.cat_dim)
        del pieces
        return _data
1c3353193d455c706d4e720bf2bc76ea6e47d636 | 10,765 | py | Python | 86833/tanks_test1.py | PlatovDmitriy/Tanks | 62e16e5da8325a1ca22f21a474f97afc682f546e | [
"CC0-1.0"
] | null | null | null | 86833/tanks_test1.py | PlatovDmitriy/Tanks | 62e16e5da8325a1ca22f21a474f97afc682f546e | [
"CC0-1.0"
] | null | null | null | 86833/tanks_test1.py | PlatovDmitriy/Tanks | 62e16e5da8325a1ca22f21a474f97afc682f546e | [
"CC0-1.0"
] | null | null | null | from pygame import *
from time import time as timer
x = 10    # player 1 muzzle/spawn x, kept in sync by Player.update via globals
y = 10
x1 = 500  # player 2 muzzle/spawn position, read by Player2.shoot_* methods
y1 = 500
font.init()
font1 = font.SysFont('Arial',36)  # font used for the reload banners
BLACK = (0,0,0)  # NOTE(review): not referenced anywhere in this chunk - confirm
class GameSprite(sprite.Sprite):
    """Base sprite: a 65x65 scaled image plus position and per-axis speed."""

    def __init__(self, player_image, player_x, player_y, x_speed, y_speed):
        super().__init__()
        # Every sprite image is scaled to the common 65x65 tile size.
        self.image = transform.scale(image.load(player_image), (65, 65))
        # Speed along each axis, in pixels per frame.
        self.x_speed = x_speed
        self.y_speed = y_speed
        # Position the sprite's rect at the requested coordinates.
        sprite_rect = self.image.get_rect()
        sprite_rect.x = player_x
        sprite_rect.y = player_y
        self.rect = sprite_rect

    def reset(self):
        """Draw the sprite at its current rect position."""
        window.blit(self.image, (self.rect.x, self.rect.y))
class Player(GameSprite):
    """Player 1 tank: WASD movement with wall collision, SPACE to shoot.

    The module-level globals ``x``/``y`` hold the muzzle position used by the
    shoot* methods (one per facing direction, matching the rg dispatch in the
    main loop: shoot=down, shoot2=left, shoot3=up, shoot4=right).
    """
    def update(self):
        """Move by the current speed, clamp against walls, and sync x/y."""
        global x, y
        # Horizontal movement, then resolve wall overlap.
        self.rect.x += self.x_speed
        platforms_touched = sprite.spritecollide(self, wallsg, False)
        if self.x_speed > 0:
            for p in platforms_touched:
                self.rect.right = min(self.rect.right, p.rect.left)
        elif self.x_speed < 0:
            for p in platforms_touched:
                self.rect.left = max(self.rect.left, p.rect.right)
        # BUG FIX: x used to be incremented by x_speed *before* the collision
        # clamp, so it drifted away from the tank whenever a wall stopped it
        # and bullets spawned at the wrong spot.  Sync from the clamped rect.
        x = self.rect.x
        # Vertical movement, then resolve wall overlap.
        self.rect.y += self.y_speed
        platforms_touched = sprite.spritecollide(self, wallsg, False)
        if self.y_speed > 0:
            for p in platforms_touched:
                self.rect.bottom = min(self.rect.bottom, p.rect.top)
        elif self.y_speed < 0:
            for p in platforms_touched:
                self.rect.top = max(self.rect.top, p.rect.bottom)
        y = self.rect.y

    def shoot(self):
        """Fire downwards (tracked in the 'bullets' list)."""
        bullet = Bullet('ener.png', x, y, 20, 20)
        bulls.add(bullet)
        bullets.append(bullet)

    def shoot2(self):
        """Fire left (tracked in 'bullets4')."""
        bullet = Bullet('ener.png', x, y, 20, 20)
        bulls.add(bullet)
        bullets4.append(bullet)

    def shoot3(self):
        """Fire up (tracked in 'bullets2')."""
        bullet = Bullet('ener.png', x, y, 20, 20)
        bulls.add(bullet)
        bullets2.append(bullet)

    def shoot4(self):
        """Fire right (tracked in 'bullets3')."""
        bullet = Bullet('ener.png', x, y, 20, 20)
        bulls.add(bullet)
        bullets3.append(bullet)
class Bullet(GameSprite):
    """Projectile for player 1: one update_* method per travel direction."""

    def update(self):
        """Travel downwards (screen +y)."""
        self.rect.y = self.rect.y + self.y_speed

    def update_2(self):
        """Travel upwards."""
        self.rect.y = self.rect.y - self.y_speed

    def update_3(self):
        """Travel right."""
        self.rect.x = self.rect.x + self.x_speed

    def update_4(self):
        """Travel left."""
        self.rect.x = self.rect.x - self.x_speed
class Player2(GameSprite):
    """Player 2 tank.  Globals ``x1``/``y1`` are its muzzle position."""
    def update_45(self):
        """Move by the current speed, clamp against walls, and sync x1/y1."""
        # BUG FIX: x1/y1 were never updated anywhere, so player 2's bullets
        # always spawned at the initial position (500, 500).  Track the
        # clamped rect, mirroring Player.update.
        global x1, y1
        self.rect.x += self.x_speed
        platforms_touched = sprite.spritecollide(self, wallsg, False)
        if self.x_speed > 0:
            for p in platforms_touched:
                self.rect.right = min(self.rect.right, p.rect.left)
        elif self.x_speed < 0:
            for p in platforms_touched:
                self.rect.left = max(self.rect.left, p.rect.right)
        x1 = self.rect.x
        # Vertical movement.
        self.rect.y += self.y_speed
        platforms_touched = sprite.spritecollide(self, wallsg, False)
        if self.y_speed > 0:
            for p in platforms_touched:
                self.rect.bottom = min(self.rect.bottom, p.rect.top)
        elif self.y_speed < 0:
            for p in platforms_touched:
                self.rect.top = max(self.rect.top, p.rect.bottom)
        y1 = self.rect.y

    def shoot_2(self):
        """Fire downwards (tracked in 'bullets_2')."""
        bullet2 = Bullet2('ener.png', x1, y1, 20)
        bulls2.add(bullet2)
        bullets_2.append(bullet2)

    def shoot2_2(self):
        """Fire left (tracked in 'bullets4_2')."""
        bullet2 = Bullet2('ener.png', x1, y1, 20)
        bulls2.add(bullet2)
        bullets4_2.append(bullet2)

    def shoot3_2(self):
        """Fire up (tracked in 'bullets2_2')."""
        bullet2 = Bullet2('ener.png', x1, y1, 20)
        bulls2.add(bullet2)
        bullets2_2.append(bullet2)

    def shoot4_2(self):
        """Fire right (tracked in 'bullets3_2')."""
        bullet2 = Bullet2('ener.png', x1, y1, 20)
        bulls2.add(bullet2)
        bullets3_2.append(bullet2)
class Bullet2(GameSprite):
    """Projectile for player 2.

    BUG FIX: the shoot*_2 methods construct Bullet2 with four arguments, but
    GameSprite.__init__ requires five (x_speed AND y_speed), so every shot
    raised TypeError; the update_* methods also read ``self.speed``, which
    was never assigned.  Accept a single ``speed`` and forward it to both
    axes of the base class.
    """
    def __init__(self, player_image, player_x, player_y, speed):
        super().__init__(player_image, player_x, player_y, speed, speed)
        self.speed = speed

    def update_2(self):
        """Travel downwards."""
        self.rect.y += self.speed

    def update_2_2(self):
        """Travel upwards."""
        self.rect.y -= self.speed

    def update_3_2(self):
        """Travel right."""
        self.rect.x += self.speed

    def update_4_2(self):
        """Travel left."""
        self.rect.x -= self.speed
class Walls(sprite.Sprite):
    """Static 65x65 wall tile that tanks and bullets collide with."""

    def __init__(self, name, cor_x, cor_y):
        super().__init__()
        # Load the wall texture scaled to the common tile size.
        self.image = transform.scale(image.load(name), (65, 65))
        wall_rect = self.image.get_rect()
        wall_rect.x = cor_x
        wall_rect.y = cor_y
        self.rect = wall_rect

    def reset(self):
        """Draw the wall at its fixed position."""
        window.blit(self.image, (self.rect.x, self.rect.y))
# --- round state -----------------------------------------------------------
rec_time = False  # True while player 1 is reloading
num_fire = 0      # shots player 1 has fired in the current reload window
rec_time2 = False  # True while player 2 is reloading
num_fire2 = 0
shooot = 'shot.jpg'  # NOTE(review): never loaded in this chunk - confirm needed
bulls = sprite.Group()   # player 1 bullets (draw/collision group)
bulls2 = sprite.Group()  # player 2 bullets
#bullets2 = sprite.Group()
#bullets3 = sprite.Group()
#bullets4 = sprite.Group()
f = 1  # NOTE(review): unused in this chunk - confirm
wallsg = sprite.Group()  # wall tiles
# Per-direction bullet lists for player 1 (down, up, right, left updates).
bullets = []
bullets2 = []
bullets3 = []
bullets4 = []
# Per-direction bullet lists for player 2.
bullets_2 = []
bullets2_2 = []
bullets3_2 = []
bullets4_2 = []
sc = 0  # player 2 facing (1..4); NOTE(review): never updated in this chunk
rg = 0  # player 1 facing: 1=right, 2=left, 3=down, 4=up (set by WASD handlers)
rg1 = 0
game = True  # main-loop flag, cleared on QUIT
clock = time.Clock()
FPS = 120
finish = False
w = 1900  # window width
h = 1000  # window height
# Player 1 tank textures, one per facing direction.
top = 'tank_player_1_top.png'
down = 'tank_player_1_down.png'
left = 'tank_player_1_left.png'
right = 'tank_player_1_right.png'
#sv = 0
#top1 = 'top.png'
#down1 = 'down.png'
#left1 = 'left.png'
window = display.set_mode((w,h))
background = transform.scale(image.load('background1.jpg'),(w,h))
player_1 = Player(down,10,10,0,0)     # starts at (10, 10), standing still
player_2 = Player2(down,500,500,0,0)  # starts at (500, 500)
# A vertical column of wall tiles at x=150.
wall1 = Walls('block.png',150,150)
wall2 = Walls('block.png',150,85)
wall3 = Walls('block.png',150,215)
wall4 = Walls('block.png',150,280)
wall5 = Walls('block.png',150,345)
wall6 = Walls('block.png',150,410)
wall7 = Walls('block.png',150,475)
wall8 = Walls('block.png',150,540)
wallsg.add(wall1)
wallsg.add(wall2)
wallsg.add(wall3)
wallsg.add(wall4)
wallsg.add(wall5)
wallsg.add(wall6)
wallsg.add(wall7)
wallsg.add(wall8)
while game:
    for e in event.get():
        if e.type == QUIT:
            game = False
        elif e.type == KEYDOWN:
            # Player 1 fires with SPACE; rg encodes the facing direction
            # (1=right, 2=left, 3=down, 4=up) set by the movement keys below.
            if e.key == K_SPACE:
                if num_fire < 1 and not rec_time:
                    num_fire += 1
                    if rg == 1:
                        player_1.shoot4()
                    elif rg == 2:
                        player_1.shoot2()
                    elif rg == 3:
                        player_1.shoot()
                    elif rg == 4:
                        player_1.shoot3()
            # WASD movement: set speed, swap the facing texture, update rg.
            if e.key == K_a:
                player_1.x_speed = -10
                player_1.image = transform.scale(image.load(left),(65,65))
                rg = 2
            elif e.key == K_d:
                player_1.x_speed = 10
                player_1.image = transform.scale(image.load(right),(65,65))
                rg = 1
            elif e.key == K_w:
                player_1.y_speed = -10
                player_1.image = transform.scale(image.load(top),(65,65))
                rg = 4
            elif e.key == K_s:
                player_1.y_speed = 10
                player_1.image = transform.scale(image.load(down),(65,65))
                rg = 3
        elif e.type == KEYUP:
            # Releasing a movement key stops that axis.
            if e.key == K_a:
                player_1.x_speed = 0
            elif e.key == K_d:
                player_1.x_speed = 0
            elif e.key == K_w:
                player_1.y_speed = 0
            elif e.key == K_s:
                player_1.y_speed = 0
            # Start player 1's reload window once a shot has been fired.
            if num_fire >= 1 and not rec_time:
                last_time = timer()
                rec_time = True
            # BUG FIX: this branch tested e.key == K_SPACE while the inner
            # dispatch tested K_RSHIFT, so player 2 could never fire.  Key
            # the whole branch to K_RSHIFT (sc encodes player 2's facing).
            if e.key == K_RSHIFT:
                if num_fire2 < 1 and not rec_time2:
                    num_fire2 += 1
                    if sc == 1:
                        player_2.shoot4_2()
                    elif sc == 2:
                        player_2.shoot2_2()
                    elif sc == 3:
                        player_2.shoot_2()
                    elif sc == 4:
                        player_2.shoot3_2()
            if num_fire2 >= 1 and not rec_time2:
                last_time2 = timer()
                rec_time2 = True
    if not finish:
        # Redraw the scene and advance every object.
        window.blit(background,(0,0))
        player_1.reset()
        player_1.update()
        player_2.reset()
        player_2.update_45()
        wallsg.draw(window)
        bulls.draw(window)
        bulls2.draw(window)
        # Move player 1's bullets while they remain near the screen.
        for bul in bullets:
            if bul.rect.y >= -65 and bul.rect.y <= h + 65:
                bul.update()
        for bul in bullets2:
            if bul.rect.y >= -65 and bul.rect.y <= h + 65:
                bul.update_2()
        for bul in bullets3:
            if bul.rect.x >= -65 and bul.rect.x <= w + 65:
                bul.update_3()
        for bul in bullets4:
            if bul.rect.x >= -65 and bul.rect.x <= w + 65:
                bul.update_4()
        # Bullets and wall tiles destroy each other on contact.
        collides = sprite.groupcollide(bulls, wallsg, True,True)
        # Move player 2's bullets.
        for bult in bullets_2:
            if bult.rect.y >= -65 and bult.rect.y <= h + 65:
                bult.update_2()
        for bult in bullets2_2:
            if bult.rect.y >= -65 and bult.rect.y <= h + 65:
                bult.update_2_2()
        for bult in bullets3_2:
            if bult.rect.x >= -65 and bult.rect.x <= w + 65:
                bult.update_3_2()
        for bult in bullets4_2:
            if bult.rect.x >= -65 and bult.rect.x <= w + 65:
                bult.update_4_2()
        collides2 = sprite.groupcollide(bulls2, wallsg, True,True)
        # Player 1 reload banner / countdown (1.5 s).
        if rec_time:
            now_time = timer()
            if now_time - last_time < 1.5:
                text_reload = font1.render('Перезарядка',1,(200,0,0))
                window.blit(text_reload,(260,460))
            else:
                num_fire = 0
                rec_time = False
        # Player 2 reload banner / countdown.
        if rec_time2:
            now_time2 = timer()
            # BUG FIX: compared against player 1's last_time, so player 2's
            # reload depended on when player 1 last fired (and raised
            # NameError if player 1 had never fired).
            if now_time2 - last_time2 < 1.5:
                text_reload2 = font1.render('Перезарядка',1,(200,0,0))
                window.blit(text_reload2,(600,460))
            else:
                num_fire2 = 0
                rec_time2 = False
    display.update()
    clock.tick(FPS)
| 32.621212 | 76 | 0.51203 | from pygame import *
from time import time as timer
x = 10    # player 1 muzzle/spawn x, kept in sync by Player.update via globals
y = 10
x1 = 500  # player 2 muzzle/spawn position, read by Player2.shoot_* methods
y1 = 500
font.init()
font1 = font.SysFont('Arial',36)  # font used for the reload banners
BLACK = (0,0,0)  # NOTE(review): not referenced anywhere in this chunk - confirm
class GameSprite(sprite.Sprite):
    """Common base: a 65x65 scaled image plus position and per-axis speed."""

    def __init__(self, player_image, player_x, player_y, x_speed, y_speed):
        super().__init__()
        self.image = transform.scale(image.load(player_image), (65, 65))
        # Pixels-per-frame speed along each axis.
        self.x_speed = x_speed
        self.y_speed = y_speed
        rect = self.image.get_rect()
        rect.x = player_x
        rect.y = player_y
        self.rect = rect

    def reset(self):
        """Blit the sprite at its current rect position."""
        window.blit(self.image, (self.rect.x, self.rect.y))
class Player(GameSprite):
    """Player 1 tank: WASD movement with wall collision, SPACE to shoot.

    Globals ``x``/``y`` hold the muzzle position used by the shoot* methods
    (shoot=down, shoot2=left, shoot3=up, shoot4=right per the rg dispatch).
    """
    def update(self):
        """Move by the current speed, clamp against walls, and sync x/y."""
        global x, y
        self.rect.x += self.x_speed
        platforms_touched = sprite.spritecollide(self, wallsg, False)
        if self.x_speed > 0:
            for p in platforms_touched:
                self.rect.right = min(self.rect.right, p.rect.left)
        elif self.x_speed < 0:
            for p in platforms_touched:
                self.rect.left = max(self.rect.left, p.rect.right)
        # BUG FIX: x used to be incremented by x_speed *before* the collision
        # clamp, so bullets spawned at the wrong spot after hitting a wall.
        x = self.rect.x
        self.rect.y += self.y_speed
        platforms_touched = sprite.spritecollide(self, wallsg, False)
        if self.y_speed > 0:
            for p in platforms_touched:
                self.rect.bottom = min(self.rect.bottom, p.rect.top)
        elif self.y_speed < 0:
            for p in platforms_touched:
                self.rect.top = max(self.rect.top, p.rect.bottom)
        y = self.rect.y

    def shoot(self):
        """Fire downwards (tracked in 'bullets')."""
        bullet = Bullet('ener.png', x, y, 20, 20)
        bulls.add(bullet)
        bullets.append(bullet)

    def shoot2(self):
        """Fire left (tracked in 'bullets4')."""
        bullet = Bullet('ener.png', x, y, 20, 20)
        bulls.add(bullet)
        bullets4.append(bullet)

    def shoot3(self):
        """Fire up (tracked in 'bullets2')."""
        bullet = Bullet('ener.png', x, y, 20, 20)
        bulls.add(bullet)
        bullets2.append(bullet)

    def shoot4(self):
        """Fire right (tracked in 'bullets3')."""
        bullet = Bullet('ener.png', x, y, 20, 20)
        bulls.add(bullet)
        bullets3.append(bullet)
class Bullet(GameSprite):
    """Projectile for player 1: one update_* method per travel direction."""

    def update(self):
        """Travel downwards (screen +y)."""
        self.rect.y = self.rect.y + self.y_speed

    def update_2(self):
        """Travel upwards."""
        self.rect.y = self.rect.y - self.y_speed

    def update_3(self):
        """Travel right."""
        self.rect.x = self.rect.x + self.x_speed

    def update_4(self):
        """Travel left."""
        self.rect.x = self.rect.x - self.x_speed
class Player2(GameSprite):
    """Player 2 tank.  Globals ``x1``/``y1`` are its muzzle position."""
    def update_45(self):
        """Move by the current speed, clamp against walls, and sync x1/y1."""
        # BUG FIX: x1/y1 were never updated anywhere, so player 2's bullets
        # always spawned at the initial position (500, 500).
        global x1, y1
        self.rect.x += self.x_speed
        platforms_touched = sprite.spritecollide(self, wallsg, False)
        if self.x_speed > 0:
            for p in platforms_touched:
                self.rect.right = min(self.rect.right, p.rect.left)
        elif self.x_speed < 0:
            for p in platforms_touched:
                self.rect.left = max(self.rect.left, p.rect.right)
        x1 = self.rect.x
        self.rect.y += self.y_speed
        platforms_touched = sprite.spritecollide(self, wallsg, False)
        if self.y_speed > 0:
            for p in platforms_touched:
                self.rect.bottom = min(self.rect.bottom, p.rect.top)
        elif self.y_speed < 0:
            for p in platforms_touched:
                self.rect.top = max(self.rect.top, p.rect.bottom)
        y1 = self.rect.y

    def shoot_2(self):
        """Fire downwards (tracked in 'bullets_2')."""
        bullet2 = Bullet2('ener.png', x1, y1, 20)
        bulls2.add(bullet2)
        bullets_2.append(bullet2)

    def shoot2_2(self):
        """Fire left (tracked in 'bullets4_2')."""
        bullet2 = Bullet2('ener.png', x1, y1, 20)
        bulls2.add(bullet2)
        bullets4_2.append(bullet2)

    def shoot3_2(self):
        """Fire up (tracked in 'bullets2_2')."""
        bullet2 = Bullet2('ener.png', x1, y1, 20)
        bulls2.add(bullet2)
        bullets2_2.append(bullet2)

    def shoot4_2(self):
        """Fire right (tracked in 'bullets3_2')."""
        bullet2 = Bullet2('ener.png', x1, y1, 20)
        bulls2.add(bullet2)
        bullets3_2.append(bullet2)
class Bullet2(GameSprite):
    """Projectile for player 2.

    BUG FIX: Bullet2 is constructed with four arguments, but
    GameSprite.__init__ requires five (x_speed AND y_speed), so every shot
    raised TypeError; the update_* methods also read ``self.speed``, which
    was never assigned.  Accept a single ``speed`` and forward it.
    """
    def __init__(self, player_image, player_x, player_y, speed):
        super().__init__(player_image, player_x, player_y, speed, speed)
        self.speed = speed

    def update_2(self):
        """Travel downwards."""
        self.rect.y += self.speed

    def update_2_2(self):
        """Travel upwards."""
        self.rect.y -= self.speed

    def update_3_2(self):
        """Travel right."""
        self.rect.x += self.speed

    def update_4_2(self):
        """Travel left."""
        self.rect.x -= self.speed
class Walls(sprite.Sprite):
    """Static 65x65 wall tile that tanks and bullets collide with."""

    def __init__(self, name, cor_x, cor_y):
        super().__init__()
        self.image = transform.scale(image.load(name), (65, 65))
        tile_rect = self.image.get_rect()
        tile_rect.x = cor_x
        tile_rect.y = cor_y
        self.rect = tile_rect

    def reset(self):
        """Draw the wall at its fixed position."""
        window.blit(self.image, (self.rect.x, self.rect.y))
# --- round state -----------------------------------------------------------
rec_time = False  # True while player 1 is reloading
num_fire = 0      # shots player 1 has fired in the current reload window
rec_time2 = False  # True while player 2 is reloading
num_fire2 = 0
shooot = 'shot.jpg'  # NOTE(review): never loaded in this chunk - confirm needed
bulls = sprite.Group()   # player 1 bullets (draw/collision group)
bulls2 = sprite.Group()  # player 2 bullets
f = 1  # NOTE(review): unused in this chunk - confirm
wallsg = sprite.Group()  # wall tiles
# Per-direction bullet lists for player 1 (down, up, right, left updates).
bullets = []
bullets2 = []
bullets3 = []
bullets4 = []
# Per-direction bullet lists for player 2.
bullets_2 = []
bullets2_2 = []
bullets3_2 = []
bullets4_2 = []
sc = 0  # player 2 facing (1..4); NOTE(review): never updated in this chunk
rg = 0  # player 1 facing: 1=right, 2=left, 3=down, 4=up (set by WASD handlers)
rg1 = 0
game = True  # main-loop flag, cleared on QUIT
clock = time.Clock()
FPS = 120
finish = False
w = 1900  # window width
h = 1000  # window height
# Player 1 tank textures, one per facing direction.
top = 'tank_player_1_top.png'
down = 'tank_player_1_down.png'
left = 'tank_player_1_left.png'
right = 'tank_player_1_right.png'
window = display.set_mode((w,h))
background = transform.scale(image.load('background1.jpg'),(w,h))
player_1 = Player(down,10,10,0,0)     # starts at (10, 10), standing still
player_2 = Player2(down,500,500,0,0)  # starts at (500, 500)
# A vertical column of wall tiles at x=150.
wall1 = Walls('block.png',150,150)
wall2 = Walls('block.png',150,85)
wall3 = Walls('block.png',150,215)
wall4 = Walls('block.png',150,280)
wall5 = Walls('block.png',150,345)
wall6 = Walls('block.png',150,410)
wall7 = Walls('block.png',150,475)
wall8 = Walls('block.png',150,540)
wallsg.add(wall1)
wallsg.add(wall2)
wallsg.add(wall3)
wallsg.add(wall4)
wallsg.add(wall5)
wallsg.add(wall6)
wallsg.add(wall7)
wallsg.add(wall8)
while game:
    for e in event.get():
        if e.type == QUIT:
            game = False
        elif e.type == KEYDOWN:
            # Player 1 fires with SPACE; rg encodes the facing direction
            # (1=right, 2=left, 3=down, 4=up) set by the movement keys below.
            if e.key == K_SPACE:
                if num_fire < 1 and not rec_time:
                    num_fire += 1
                    if rg == 1:
                        player_1.shoot4()
                    elif rg == 2:
                        player_1.shoot2()
                    elif rg == 3:
                        player_1.shoot()
                    elif rg == 4:
                        player_1.shoot3()
            # WASD movement: set speed, swap the facing texture, update rg.
            if e.key == K_a:
                player_1.x_speed = -10
                player_1.image = transform.scale(image.load(left),(65,65))
                rg = 2
            elif e.key == K_d:
                player_1.x_speed = 10
                player_1.image = transform.scale(image.load(right),(65,65))
                rg = 1
            elif e.key == K_w:
                player_1.y_speed = -10
                player_1.image = transform.scale(image.load(top),(65,65))
                rg = 4
            elif e.key == K_s:
                player_1.y_speed = 10
                player_1.image = transform.scale(image.load(down),(65,65))
                rg = 3
        elif e.type == KEYUP:
            # Releasing a movement key stops that axis.
            if e.key == K_a:
                player_1.x_speed = 0
            elif e.key == K_d:
                player_1.x_speed = 0
            elif e.key == K_w:
                player_1.y_speed = 0
            elif e.key == K_s:
                player_1.y_speed = 0
            # Start player 1's reload window once a shot has been fired.
            if num_fire >= 1 and not rec_time:
                last_time = timer()
                rec_time = True
            # BUG FIX: this branch tested e.key == K_SPACE while the inner
            # dispatch tested K_RSHIFT, so player 2 could never fire.  Key
            # the whole branch to K_RSHIFT (sc encodes player 2's facing).
            if e.key == K_RSHIFT:
                if num_fire2 < 1 and not rec_time2:
                    num_fire2 += 1
                    if sc == 1:
                        player_2.shoot4_2()
                    elif sc == 2:
                        player_2.shoot2_2()
                    elif sc == 3:
                        player_2.shoot_2()
                    elif sc == 4:
                        player_2.shoot3_2()
            if num_fire2 >= 1 and not rec_time2:
                last_time2 = timer()
                rec_time2 = True
    if not finish:
        # Redraw the scene and advance every object.
        window.blit(background,(0,0))
        player_1.reset()
        player_1.update()
        player_2.reset()
        player_2.update_45()
        wallsg.draw(window)
        bulls.draw(window)
        bulls2.draw(window)
        # Move player 1's bullets while they remain near the screen.
        for bul in bullets:
            if bul.rect.y >= -65 and bul.rect.y <= h + 65:
                bul.update()
        for bul in bullets2:
            if bul.rect.y >= -65 and bul.rect.y <= h + 65:
                bul.update_2()
        for bul in bullets3:
            if bul.rect.x >= -65 and bul.rect.x <= w + 65:
                bul.update_3()
        for bul in bullets4:
            if bul.rect.x >= -65 and bul.rect.x <= w + 65:
                bul.update_4()
        # Bullets and wall tiles destroy each other on contact.
        collides = sprite.groupcollide(bulls, wallsg, True,True)
        # Move player 2's bullets.
        for bult in bullets_2:
            if bult.rect.y >= -65 and bult.rect.y <= h + 65:
                bult.update_2()
        for bult in bullets2_2:
            if bult.rect.y >= -65 and bult.rect.y <= h + 65:
                bult.update_2_2()
        for bult in bullets3_2:
            if bult.rect.x >= -65 and bult.rect.x <= w + 65:
                bult.update_3_2()
        for bult in bullets4_2:
            if bult.rect.x >= -65 and bult.rect.x <= w + 65:
                bult.update_4_2()
        collides2 = sprite.groupcollide(bulls2, wallsg, True,True)
        # Player 1 reload banner / countdown (1.5 s).
        if rec_time:
            now_time = timer()
            if now_time - last_time < 1.5:
                text_reload = font1.render('Перезарядка',1,(200,0,0))
                window.blit(text_reload,(260,460))
            else:
                num_fire = 0
                rec_time = False
        # Player 2 reload banner / countdown.
        if rec_time2:
            now_time2 = timer()
            # BUG FIX: compared against player 1's last_time, so player 2's
            # reload depended on when player 1 last fired (and raised
            # NameError if player 1 had never fired).
            if now_time2 - last_time2 < 1.5:
                text_reload2 = font1.render('Перезарядка',1,(200,0,0))
                window.blit(text_reload2,(600,460))
            else:
                num_fire2 = 0
                rec_time2 = False
    display.update()
    clock.tick(FPS)
| true | true |
1c33540ebf27a45236b9d7996d28384d42f3ce87 | 350 | py | Python | examples/cli_examples/set_sort_groups_by_shank.py | LorenFrankLab/SpyGlass | 5c2764b6ba5f7e9e47ddad5d2b9ce5039a0d3f41 | [
"MIT"
] | 1 | 2022-03-22T12:13:18.000Z | 2022-03-22T12:13:18.000Z | examples/cli_examples/set_sort_groups_by_shank.py | LorenFrankLab/SpyGlass | 5c2764b6ba5f7e9e47ddad5d2b9ce5039a0d3f41 | [
"MIT"
] | 17 | 2022-03-22T14:42:04.000Z | 2022-03-31T23:58:39.000Z | examples/cli_examples/set_sort_groups_by_shank.py | LorenFrankLab/SpyGlass | 5c2764b6ba5f7e9e47ddad5d2b9ce5039a0d3f41 | [
"MIT"
] | 1 | 2022-03-23T20:04:25.000Z | 2022-03-23T20:04:25.000Z | #!/usr/bin/env python3
import spyglass.common as sgc
# Example: create spike-sorting groups for one NWB session, one per shank.
nwb_file_name = 'RN2_20191110_.nwb'

# No reference overrides; keep reference electrode groups included.
sgc.SortGroup().set_group_by_shank(
    nwb_file_name=nwb_file_name,
    references=None,
    omit_ref_electrode_group=False,
)

# Show the resulting groups and their member electrodes for this session.
session_key = {'nwb_file_name': nwb_file_name}
print(sgc.SortGroup & session_key)
print(sgc.SortGroup.SortGroupElectrode & session_key)
import spyglass.common as sgc
# Build one SortGroup per shank for this session (no custom references).
nwb_file_name = 'RN2_20191110_.nwb'

sort_group_table = sgc.SortGroup()
sort_group_table.set_group_by_shank(
    nwb_file_name=nwb_file_name,
    references=None,
    omit_ref_electrode_group=False,
)

print(sgc.SortGroup & {'nwb_file_name': nwb_file_name})
print(sgc.SortGroup.SortGroupElectrode & {'nwb_file_name': nwb_file_name})
1c3354f45e59b7e88ce6f4d0e8f69626393a1065 | 1,888 | py | Python | mmdet/datasets/pipelines/__init__.py | HT-hlf/mmdetection_miner-2.22.0 | 76eb94d6547f9f95cd58f41bb5c91941e82322b9 | [
"Apache-2.0"
] | null | null | null | mmdet/datasets/pipelines/__init__.py | HT-hlf/mmdetection_miner-2.22.0 | 76eb94d6547f9f95cd58f41bb5c91941e82322b9 | [
"Apache-2.0"
] | null | null | null | mmdet/datasets/pipelines/__init__.py | HT-hlf/mmdetection_miner-2.22.0 | 76eb94d6547f9f95cd58f41bb5c91941e82322b9 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formatting import (Collect, DefaultFormatBundle, ImageToTensor,
ToDataContainer, ToTensor, Transpose, to_tensor)
from .instaboost import InstaBoost
from .loading import (LoadAnnotations, LoadImageFromFile,LoadImageFromFile_depth,LoadImageFromFile_rgb_depth, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, MixUp, Mosaic,
Normalize, Pad, PhotoMetricDistortion,PhotoMetricDistortion_rgb_depth, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomFlip,
RandomShift, Resize, SegRescale, YOLOXHSVRandomAug)
# Public API of mmdet.datasets.pipelines; must stay in sync with the imports
# above (includes this fork's custom *_depth / *_rgb_depth pipeline variants).
__all__ = [
    'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
    'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
    'LoadImageFromFile', 'LoadImageFromFile_depth','LoadImageFromFile_rgb_depth','LoadImageFromWebcam', 'LoadPanopticAnnotations',
    'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
    'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
    'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'PhotoMetricDistortion_rgb_depth','Albu',
    'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
    'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
    'ContrastTransform', 'Translate', 'RandomShift', 'Mosaic', 'MixUp',
    'RandomAffine', 'YOLOXHSVRandomAug'
]
| 62.933333 | 130 | 0.711864 |
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formatting import (Collect, DefaultFormatBundle, ImageToTensor,
ToDataContainer, ToTensor, Transpose, to_tensor)
from .instaboost import InstaBoost
from .loading import (LoadAnnotations, LoadImageFromFile,LoadImageFromFile_depth,LoadImageFromFile_rgb_depth, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, MixUp, Mosaic,
Normalize, Pad, PhotoMetricDistortion,PhotoMetricDistortion_rgb_depth, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomFlip,
RandomShift, Resize, SegRescale, YOLOXHSVRandomAug)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
'LoadImageFromFile', 'LoadImageFromFile_depth','LoadImageFromFile_rgb_depth','LoadImageFromWebcam', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'PhotoMetricDistortion_rgb_depth','Albu',
'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
'ContrastTransform', 'Translate', 'RandomShift', 'Mosaic', 'MixUp',
'RandomAffine', 'YOLOXHSVRandomAug'
]
| true | true |
1c33550df5289d9baea0397f15634c63d4cc3235 | 22,474 | py | Python | sympy/functions/elementary/tests/test_complexes.py | pbrady/sympy | 7163137e22fbefbb645147a15147b26dad220d49 | [
"BSD-3-Clause"
] | 1 | 2019-06-27T13:40:28.000Z | 2019-06-27T13:40:28.000Z | sympy/functions/elementary/tests/test_complexes.py | amitsaha/sympy | 43ddfc644fd604a3dc0d4cac0aebfecd051917c1 | [
"BSD-3-Clause"
] | null | null | null | sympy/functions/elementary/tests/test_complexes.py | amitsaha/sympy | 43ddfc644fd604a3dc0d4cac0aebfecd051917c1 | [
"BSD-3-Clause"
] | null | null | null | from sympy import (
Abs, adjoint, arg, atan2, conjugate, cos, DiracDelta, E, exp, expand,
Expr, Function, Heaviside, I, im, log, nan, oo, pi, Rational, re, S,
sign, sin, sqrt, Symbol, symbols, transpose, zoo, exp_polar, Piecewise,
Interval, comp
)
from sympy.utilities.pytest import XFAIL, raises
def N_equals(a, b):
    """Return True when *a* and *b* evaluate numerically to within 1e-6."""
    lhs = a.n()
    rhs = b.n()
    return comp(lhs, rhs, 1.e-6)
def test_re():
    """re(): explicit numbers, assumption-carrying symbols, diff and rewrites."""
    x, y = symbols('x,y')
    a, b = symbols('a,b', real=True)
    r = Symbol('r', real=True)
    i = Symbol('i', imaginary=True)
    assert re(nan) == nan
    assert re(oo) == oo
    assert re(-oo) == -oo
    assert re(0) == 0
    assert re(1) == 1
    assert re(-1) == -1
    assert re(E) == E
    assert re(-E) == -E
    assert re(x) == re(x)
    assert re(x*I) == -im(x)
    assert re(r*I) == 0
    assert re(r) == r
    assert re(i*I) == I * i
    assert re(i) == 0
    assert re(x + y) == re(x + y)
    assert re(x + r) == re(x) + r
    assert re(re(x)) == re(x)
    assert re(2 + I) == 2
    assert re(x + I) == re(x)
    assert re(x + y*I) == re(x) - im(y)
    assert re(x + r*I) == re(x)
    assert re(log(2*I)) == log(2)
    assert re((2 + I)**2).expand(complex=True) == 3
    assert re(conjugate(x)) == re(x)
    assert conjugate(re(x)) == re(x)
    assert re(x).as_real_imag() == (re(x), 0)
    assert re(i*r*x).diff(r) == re(i*x)
    assert re(i*r*x).diff(i) == I*r*im(x)
    assert re(
        sqrt(a + b*I)) == (a**2 + b**2)**Rational(1, 4)*cos(atan2(b, a)/2)
    assert re(a * (2 + b*I)) == 2*a
    assert re((1 + sqrt(a + b*I))/2) == \
        (a**2 + b**2)**Rational(1, 4)*cos(atan2(b, a)/2)/2 + Rational(1, 2)
    assert re(x).rewrite(im) == x - im(x)
    assert (x + re(y)).rewrite(re, im) == x + y - im(y)
    # a and x are deliberately rebound for the algebraicity checks below
    a = Symbol('a', algebraic=True)
    t = Symbol('t', transcendental=True)
    x = Symbol('x')
    assert re(a).is_algebraic
    assert re(x).is_algebraic is None
    assert re(t).is_algebraic is False
def test_im():
    """im(): explicit numbers, assumption-carrying symbols, diff and rewrites."""
    x, y = symbols('x,y')
    a, b = symbols('a,b', real=True)
    r = Symbol('r', real=True)
    i = Symbol('i', imaginary=True)
    assert im(nan) == nan
    assert im(oo*I) == oo
    assert im(-oo*I) == -oo
    assert im(0) == 0
    assert im(1) == 0
    assert im(-1) == 0
    assert im(E*I) == E
    assert im(-E*I) == -E
    assert im(x) == im(x)
    assert im(x*I) == re(x)
    assert im(r*I) == r
    assert im(r) == 0
    assert im(i*I) == 0
    assert im(i) == -I * i
    assert im(x + y) == im(x + y)
    assert im(x + r) == im(x)
    assert im(x + r*I) == im(x) + r
    assert im(im(x)*I) == im(x)
    assert im(2 + I) == 1
    assert im(x + I) == im(x) + 1
    assert im(x + y*I) == im(x) + re(y)
    assert im(x + r*I) == im(x) + r
    assert im(log(2*I)) == pi/2
    assert im((2 + I)**2).expand(complex=True) == 4
    assert im(conjugate(x)) == -im(x)
    assert conjugate(im(x)) == im(x)
    assert im(x).as_real_imag() == (im(x), 0)
    assert im(i*r*x).diff(r) == im(i*x)
    assert im(i*r*x).diff(i) == -I * re(r*x)
    assert im(
        sqrt(a + b*I)) == (a**2 + b**2)**Rational(1, 4)*sin(atan2(b, a)/2)
    assert im(a * (2 + b*I)) == a*b
    assert im((1 + sqrt(a + b*I))/2) == \
        (a**2 + b**2)**Rational(1, 4)*sin(atan2(b, a)/2)/2
    assert im(x).rewrite(re) == x - re(x)
    assert (x + im(y)).rewrite(im, re) == x + y - re(y)
    # NOTE(review): the three re(...) assertions below look copy-pasted from
    # test_re -- in an im() test they should probably assert im(...).is_algebraic;
    # confirm im defines an algebraicity handler before changing.
    a = Symbol('a', algebraic=True)
    t = Symbol('t', transcendental=True)
    x = Symbol('x')
    assert re(a).is_algebraic
    assert re(x).is_algebraic is None
    assert re(t).is_algebraic is False
def test_sign():
    """sign(): numeric values, assumption propagation per symbol kind,
    Piecewise/Heaviside rewrites, and hard-to-prove-zero expressions.

    Note: ``x`` is repeatedly rebound with different assumptions; each
    rebinding starts a new assertion group.
    """
    assert sign(1.2) == 1
    assert sign(-1.2) == -1
    assert sign(3*I) == I
    assert sign(-3*I) == -I
    assert sign(0) == 0
    assert sign(nan) == nan
    assert sign(2 + 2*I).doit() == sqrt(2)*(2 + 2*I)/4
    assert sign(2 + 3*I).simplify() == sign(2 + 3*I)
    assert sign(2 + 2*I).simplify() == sign(1 + I)
    assert sign(im(sqrt(1 - sqrt(3)))) == 1
    assert sign(sqrt(1 - sqrt(3))) == I
    # x with no assumptions: almost everything is indeterminate (None)
    x = Symbol('x')
    assert sign(x).is_finite is True
    assert sign(x).is_complex is True
    assert sign(x).is_imaginary is None
    assert sign(x).is_integer is None
    assert sign(x).is_real is None
    assert sign(x).is_zero is None
    assert sign(x).doit() == sign(x)
    assert sign(1.2*x) == sign(x)
    assert sign(2*x) == sign(x)
    assert sign(I*x) == I*sign(x)
    assert sign(-2*I*x) == -I*sign(x)
    assert sign(conjugate(x)) == conjugate(sign(x))
    p = Symbol('p', positive=True)
    n = Symbol('n', negative=True)
    m = Symbol('m', negative=True)
    assert sign(2*p*x) == sign(x)
    assert sign(n*x) == -sign(x)
    assert sign(n*m*x) == sign(x)
    # imaginary x
    x = Symbol('x', imaginary=True)
    assert sign(x).is_imaginary is True
    assert sign(x).is_integer is False
    assert sign(x).is_real is False
    assert sign(x).is_zero is False
    assert sign(x).diff(x) == 2*DiracDelta(-I*x)
    assert sign(x).doit() == x / Abs(x)
    assert conjugate(sign(x)) == -sign(x)
    # real x (may be zero)
    x = Symbol('x', real=True)
    assert sign(x).is_imaginary is False
    assert sign(x).is_integer is True
    assert sign(x).is_real is True
    assert sign(x).is_zero is None
    assert sign(x).diff(x) == 2*DiracDelta(x)
    assert sign(x).doit() == sign(x)
    assert conjugate(sign(x)) == sign(x)
    # nonzero (possibly complex) x
    x = Symbol('x', nonzero=True)
    assert sign(x).is_imaginary is None
    assert sign(x).is_integer is None
    assert sign(x).is_real is None
    assert sign(x).is_zero is False
    assert sign(x).doit() == x / Abs(x)
    assert sign(Abs(x)) == 1
    assert Abs(sign(x)) == 1
    # positive x
    x = Symbol('x', positive=True)
    assert sign(x).is_imaginary is False
    assert sign(x).is_integer is True
    assert sign(x).is_real is True
    assert sign(x).is_zero is False
    assert sign(x).doit() == x / Abs(x)
    assert sign(Abs(x)) == 1
    assert Abs(sign(x)) == 1
    # literal zero
    x = 0
    assert sign(x).is_imaginary is False
    assert sign(x).is_integer is True
    assert sign(x).is_real is True
    assert sign(x).is_zero is True
    assert sign(x).doit() == 0
    assert sign(Abs(x)) == 0
    assert Abs(sign(x)) == 0
    nz = Symbol('nz', nonzero=True, integer=True)
    assert sign(nz).is_imaginary is False
    assert sign(nz).is_integer is True
    assert sign(nz).is_real is True
    assert sign(nz).is_zero is False
    assert sign(nz)**2 == 1
    assert (sign(nz)**3).args == (sign(nz), 3)
    assert sign(Symbol('x', nonnegative=True)).is_nonnegative
    assert sign(Symbol('x', nonnegative=True)).is_nonpositive is None
    assert sign(Symbol('x', nonpositive=True)).is_nonnegative is None
    assert sign(Symbol('x', nonpositive=True)).is_nonpositive
    assert sign(Symbol('x', real=True)).is_nonnegative is None
    assert sign(Symbol('x', real=True)).is_nonpositive is None
    assert sign(Symbol('x', real=True, zero=False)).is_nonpositive is None
    x, y = Symbol('x', real=True), Symbol('y')
    assert sign(x).rewrite(Piecewise) == \
        Piecewise((1, x > 0), (-1, x < 0), (0, True))
    assert sign(y).rewrite(Piecewise) == sign(y)
    assert sign(x).rewrite(Heaviside) == 2*Heaviside(x)-1
    assert sign(y).rewrite(Heaviside) == sign(y)
    # evaluate what can be evaluated
    assert sign(exp_polar(I*pi)*pi) is S.NegativeOne
    eq = -sqrt(10 + 6*sqrt(3)) + sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3))
    # if there is a fast way to know when and when you cannot prove an
    # expression like this is zero then the equality to zero is ok
    assert sign(eq).func is sign or sign(eq) == 0
    # but sometimes it's hard to do this so it's better not to load
    # abs down with tests that will be very slow
    q = 1 + sqrt(2) - 2*sqrt(3) + 1331*sqrt(6)
    p = expand(q**3)**Rational(1, 3)
    d = p - q
    assert sign(d).func is sign or sign(d) == 0
def test_as_real_imag():
    """as_real_imag(): integer powers, sqrt of symbols, imaginary symbols."""
    n = pi**1000
    # the special code for working out the real
    # and complex parts of a power with Integer exponent
    # should not run if there is no imaginary part, hence
    # this should not hang
    assert n.as_real_imag() == (n, 0)
    # issue 6261
    x = Symbol('x')
    assert sqrt(x).as_real_imag() == \
        ((re(x)**2 + im(x)**2)**(S(1)/4)*cos(atan2(im(x), re(x))/2),
        (re(x)**2 + im(x)**2)**(S(1)/4)*sin(atan2(im(x), re(x))/2))
    # issue 3853
    a, b = symbols('a,b', real=True)
    assert ((1 + sqrt(a + b*I))/2).as_real_imag() == \
        (
            (a**2 + b**2)**Rational(
                1, 4)*cos(atan2(b, a)/2)/2 + Rational(1, 2),
            (a**2 + b**2)**Rational(1, 4)*sin(atan2(b, a)/2)/2)
    assert sqrt(a**2).as_real_imag() == (sqrt(a**2), 0)
    i = symbols('i', imaginary=True)
    assert sqrt(i**2).as_real_imag() == (0, abs(i))
@XFAIL
def test_sign_issue_3068():
    """sign/round of pi**1000 minus its integer part (issue 3068).

    Fix: ``x`` was previously unbound in this function, so the XFAIL was
    satisfied by a NameError rather than by the numeric issue the test is
    meant to track; ``x`` is now defined so the intended assertions run.
    """
    n = pi**1000
    i = int(n)
    x = Symbol('x')
    assert (n - i).round() == 1  # doesn't hang
    assert sign(n - i) == 1
    # perhaps it's not possible to get the sign right when
    # only 1 digit is being requested for this situation;
    # 2 digits works
    assert (n - x).n(1, subs={x: i}) > 0
    assert (n - x).n(2, subs={x: i}) > 0
def test_Abs():
    """Abs(): numeric values, powers, diff, infinities, algebraicity."""
    raises(TypeError, lambda: Abs(Interval(2, 3)))  # issue 8717
    x, y = symbols('x,y')
    # NOTE(review): the next two sign(...) assertions look misplaced in an
    # Abs test (they duplicate coverage from test_sign); confirm intent.
    assert sign(sign(x)) == sign(x)
    assert sign(x*y).func is sign
    assert Abs(0) == 0
    assert Abs(1) == 1
    assert Abs(-1) == 1
    assert Abs(I) == 1
    assert Abs(-I) == 1
    assert Abs(nan) == nan
    assert Abs(I * pi) == pi
    assert Abs(-I * pi) == pi
    assert Abs(I * x) == Abs(x)
    assert Abs(-I * x) == Abs(x)
    assert Abs(-2*x) == 2*Abs(x)
    assert Abs(-2.0*x) == 2.0*Abs(x)
    assert Abs(2*pi*x*y) == 2*pi*Abs(x*y)
    assert Abs(conjugate(x)) == Abs(x)
    assert conjugate(Abs(x)) == Abs(x)
    a = Symbol('a', positive=True)
    assert Abs(2*pi*x*a) == 2*pi*a*Abs(x)
    assert Abs(2*pi*I*x*a) == 2*pi*a*Abs(x)
    x = Symbol('x', real=True)
    n = Symbol('n', integer=True)
    assert Abs((-1)**n) == 1
    assert x**(2*n) == Abs(x)**(2*n)
    assert Abs(x).diff(x) == sign(x)
    assert abs(x) == Abs(x)  # Python built-in
    assert Abs(x)**3 == x**2*Abs(x)
    assert Abs(x)**4 == x**4
    assert (
        Abs(x)**(3*n)).args == (Abs(x), 3*n)  # leave symbolic odd unchanged
    assert (1/Abs(x)).args == (Abs(x), -1)
    assert 1/Abs(x)**3 == 1/(x**2*Abs(x))
    assert Abs(x)**-3 == Abs(x)/(x**4)
    assert Abs(x**3) == x**2*Abs(x)
    x = Symbol('x', imaginary=True)
    assert Abs(x).diff(x) == -sign(x)
    eq = -sqrt(10 + 6*sqrt(3)) + sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3))
    # if there is a fast way to know when you can and when you cannot prove an
    # expression like this is zero then the equality to zero is ok
    assert abs(eq).func is Abs or abs(eq) == 0
    # but sometimes it's hard to do this so it's better not to load
    # abs down with tests that will be very slow
    q = 1 + sqrt(2) - 2*sqrt(3) + 1331*sqrt(6)
    p = expand(q**3)**Rational(1, 3)
    d = p - q
    assert abs(d).func is Abs or abs(d) == 0
    assert Abs(4*exp(pi*I/4)) == 4
    assert Abs(3**(2 + I)) == 9
    assert Abs((-3)**(1 - I)) == 3*exp(pi)
    assert Abs(oo) is oo
    assert Abs(-oo) is oo
    assert Abs(oo + I) is oo
    assert Abs(oo + I*oo) is oo
    # NOTE(review): the re(...) assertions below look copy-pasted from
    # test_re -- presumably they should test Abs(...).is_algebraic; confirm.
    a = Symbol('a', algebraic=True)
    t = Symbol('t', transcendental=True)
    x = Symbol('x')
    assert re(a).is_algebraic
    assert re(x).is_algebraic is None
    assert re(t).is_algebraic is False
def test_Abs_rewrite():
    """Abs rewrites: Heaviside, Piecewise, and sign."""
    x = Symbol('x', real=True)
    a = Abs(x).rewrite(Heaviside).expand()
    assert a == x*Heaviside(x) - x*Heaviside(-x)
    # the Heaviside form must agree with abs() at sample integer points
    for i in [-2, -1, 0, 1, 2]:
        assert a.subs(x, i) == abs(i)
    y = Symbol('y')
    assert Abs(y).rewrite(Heaviside) == Abs(y)
    x, y = Symbol('x', real=True), Symbol('y')
    assert Abs(x).rewrite(Piecewise) == Piecewise((x, x >= 0), (-x, True))
    assert Abs(y).rewrite(Piecewise) == Abs(y)
    assert Abs(y).rewrite(sign) == y/sign(y)
def test_Abs_real():
    """Properties of Abs that hold only for real arguments."""
    x = Symbol('x', complex=True)
    assert sqrt(x**2) != Abs(x)
    assert Abs(x**2) != x**2
    x = Symbol('x', real=True)
    assert sqrt(x**2) == Abs(x)
    assert Abs(x**2) == x**2
    # if the symbol is zero, the following will still apply
    nn = Symbol('nn', nonnegative=True, real=True)
    np = Symbol('np', nonpositive=True, real=True)
    assert Abs(nn) == nn
    assert Abs(np) == -np
def test_Abs_properties():
    """Assumption propagation through Abs for various symbol kinds."""
    x = Symbol('x')
    assert Abs(x).is_real is True
    assert Abs(x).is_rational is None
    assert Abs(x).is_positive is None
    assert Abs(x).is_nonnegative is True
    z = Symbol('z', complex=True, zero=False)
    assert Abs(z).is_real is True
    assert Abs(z).is_rational is None
    assert Abs(z).is_positive is True
    assert Abs(z).is_zero is False
    p = Symbol('p', positive=True)
    assert Abs(p).is_real is True
    assert Abs(p).is_rational is None
    assert Abs(p).is_positive is True
    assert Abs(p).is_zero is False
    q = Symbol('q', rational=True)
    assert Abs(q).is_rational is True
    assert Abs(q).is_integer is None
    assert Abs(q).is_positive is None
    assert Abs(q).is_nonnegative is True
    i = Symbol('i', integer=True)
    assert Abs(i).is_integer is True
    assert Abs(i).is_positive is None
    assert Abs(i).is_nonnegative is True
    e = Symbol('n', even=True)
    ne = Symbol('ne', real=True, even=False)
    assert Abs(e).is_even
    assert Abs(ne).is_even is False
    assert Abs(i).is_even is None
    o = Symbol('n', odd=True)
    no = Symbol('no', real=True, odd=False)
    assert Abs(o).is_odd
    assert Abs(no).is_odd is False
    assert Abs(i).is_odd is None
def test_abs():
    """The abs() built-in must dispatch to Abs (kept distinct from test_Abs)."""
    p = Symbol('a', positive=True)
    expr = I*(1 + p)**2
    assert abs(expr) == (1 + p)**2
def test_arg():
    """arg(): principal values on the axes/diagonals, assumptions, and
    sign handling when factoring positive quantities out."""
    assert arg(0) == nan
    assert arg(1) == 0
    assert arg(-1) == pi
    assert arg(I) == pi/2
    assert arg(-I) == -pi/2
    assert arg(1 + I) == pi/4
    assert arg(-1 + I) == 3*pi/4
    assert arg(1 - I) == -pi/4
    f = Function('f')
    assert not arg(f(0) + I*f(1)).atoms(re)
    p = Symbol('p', positive=True)
    assert arg(p) == 0
    n = Symbol('n', negative=True)
    assert arg(n) == pi
    x = Symbol('x')
    assert conjugate(arg(x)) == arg(x)
    e = p + I*p**2
    assert arg(e) == arg(1 + p*I)
    # make sure sign doesn't swap
    e = -2*p + 4*I*p**2
    assert arg(e) == arg(-1 + 2*p*I)
    # make sure sign isn't lost
    x = symbols('x', real=True)  # could be zero
    e = x + I*x
    assert arg(e) == arg(x*(1 + I))
    assert arg(e/p) == arg(x*(1 + I))
    e = p*cos(p) + I*log(p)*exp(p)
    assert arg(e).args[0] == e
    # keep it simple -- let the user do more advanced cancellation
    e = (p + 1) + I*(p**2 - 1)
    assert arg(e).args[0] == e
def test_arg_rewrite():
    """arg() must be expressible via atan2 through rewrite()."""
    assert arg(1 + I) == atan2(1, 1)
    u, v = symbols('x y', real=True)
    assert arg(u + I*v).rewrite(atan2) == atan2(v, u)
def test_adjoint():
    """adjoint(): hermitian/antihermitian symbols, then linearity and
    (anti)multiplicativity for commutative and noncommutative symbols."""
    a = Symbol('a', antihermitian=True)
    b = Symbol('b', hermitian=True)
    assert adjoint(a) == -a
    assert adjoint(I*a) == I*a
    assert adjoint(b) == b
    assert adjoint(I*b) == -I*b
    assert adjoint(a*b) == -b*a
    assert adjoint(I*a*b) == I*b*a
    x, y = symbols('x y')
    assert adjoint(adjoint(x)) == x
    assert adjoint(x + y) == adjoint(x) + adjoint(y)
    assert adjoint(x - y) == adjoint(x) - adjoint(y)
    assert adjoint(x * y) == adjoint(x) * adjoint(y)
    assert adjoint(x / y) == adjoint(x) / adjoint(y)
    assert adjoint(-x) == -adjoint(x)
    # noncommutative case: products reverse order
    x, y = symbols('x y', commutative=False)
    assert adjoint(adjoint(x)) == x
    assert adjoint(x + y) == adjoint(x) + adjoint(y)
    assert adjoint(x - y) == adjoint(x) - adjoint(y)
    assert adjoint(x * y) == adjoint(y) * adjoint(x)
    assert adjoint(x / y) == 1 / adjoint(y) * adjoint(x)
    assert adjoint(-x) == -adjoint(x)
def test_conjugate():
    """conjugate(): real/imaginary symbols and linearity/multiplicativity."""
    a = Symbol('a', real=True)
    b = Symbol('b', imaginary=True)
    assert conjugate(a) == a
    assert conjugate(I*a) == -I*a
    assert conjugate(b) == -b
    assert conjugate(I*b) == I*b
    assert conjugate(a*b) == -a*b
    assert conjugate(I*a*b) == I*a*b
    x, y = symbols('x y')
    assert conjugate(conjugate(x)) == x
    assert conjugate(x + y) == conjugate(x) + conjugate(y)
    assert conjugate(x - y) == conjugate(x) - conjugate(y)
    assert conjugate(x * y) == conjugate(x) * conjugate(y)
    assert conjugate(x / y) == conjugate(x) / conjugate(y)
    assert conjugate(-x) == -conjugate(x)
    # NOTE(review): the re(...) assertions below look copy-pasted from
    # test_re -- presumably they should test conjugate(...).is_algebraic;
    # confirm before changing.
    a = Symbol('a', algebraic=True)
    t = Symbol('t', transcendental=True)
    assert re(a).is_algebraic
    assert re(x).is_algebraic is None
    assert re(t).is_algebraic is False
def test_conjugate_transpose():
    """Any two of conjugate/transpose/adjoint compose into the third."""
    x = Symbol('x')
    assert conjugate(transpose(x)) == adjoint(x)
    assert transpose(conjugate(x)) == adjoint(x)
    assert adjoint(transpose(x)) == conjugate(x)
    assert transpose(adjoint(x)) == conjugate(x)
    assert adjoint(conjugate(x)) == transpose(x)
    assert conjugate(adjoint(x)) == transpose(x)
    # an Expr that only knows its transpose: conjugate must equal adjoint,
    # and transpose must return the object itself
    class Symmetric(Expr):
        def _eval_adjoint(self):
            return None
        def _eval_conjugate(self):
            return None
        def _eval_transpose(self):
            return self
    x = Symmetric()
    assert conjugate(x) == adjoint(x)
    assert transpose(x) == x
def test_transpose():
    """transpose(): identity on scalars, linearity, and product-order
    reversal for noncommutative symbols."""
    a = Symbol('a', complex=True)
    assert transpose(a) == a
    assert transpose(I*a) == I*a
    x, y = symbols('x y')
    assert transpose(transpose(x)) == x
    assert transpose(x + y) == transpose(x) + transpose(y)
    assert transpose(x - y) == transpose(x) - transpose(y)
    assert transpose(x * y) == transpose(x) * transpose(y)
    assert transpose(x / y) == transpose(x) / transpose(y)
    assert transpose(-x) == -transpose(x)
    # noncommutative case: products reverse order
    x, y = symbols('x y', commutative=False)
    assert transpose(transpose(x)) == x
    assert transpose(x + y) == transpose(x) + transpose(y)
    assert transpose(x - y) == transpose(x) - transpose(y)
    assert transpose(x * y) == transpose(y) * transpose(x)
    assert transpose(x / y) == 1 / transpose(y) * transpose(x)
    assert transpose(-x) == -transpose(x)
def test_issue_4035():
    """expand(trig=True) must leave Abs/sign/arg of a bare symbol unchanged."""
    z = Symbol('x')
    for func in (Abs, sign, arg):
        assert func(z).expand(trig=True) == func(z)
def test_issue_3206():
    """Abs is idempotent: Abs(Abs(x)) collapses to Abs(x)."""
    s = Symbol('x')
    assert Abs(Abs(s)) == Abs(s)
def test_issue_4754_derivative_conjugate():
    """d/dx commutes with conjugate for real x; gains a -1 for imaginary y."""
    x = Symbol('x', real=True)
    y = Symbol('y', imaginary=True)
    f = Function('f')
    assert (f(x).conjugate()).diff(x) == (f(x).diff(x)).conjugate()
    assert (f(y).conjugate()).diff(y) == -(f(y).diff(y)).conjugate()
def test_derivatives_issue_4757():
    """Derivatives of re/im/Abs/arg of an undefined function, for a real
    variable x and an imaginary variable y."""
    x = Symbol('x', real=True)
    y = Symbol('y', imaginary=True)
    f = Function('f')
    assert re(f(x)).diff(x) == re(f(x).diff(x))
    assert im(f(x)).diff(x) == im(f(x).diff(x))
    assert re(f(y)).diff(y) == -I*im(f(y).diff(y))
    assert im(f(y)).diff(y) == -I*re(f(y).diff(y))
    assert Abs(f(x)).diff(x).subs(f(x), 1 + I*x).doit() == x/sqrt(1 + x**2)
    assert arg(f(x)).diff(x).subs(f(x), 1 + I*x**2).doit() == 2*x/(1 + x**4)
    assert Abs(f(y)).diff(y).subs(f(y), 1 + y).doit() == -y/sqrt(1 - y**2)
    assert arg(f(y)).diff(y).subs(f(y), I + y**2).doit() == 2*y/(1 + y**4)
def test_periodic_argument():
    """periodic_argument/unbranched_argument and their interplay with
    polar_lift and principal_branch."""
    from sympy import (periodic_argument, unbranched_argument, oo,
                       principal_branch, polar_lift, pi)
    x = Symbol('x')
    p = Symbol('p', positive=True)
    assert unbranched_argument(2 + I) == periodic_argument(2 + I, oo)
    assert unbranched_argument(1 + x) == periodic_argument(1 + x, oo)
    assert N_equals(unbranched_argument((1 + I)**2), pi/2)
    assert N_equals(unbranched_argument((1 - I)**2), -pi/2)
    assert N_equals(periodic_argument((1 + I)**2, 3*pi), pi/2)
    assert N_equals(periodic_argument((1 - I)**2, 3*pi), -pi/2)
    assert unbranched_argument(principal_branch(x, pi)) == \
        periodic_argument(x, pi)
    # polar_lift must be transparent to the argument functions
    assert unbranched_argument(polar_lift(2 + I)) == unbranched_argument(2 + I)
    assert periodic_argument(polar_lift(2 + I), 2*pi) == \
        periodic_argument(2 + I, 2*pi)
    assert periodic_argument(polar_lift(2 + I), 3*pi) == \
        periodic_argument(2 + I, 3*pi)
    assert periodic_argument(polar_lift(2 + I), pi) == \
        periodic_argument(polar_lift(2 + I), pi)
    assert unbranched_argument(polar_lift(1 + I)) == pi/4
    assert periodic_argument(2*p, p) == periodic_argument(p, p)
    assert periodic_argument(pi*p, p) == periodic_argument(p, p)
    assert Abs(polar_lift(1 + I)) == Abs(1 + I)
@XFAIL
def test_principal_branch_fail():
    """Numeric evaluation of principal_branch (known failure).

    Fix: ``principal_branch`` was only imported locally inside
    test_principal_branch, so this XFAIL previously fired on a NameError
    instead of the evalf issue it is meant to track; import it here.
    """
    from sympy import principal_branch
    # TODO XXX why does abs(x)._eval_evalf() not fall back to global evalf?
    assert N_equals(principal_branch((1 + I)**2, pi/2), 0)
def test_principal_branch():
    """principal_branch(): polar_lift transparency, special values,
    numeric evaluation, and rejection of invalid period arguments."""
    from sympy import principal_branch, polar_lift, exp_polar
    p = Symbol('p', positive=True)
    x = Symbol('x')
    neg = Symbol('x', negative=True)
    assert principal_branch(polar_lift(x), p) == principal_branch(x, p)
    assert principal_branch(polar_lift(2 + I), p) == principal_branch(2 + I, p)
    assert principal_branch(2*x, p) == 2*principal_branch(x, p)
    assert principal_branch(1, pi) == exp_polar(0)
    assert principal_branch(-1, 2*pi) == exp_polar(I*pi)
    assert principal_branch(-1, pi) == exp_polar(0)
    assert principal_branch(exp_polar(3*pi*I)*x, 2*pi) == \
        principal_branch(exp_polar(I*pi)*x, 2*pi)
    assert principal_branch(neg*exp_polar(pi*I), 2*pi) == neg*exp_polar(-I*pi)
    assert N_equals(principal_branch((1 + I)**2, 2*pi), 2*I)
    assert N_equals(principal_branch((1 + I)**2, 3*pi), 2*I)
    assert N_equals(principal_branch((1 + I)**2, 1*pi), 2*I)
    # test argument sanitization
    assert principal_branch(x, I).func is principal_branch
    assert principal_branch(x, -4).func is principal_branch
    assert principal_branch(x, -oo).func is principal_branch
    assert principal_branch(x, zoo).func is principal_branch
@XFAIL
def test_issue_6167_6151():
    """sign/abs of differences between huge near-equal quantities.

    Fix: ``x`` and ``simplify`` were previously unbound here, so the XFAIL
    was satisfied by a NameError rather than by the numeric issues these
    assertions are meant to track; both names are now brought into scope.
    """
    from sympy import simplify
    x = Symbol('x')
    n = pi**1000
    i = int(n)
    assert sign(n - i) == 1
    assert abs(n - i) == n - i
    eps = pi**-1500
    big = pi**1000
    one = cos(x)**2 + sin(x)**2
    e = big*one - big + eps
    assert sign(simplify(e)) == 1
    for xi in (111, 11, 1, S(1)/10):
        assert sign(e.subs(x, xi)) == 1
| 31.300836 | 79 | 0.581249 | from sympy import (
Abs, adjoint, arg, atan2, conjugate, cos, DiracDelta, E, exp, expand,
Expr, Function, Heaviside, I, im, log, nan, oo, pi, Rational, re, S,
sign, sin, sqrt, Symbol, symbols, transpose, zoo, exp_polar, Piecewise,
Interval, comp
)
from sympy.utilities.pytest import XFAIL, raises
def N_equals(a, b):
return comp(a.n(), b.n(), 1.e-6)
def test_re():
x, y = symbols('x,y')
a, b = symbols('a,b', real=True)
r = Symbol('r', real=True)
i = Symbol('i', imaginary=True)
assert re(nan) == nan
assert re(oo) == oo
assert re(-oo) == -oo
assert re(0) == 0
assert re(1) == 1
assert re(-1) == -1
assert re(E) == E
assert re(-E) == -E
assert re(x) == re(x)
assert re(x*I) == -im(x)
assert re(r*I) == 0
assert re(r) == r
assert re(i*I) == I * i
assert re(i) == 0
assert re(x + y) == re(x + y)
assert re(x + r) == re(x) + r
assert re(re(x)) == re(x)
assert re(2 + I) == 2
assert re(x + I) == re(x)
assert re(x + y*I) == re(x) - im(y)
assert re(x + r*I) == re(x)
assert re(log(2*I)) == log(2)
assert re((2 + I)**2).expand(complex=True) == 3
assert re(conjugate(x)) == re(x)
assert conjugate(re(x)) == re(x)
assert re(x).as_real_imag() == (re(x), 0)
assert re(i*r*x).diff(r) == re(i*x)
assert re(i*r*x).diff(i) == I*r*im(x)
assert re(
sqrt(a + b*I)) == (a**2 + b**2)**Rational(1, 4)*cos(atan2(b, a)/2)
assert re(a * (2 + b*I)) == 2*a
assert re((1 + sqrt(a + b*I))/2) == \
(a**2 + b**2)**Rational(1, 4)*cos(atan2(b, a)/2)/2 + Rational(1, 2)
assert re(x).rewrite(im) == x - im(x)
assert (x + re(y)).rewrite(re, im) == x + y - im(y)
a = Symbol('a', algebraic=True)
t = Symbol('t', transcendental=True)
x = Symbol('x')
assert re(a).is_algebraic
assert re(x).is_algebraic is None
assert re(t).is_algebraic is False
def test_im():
x, y = symbols('x,y')
a, b = symbols('a,b', real=True)
r = Symbol('r', real=True)
i = Symbol('i', imaginary=True)
assert im(nan) == nan
assert im(oo*I) == oo
assert im(-oo*I) == -oo
assert im(0) == 0
assert im(1) == 0
assert im(-1) == 0
assert im(E*I) == E
assert im(-E*I) == -E
assert im(x) == im(x)
assert im(x*I) == re(x)
assert im(r*I) == r
assert im(r) == 0
assert im(i*I) == 0
assert im(i) == -I * i
assert im(x + y) == im(x + y)
assert im(x + r) == im(x)
assert im(x + r*I) == im(x) + r
assert im(im(x)*I) == im(x)
assert im(2 + I) == 1
assert im(x + I) == im(x) + 1
assert im(x + y*I) == im(x) + re(y)
assert im(x + r*I) == im(x) + r
assert im(log(2*I)) == pi/2
assert im((2 + I)**2).expand(complex=True) == 4
assert im(conjugate(x)) == -im(x)
assert conjugate(im(x)) == im(x)
assert im(x).as_real_imag() == (im(x), 0)
assert im(i*r*x).diff(r) == im(i*x)
assert im(i*r*x).diff(i) == -I * re(r*x)
assert im(
sqrt(a + b*I)) == (a**2 + b**2)**Rational(1, 4)*sin(atan2(b, a)/2)
assert im(a * (2 + b*I)) == a*b
assert im((1 + sqrt(a + b*I))/2) == \
(a**2 + b**2)**Rational(1, 4)*sin(atan2(b, a)/2)/2
assert im(x).rewrite(re) == x - re(x)
assert (x + im(y)).rewrite(im, re) == x + y - re(y)
a = Symbol('a', algebraic=True)
t = Symbol('t', transcendental=True)
x = Symbol('x')
assert re(a).is_algebraic
assert re(x).is_algebraic is None
assert re(t).is_algebraic is False
def test_sign():
assert sign(1.2) == 1
assert sign(-1.2) == -1
assert sign(3*I) == I
assert sign(-3*I) == -I
assert sign(0) == 0
assert sign(nan) == nan
assert sign(2 + 2*I).doit() == sqrt(2)*(2 + 2*I)/4
assert sign(2 + 3*I).simplify() == sign(2 + 3*I)
assert sign(2 + 2*I).simplify() == sign(1 + I)
assert sign(im(sqrt(1 - sqrt(3)))) == 1
assert sign(sqrt(1 - sqrt(3))) == I
x = Symbol('x')
assert sign(x).is_finite is True
assert sign(x).is_complex is True
assert sign(x).is_imaginary is None
assert sign(x).is_integer is None
assert sign(x).is_real is None
assert sign(x).is_zero is None
assert sign(x).doit() == sign(x)
assert sign(1.2*x) == sign(x)
assert sign(2*x) == sign(x)
assert sign(I*x) == I*sign(x)
assert sign(-2*I*x) == -I*sign(x)
assert sign(conjugate(x)) == conjugate(sign(x))
p = Symbol('p', positive=True)
n = Symbol('n', negative=True)
m = Symbol('m', negative=True)
assert sign(2*p*x) == sign(x)
assert sign(n*x) == -sign(x)
assert sign(n*m*x) == sign(x)
x = Symbol('x', imaginary=True)
assert sign(x).is_imaginary is True
assert sign(x).is_integer is False
assert sign(x).is_real is False
assert sign(x).is_zero is False
assert sign(x).diff(x) == 2*DiracDelta(-I*x)
assert sign(x).doit() == x / Abs(x)
assert conjugate(sign(x)) == -sign(x)
x = Symbol('x', real=True)
assert sign(x).is_imaginary is False
assert sign(x).is_integer is True
assert sign(x).is_real is True
assert sign(x).is_zero is None
assert sign(x).diff(x) == 2*DiracDelta(x)
assert sign(x).doit() == sign(x)
assert conjugate(sign(x)) == sign(x)
x = Symbol('x', nonzero=True)
assert sign(x).is_imaginary is None
assert sign(x).is_integer is None
assert sign(x).is_real is None
assert sign(x).is_zero is False
assert sign(x).doit() == x / Abs(x)
assert sign(Abs(x)) == 1
assert Abs(sign(x)) == 1
x = Symbol('x', positive=True)
assert sign(x).is_imaginary is False
assert sign(x).is_integer is True
assert sign(x).is_real is True
assert sign(x).is_zero is False
assert sign(x).doit() == x / Abs(x)
assert sign(Abs(x)) == 1
assert Abs(sign(x)) == 1
x = 0
assert sign(x).is_imaginary is False
assert sign(x).is_integer is True
assert sign(x).is_real is True
assert sign(x).is_zero is True
assert sign(x).doit() == 0
assert sign(Abs(x)) == 0
assert Abs(sign(x)) == 0
nz = Symbol('nz', nonzero=True, integer=True)
assert sign(nz).is_imaginary is False
assert sign(nz).is_integer is True
assert sign(nz).is_real is True
assert sign(nz).is_zero is False
assert sign(nz)**2 == 1
assert (sign(nz)**3).args == (sign(nz), 3)
assert sign(Symbol('x', nonnegative=True)).is_nonnegative
assert sign(Symbol('x', nonnegative=True)).is_nonpositive is None
assert sign(Symbol('x', nonpositive=True)).is_nonnegative is None
assert sign(Symbol('x', nonpositive=True)).is_nonpositive
assert sign(Symbol('x', real=True)).is_nonnegative is None
assert sign(Symbol('x', real=True)).is_nonpositive is None
assert sign(Symbol('x', real=True, zero=False)).is_nonpositive is None
x, y = Symbol('x', real=True), Symbol('y')
assert sign(x).rewrite(Piecewise) == \
Piecewise((1, x > 0), (-1, x < 0), (0, True))
assert sign(y).rewrite(Piecewise) == sign(y)
assert sign(x).rewrite(Heaviside) == 2*Heaviside(x)-1
assert sign(y).rewrite(Heaviside) == sign(y)
assert sign(exp_polar(I*pi)*pi) is S.NegativeOne
eq = -sqrt(10 + 6*sqrt(3)) + sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3))
assert sign(eq).func is sign or sign(eq) == 0
q = 1 + sqrt(2) - 2*sqrt(3) + 1331*sqrt(6)
p = expand(q**3)**Rational(1, 3)
d = p - q
assert sign(d).func is sign or sign(d) == 0
def test_as_real_imag():
n = pi**1000
assert n.as_real_imag() == (n, 0)
x = Symbol('x')
assert sqrt(x).as_real_imag() == \
((re(x)**2 + im(x)**2)**(S(1)/4)*cos(atan2(im(x), re(x))/2),
(re(x)**2 + im(x)**2)**(S(1)/4)*sin(atan2(im(x), re(x))/2))
a, b = symbols('a,b', real=True)
assert ((1 + sqrt(a + b*I))/2).as_real_imag() == \
(
(a**2 + b**2)**Rational(
1, 4)*cos(atan2(b, a)/2)/2 + Rational(1, 2),
(a**2 + b**2)**Rational(1, 4)*sin(atan2(b, a)/2)/2)
assert sqrt(a**2).as_real_imag() == (sqrt(a**2), 0)
i = symbols('i', imaginary=True)
assert sqrt(i**2).as_real_imag() == (0, abs(i))
@XFAIL
def test_sign_issue_3068():
n = pi**1000
i = int(n)
assert (n - i).round() == 1
assert sign(n - i) == 1
# perhaps it's not possible to get the sign right when
assert (n - x).n(1, subs={x: i}) > 0
assert (n - x).n(2, subs={x: i}) > 0
def test_Abs():
raises(TypeError, lambda: Abs(Interval(2, 3)))
x, y = symbols('x,y')
assert sign(sign(x)) == sign(x)
assert sign(x*y).func is sign
assert Abs(0) == 0
assert Abs(1) == 1
assert Abs(-1) == 1
assert Abs(I) == 1
assert Abs(-I) == 1
assert Abs(nan) == nan
assert Abs(I * pi) == pi
assert Abs(-I * pi) == pi
assert Abs(I * x) == Abs(x)
assert Abs(-I * x) == Abs(x)
assert Abs(-2*x) == 2*Abs(x)
assert Abs(-2.0*x) == 2.0*Abs(x)
assert Abs(2*pi*x*y) == 2*pi*Abs(x*y)
assert Abs(conjugate(x)) == Abs(x)
assert conjugate(Abs(x)) == Abs(x)
a = Symbol('a', positive=True)
assert Abs(2*pi*x*a) == 2*pi*a*Abs(x)
assert Abs(2*pi*I*x*a) == 2*pi*a*Abs(x)
x = Symbol('x', real=True)
n = Symbol('n', integer=True)
assert Abs((-1)**n) == 1
assert x**(2*n) == Abs(x)**(2*n)
assert Abs(x).diff(x) == sign(x)
assert abs(x) == Abs(x)
assert Abs(x)**3 == x**2*Abs(x)
assert Abs(x)**4 == x**4
assert (
Abs(x)**(3*n)).args == (Abs(x), 3*n)
assert (1/Abs(x)).args == (Abs(x), -1)
assert 1/Abs(x)**3 == 1/(x**2*Abs(x))
assert Abs(x)**-3 == Abs(x)/(x**4)
assert Abs(x**3) == x**2*Abs(x)
x = Symbol('x', imaginary=True)
assert Abs(x).diff(x) == -sign(x)
eq = -sqrt(10 + 6*sqrt(3)) + sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3))
assert abs(eq).func is Abs or abs(eq) == 0
q = 1 + sqrt(2) - 2*sqrt(3) + 1331*sqrt(6)
p = expand(q**3)**Rational(1, 3)
d = p - q
assert abs(d).func is Abs or abs(d) == 0
assert Abs(4*exp(pi*I/4)) == 4
assert Abs(3**(2 + I)) == 9
assert Abs((-3)**(1 - I)) == 3*exp(pi)
assert Abs(oo) is oo
assert Abs(-oo) is oo
assert Abs(oo + I) is oo
assert Abs(oo + I*oo) is oo
a = Symbol('a', algebraic=True)
t = Symbol('t', transcendental=True)
x = Symbol('x')
assert re(a).is_algebraic
assert re(x).is_algebraic is None
assert re(t).is_algebraic is False
def test_Abs_rewrite():
x = Symbol('x', real=True)
a = Abs(x).rewrite(Heaviside).expand()
assert a == x*Heaviside(x) - x*Heaviside(-x)
for i in [-2, -1, 0, 1, 2]:
assert a.subs(x, i) == abs(i)
y = Symbol('y')
assert Abs(y).rewrite(Heaviside) == Abs(y)
x, y = Symbol('x', real=True), Symbol('y')
assert Abs(x).rewrite(Piecewise) == Piecewise((x, x >= 0), (-x, True))
assert Abs(y).rewrite(Piecewise) == Abs(y)
assert Abs(y).rewrite(sign) == y/sign(y)
def test_Abs_real():
x = Symbol('x', complex=True)
assert sqrt(x**2) != Abs(x)
assert Abs(x**2) != x**2
x = Symbol('x', real=True)
assert sqrt(x**2) == Abs(x)
assert Abs(x**2) == x**2
nn = Symbol('nn', nonnegative=True, real=True)
np = Symbol('np', nonpositive=True, real=True)
assert Abs(nn) == nn
assert Abs(np) == -np
def test_Abs_properties():
x = Symbol('x')
assert Abs(x).is_real is True
assert Abs(x).is_rational is None
assert Abs(x).is_positive is None
assert Abs(x).is_nonnegative is True
z = Symbol('z', complex=True, zero=False)
assert Abs(z).is_real is True
assert Abs(z).is_rational is None
assert Abs(z).is_positive is True
assert Abs(z).is_zero is False
p = Symbol('p', positive=True)
assert Abs(p).is_real is True
assert Abs(p).is_rational is None
assert Abs(p).is_positive is True
assert Abs(p).is_zero is False
q = Symbol('q', rational=True)
assert Abs(q).is_rational is True
assert Abs(q).is_integer is None
assert Abs(q).is_positive is None
assert Abs(q).is_nonnegative is True
i = Symbol('i', integer=True)
assert Abs(i).is_integer is True
assert Abs(i).is_positive is None
assert Abs(i).is_nonnegative is True
e = Symbol('n', even=True)
ne = Symbol('ne', real=True, even=False)
assert Abs(e).is_even
assert Abs(ne).is_even is False
assert Abs(i).is_even is None
o = Symbol('n', odd=True)
no = Symbol('no', real=True, odd=False)
assert Abs(o).is_odd
assert Abs(no).is_odd is False
assert Abs(i).is_odd is None
def test_abs():
# test_Abs since that test is already above
a = Symbol('a', positive=True)
assert abs(I*(1 + a)**2) == (1 + a)**2
def test_arg():
assert arg(0) == nan
assert arg(1) == 0
assert arg(-1) == pi
assert arg(I) == pi/2
assert arg(-I) == -pi/2
assert arg(1 + I) == pi/4
assert arg(-1 + I) == 3*pi/4
assert arg(1 - I) == -pi/4
f = Function('f')
assert not arg(f(0) + I*f(1)).atoms(re)
p = Symbol('p', positive=True)
assert arg(p) == 0
n = Symbol('n', negative=True)
assert arg(n) == pi
x = Symbol('x')
assert conjugate(arg(x)) == arg(x)
e = p + I*p**2
assert arg(e) == arg(1 + p*I)
# make sure sign doesn't swap
e = -2*p + 4*I*p**2
assert arg(e) == arg(-1 + 2*p*I)
x = symbols('x', real=True) # could be zero
e = x + I*x
assert arg(e) == arg(x*(1 + I))
assert arg(e/p) == arg(x*(1 + I))
e = p*cos(p) + I*log(p)*exp(p)
assert arg(e).args[0] == e
# keep it simple -- let the user do more advanced cancellation
e = (p + 1) + I*(p**2 - 1)
assert arg(e).args[0] == e
def test_arg_rewrite():
assert arg(1 + I) == atan2(1, 1)
x = Symbol('x', real=True)
y = Symbol('y', real=True)
assert arg(x + I*y).rewrite(atan2) == atan2(y, x)
def test_adjoint():
a = Symbol('a', antihermitian=True)
b = Symbol('b', hermitian=True)
assert adjoint(a) == -a
assert adjoint(I*a) == I*a
assert adjoint(b) == b
assert adjoint(I*b) == -I*b
assert adjoint(a*b) == -b*a
assert adjoint(I*a*b) == I*b*a
x, y = symbols('x y')
assert adjoint(adjoint(x)) == x
assert adjoint(x + y) == adjoint(x) + adjoint(y)
assert adjoint(x - y) == adjoint(x) - adjoint(y)
assert adjoint(x * y) == adjoint(x) * adjoint(y)
assert adjoint(x / y) == adjoint(x) / adjoint(y)
assert adjoint(-x) == -adjoint(x)
x, y = symbols('x y', commutative=False)
assert adjoint(adjoint(x)) == x
assert adjoint(x + y) == adjoint(x) + adjoint(y)
assert adjoint(x - y) == adjoint(x) - adjoint(y)
assert adjoint(x * y) == adjoint(y) * adjoint(x)
assert adjoint(x / y) == 1 / adjoint(y) * adjoint(x)
assert adjoint(-x) == -adjoint(x)
def test_conjugate():
a = Symbol('a', real=True)
b = Symbol('b', imaginary=True)
assert conjugate(a) == a
assert conjugate(I*a) == -I*a
assert conjugate(b) == -b
assert conjugate(I*b) == I*b
assert conjugate(a*b) == -a*b
assert conjugate(I*a*b) == I*a*b
x, y = symbols('x y')
assert conjugate(conjugate(x)) == x
assert conjugate(x + y) == conjugate(x) + conjugate(y)
assert conjugate(x - y) == conjugate(x) - conjugate(y)
assert conjugate(x * y) == conjugate(x) * conjugate(y)
assert conjugate(x / y) == conjugate(x) / conjugate(y)
assert conjugate(-x) == -conjugate(x)
a = Symbol('a', algebraic=True)
t = Symbol('t', transcendental=True)
assert re(a).is_algebraic
assert re(x).is_algebraic is None
assert re(t).is_algebraic is False
def test_conjugate_transpose():
x = Symbol('x')
assert conjugate(transpose(x)) == adjoint(x)
assert transpose(conjugate(x)) == adjoint(x)
assert adjoint(transpose(x)) == conjugate(x)
assert transpose(adjoint(x)) == conjugate(x)
assert adjoint(conjugate(x)) == transpose(x)
assert conjugate(adjoint(x)) == transpose(x)
class Symmetric(Expr):
def _eval_adjoint(self):
return None
def _eval_conjugate(self):
return None
def _eval_transpose(self):
return self
x = Symmetric()
assert conjugate(x) == adjoint(x)
assert transpose(x) == x
def test_transpose():
a = Symbol('a', complex=True)
assert transpose(a) == a
assert transpose(I*a) == I*a
x, y = symbols('x y')
assert transpose(transpose(x)) == x
assert transpose(x + y) == transpose(x) + transpose(y)
assert transpose(x - y) == transpose(x) - transpose(y)
assert transpose(x * y) == transpose(x) * transpose(y)
assert transpose(x / y) == transpose(x) / transpose(y)
assert transpose(-x) == -transpose(x)
x, y = symbols('x y', commutative=False)
assert transpose(transpose(x)) == x
assert transpose(x + y) == transpose(x) + transpose(y)
assert transpose(x - y) == transpose(x) - transpose(y)
assert transpose(x * y) == transpose(y) * transpose(x)
assert transpose(x / y) == 1 / transpose(y) * transpose(x)
assert transpose(-x) == -transpose(x)
def test_issue_4035():
x = Symbol('x')
assert Abs(x).expand(trig=True) == Abs(x)
assert sign(x).expand(trig=True) == sign(x)
assert arg(x).expand(trig=True) == arg(x)
def test_issue_3206():
x = Symbol('x')
assert Abs(Abs(x)) == Abs(x)
def test_issue_4754_derivative_conjugate():
x = Symbol('x', real=True)
y = Symbol('y', imaginary=True)
f = Function('f')
assert (f(x).conjugate()).diff(x) == (f(x).diff(x)).conjugate()
assert (f(y).conjugate()).diff(y) == -(f(y).diff(y)).conjugate()
def test_derivatives_issue_4757():
x = Symbol('x', real=True)
y = Symbol('y', imaginary=True)
f = Function('f')
assert re(f(x)).diff(x) == re(f(x).diff(x))
assert im(f(x)).diff(x) == im(f(x).diff(x))
assert re(f(y)).diff(y) == -I*im(f(y).diff(y))
assert im(f(y)).diff(y) == -I*re(f(y).diff(y))
assert Abs(f(x)).diff(x).subs(f(x), 1 + I*x).doit() == x/sqrt(1 + x**2)
assert arg(f(x)).diff(x).subs(f(x), 1 + I*x**2).doit() == 2*x/(1 + x**4)
assert Abs(f(y)).diff(y).subs(f(y), 1 + y).doit() == -y/sqrt(1 - y**2)
assert arg(f(y)).diff(y).subs(f(y), I + y**2).doit() == 2*y/(1 + y**4)
def test_periodic_argument():
from sympy import (periodic_argument, unbranched_argument, oo,
principal_branch, polar_lift, pi)
x = Symbol('x')
p = Symbol('p', positive=True)
assert unbranched_argument(2 + I) == periodic_argument(2 + I, oo)
assert unbranched_argument(1 + x) == periodic_argument(1 + x, oo)
assert N_equals(unbranched_argument((1 + I)**2), pi/2)
assert N_equals(unbranched_argument((1 - I)**2), -pi/2)
assert N_equals(periodic_argument((1 + I)**2, 3*pi), pi/2)
assert N_equals(periodic_argument((1 - I)**2, 3*pi), -pi/2)
assert unbranched_argument(principal_branch(x, pi)) == \
periodic_argument(x, pi)
assert unbranched_argument(polar_lift(2 + I)) == unbranched_argument(2 + I)
assert periodic_argument(polar_lift(2 + I), 2*pi) == \
periodic_argument(2 + I, 2*pi)
assert periodic_argument(polar_lift(2 + I), 3*pi) == \
periodic_argument(2 + I, 3*pi)
assert periodic_argument(polar_lift(2 + I), pi) == \
periodic_argument(polar_lift(2 + I), pi)
assert unbranched_argument(polar_lift(1 + I)) == pi/4
assert periodic_argument(2*p, p) == periodic_argument(p, p)
assert periodic_argument(pi*p, p) == periodic_argument(p, p)
assert Abs(polar_lift(1 + I)) == Abs(1 + I)
@XFAIL
def test_principal_branch_fail():
# TODO XXX why does abs(x)._eval_evalf() not fall back to global evalf?
assert N_equals(principal_branch((1 + I)**2, pi/2), 0)
def test_principal_branch():
from sympy import principal_branch, polar_lift, exp_polar
p = Symbol('p', positive=True)
x = Symbol('x')
neg = Symbol('x', negative=True)
assert principal_branch(polar_lift(x), p) == principal_branch(x, p)
assert principal_branch(polar_lift(2 + I), p) == principal_branch(2 + I, p)
assert principal_branch(2*x, p) == 2*principal_branch(x, p)
assert principal_branch(1, pi) == exp_polar(0)
assert principal_branch(-1, 2*pi) == exp_polar(I*pi)
assert principal_branch(-1, pi) == exp_polar(0)
assert principal_branch(exp_polar(3*pi*I)*x, 2*pi) == \
principal_branch(exp_polar(I*pi)*x, 2*pi)
assert principal_branch(neg*exp_polar(pi*I), 2*pi) == neg*exp_polar(-I*pi)
assert N_equals(principal_branch((1 + I)**2, 2*pi), 2*I)
assert N_equals(principal_branch((1 + I)**2, 3*pi), 2*I)
assert N_equals(principal_branch((1 + I)**2, 1*pi), 2*I)
# test argument sanitization
assert principal_branch(x, I).func is principal_branch
assert principal_branch(x, -4).func is principal_branch
assert principal_branch(x, -oo).func is principal_branch
assert principal_branch(x, zoo).func is principal_branch
@XFAIL
def test_issue_6167_6151():
n = pi**1000
i = int(n)
assert sign(n - i) == 1
assert abs(n - i) == n - i
eps = pi**-1500
big = pi**1000
one = cos(x)**2 + sin(x)**2
e = big*one - big + eps
assert sign(simplify(e)) == 1
for xi in (111, 11, 1, S(1)/10):
assert sign(e.subs(x, xi)) == 1
| true | true |
1c33561b46f8ec90a73d5b7fa4f7967740fc7fcc | 1,509 | py | Python | autorest/python/emsapi/models/adi_ems_web_api_v2_dto_parameter_set_parameter_set_group.py | ge-flight-analytics/ems-api-wrappers | 5e787e0cbc72e7a3b06fa83ff6ba07968231f89c | [
"MIT"
] | 2 | 2017-02-20T18:32:02.000Z | 2018-08-01T11:45:29.000Z | autorest/python/emsapi/models/adi_ems_web_api_v2_dto_parameter_set_parameter_set_group.py | ge-flight-analytics/ems-api-wrappers | 5e787e0cbc72e7a3b06fa83ff6ba07968231f89c | [
"MIT"
] | 10 | 2017-02-20T16:17:04.000Z | 2019-04-02T16:52:49.000Z | autorest/python/emsapi/models/adi_ems_web_api_v2_dto_parameter_set_parameter_set_group.py | ge-flight-analytics/ems-api-wrappers | 5e787e0cbc72e7a3b06fa83ff6ba07968231f89c | [
"MIT"
] | 2 | 2017-02-18T23:22:20.000Z | 2017-02-20T19:35:38.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AdiEmsWebApiV2DtoParameterSetParameterSetGroup(Model):
"""A container for parameter sets.
:param name: The name of the group.
:type name: str
:param group_id: The id of the group. This should be a relative path.
:type group_id: str
:param groups: An array of groups contained by this group.
:type groups:
list[~emsapi.models.AdiEmsWebApiV2DtoParameterSetParameterSetGroup]
:param sets: An array of parameter sets contained by this group.
:type sets: list[~emsapi.models.AdiEmsWebApiV2DtoParameterSetParameterSet]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'group_id': {'key': 'groupId', 'type': 'str'},
'groups': {'key': 'groups', 'type': '[AdiEmsWebApiV2DtoParameterSetParameterSetGroup]'},
'sets': {'key': 'sets', 'type': '[AdiEmsWebApiV2DtoParameterSetParameterSet]'},
}
def __init__(self, name=None, group_id=None, groups=None, sets=None):
super(AdiEmsWebApiV2DtoParameterSetParameterSetGroup, self).__init__()
self.name = name
self.group_id = group_id
self.groups = groups
self.sets = sets
| 39.710526 | 96 | 0.617628 |
from msrest.serialization import Model
class AdiEmsWebApiV2DtoParameterSetParameterSetGroup(Model):
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'group_id': {'key': 'groupId', 'type': 'str'},
'groups': {'key': 'groups', 'type': '[AdiEmsWebApiV2DtoParameterSetParameterSetGroup]'},
'sets': {'key': 'sets', 'type': '[AdiEmsWebApiV2DtoParameterSetParameterSet]'},
}
def __init__(self, name=None, group_id=None, groups=None, sets=None):
super(AdiEmsWebApiV2DtoParameterSetParameterSetGroup, self).__init__()
self.name = name
self.group_id = group_id
self.groups = groups
self.sets = sets
| true | true |
1c3356481cbd526a3b1e27b3157fe0a9488be0bf | 7,356 | py | Python | lib/streamlit/elements/multiselect.py | AnOctopus/streamlit | 6c5384f62c1415538347fa751185e5c487673f82 | [
"Apache-2.0"
] | 19,099 | 2019-08-25T14:00:15.000Z | 2022-03-31T21:00:28.000Z | lib/streamlit/elements/multiselect.py | AnOctopus/streamlit | 6c5384f62c1415538347fa751185e5c487673f82 | [
"Apache-2.0"
] | 3,078 | 2019-08-25T19:50:14.000Z | 2022-03-31T23:26:14.000Z | lib/streamlit/elements/multiselect.py | AnOctopus/streamlit | 6c5384f62c1415538347fa751185e5c487673f82 | [
"Apache-2.0"
] | 1,892 | 2019-08-26T04:44:24.000Z | 2022-03-30T16:11:51.000Z | # Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from textwrap import dedent
from typing import Any, Callable, Optional, cast, List
import streamlit
from streamlit.errors import StreamlitAPIException
from streamlit.proto.MultiSelect_pb2 import MultiSelect as MultiSelectProto
from streamlit.state.widgets import register_widget
from streamlit.type_util import Key, OptionSequence, ensure_indexable, is_type, to_key
from streamlit.state.session_state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
)
from .form import current_form_id
from .utils import check_callback_rules, check_session_state_rules
class MultiSelectMixin:
def multiselect(
self,
label: str,
options: OptionSequence,
default: Optional[Any] = None,
format_func: Callable[[Any], Any] = str,
key: Optional[Key] = None,
help: Optional[str] = None,
on_change: Optional[WidgetCallback] = None,
args: Optional[WidgetArgs] = None,
kwargs: Optional[WidgetKwargs] = None,
*, # keyword-only arguments:
disabled: bool = False,
) -> List[Any]:
"""Display a multiselect widget.
The multiselect widget starts as empty.
Parameters
----------
label : str
A short label explaining to the user what this select widget is for.
options : Sequence[V], numpy.ndarray, pandas.Series, pandas.DataFrame, or pandas.Index
Labels for the select options. This will be cast to str internally
by default. For pandas.DataFrame, the first column is selected.
default: [V], V, or None
List of default values. Can also be a single value.
format_func : function
Function to modify the display of selectbox options. It receives
the raw option as an argument and should output the label to be
shown for that option. This has no impact on the return value of
the multiselect.
key : str or int
An optional string or integer to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. Multiple widgets of the same type may
not share the same key.
help : str
An optional tooltip that gets displayed next to the multiselect.
on_change : callable
An optional callback invoked when this multiselect's value changes.
args : tuple
An optional tuple of args to pass to the callback.
kwargs : dict
An optional dict of kwargs to pass to the callback.
disabled : bool
An optional boolean, which disables the multiselect widget if set
to True. The default is False. This argument can only be supplied
by keyword.
Returns
-------
list
A list with the selected options
Example
-------
>>> options = st.multiselect(
... 'What are your favorite colors',
... ['Green', 'Yellow', 'Red', 'Blue'],
... ['Yellow', 'Red'])
>>>
>>> st.write('You selected:', options)
.. note::
User experience can be degraded for large lists of `options` (100+), as this widget
is not designed to handle arbitrary text search efficiently. See this
`thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_
on the Streamlit community forum for more information and
`GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue.
"""
key = to_key(key)
check_callback_rules(self.dg, on_change)
check_session_state_rules(default_value=default, key=key)
opt = ensure_indexable(options)
# Perform validation checks and return indices base on the default values.
def _check_and_convert_to_indices(opt, default_values):
if default_values is None and None not in opt:
return None
if not isinstance(default_values, list):
# This if is done before others because calling if not x (done
# right below) when x is of type pd.Series() or np.array() throws a
# ValueError exception.
if is_type(default_values, "numpy.ndarray") or is_type(
default_values, "pandas.core.series.Series"
):
default_values = list(default_values)
elif not default_values or default_values in opt:
default_values = [default_values]
else:
default_values = list(default_values)
for value in default_values:
if value not in opt:
raise StreamlitAPIException(
"Every Multiselect default value must exist in options"
)
return [opt.index(value) for value in default_values]
indices = _check_and_convert_to_indices(opt, default)
multiselect_proto = MultiSelectProto()
multiselect_proto.label = label
default_value = [] if indices is None else indices
multiselect_proto.default[:] = default_value
multiselect_proto.options[:] = [str(format_func(option)) for option in opt]
multiselect_proto.form_id = current_form_id(self.dg)
multiselect_proto.disabled = disabled
if help is not None:
multiselect_proto.help = dedent(help)
def deserialize_multiselect(
ui_value: Optional[List[int]], widget_id: str = ""
) -> List[str]:
current_value = ui_value if ui_value is not None else default_value
return [opt[i] for i in current_value]
def serialize_multiselect(value):
return _check_and_convert_to_indices(opt, value)
current_value, set_frontend_value = register_widget(
"multiselect",
multiselect_proto,
user_key=key,
on_change_handler=on_change,
args=args,
kwargs=kwargs,
deserializer=deserialize_multiselect,
serializer=serialize_multiselect,
)
if set_frontend_value:
multiselect_proto.value[:] = _check_and_convert_to_indices(
opt, current_value
)
multiselect_proto.set_value = True
self.dg._enqueue("multiselect", multiselect_proto)
return cast(List[str], current_value)
@property
def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
"""Get our DeltaGenerator."""
return cast("streamlit.delta_generator.DeltaGenerator", self)
| 40.640884 | 111 | 0.635943 |
from textwrap import dedent
from typing import Any, Callable, Optional, cast, List
import streamlit
from streamlit.errors import StreamlitAPIException
from streamlit.proto.MultiSelect_pb2 import MultiSelect as MultiSelectProto
from streamlit.state.widgets import register_widget
from streamlit.type_util import Key, OptionSequence, ensure_indexable, is_type, to_key
from streamlit.state.session_state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
)
from .form import current_form_id
from .utils import check_callback_rules, check_session_state_rules
class MultiSelectMixin:
def multiselect(
self,
label: str,
options: OptionSequence,
default: Optional[Any] = None,
format_func: Callable[[Any], Any] = str,
key: Optional[Key] = None,
help: Optional[str] = None,
on_change: Optional[WidgetCallback] = None,
args: Optional[WidgetArgs] = None,
kwargs: Optional[WidgetKwargs] = None,
*,
disabled: bool = False,
) -> List[Any]:
key = to_key(key)
check_callback_rules(self.dg, on_change)
check_session_state_rules(default_value=default, key=key)
opt = ensure_indexable(options)
def _check_and_convert_to_indices(opt, default_values):
if default_values is None and None not in opt:
return None
if not isinstance(default_values, list):
if is_type(default_values, "numpy.ndarray") or is_type(
default_values, "pandas.core.series.Series"
):
default_values = list(default_values)
elif not default_values or default_values in opt:
default_values = [default_values]
else:
default_values = list(default_values)
for value in default_values:
if value not in opt:
raise StreamlitAPIException(
"Every Multiselect default value must exist in options"
)
return [opt.index(value) for value in default_values]
indices = _check_and_convert_to_indices(opt, default)
multiselect_proto = MultiSelectProto()
multiselect_proto.label = label
default_value = [] if indices is None else indices
multiselect_proto.default[:] = default_value
multiselect_proto.options[:] = [str(format_func(option)) for option in opt]
multiselect_proto.form_id = current_form_id(self.dg)
multiselect_proto.disabled = disabled
if help is not None:
multiselect_proto.help = dedent(help)
def deserialize_multiselect(
ui_value: Optional[List[int]], widget_id: str = ""
) -> List[str]:
current_value = ui_value if ui_value is not None else default_value
return [opt[i] for i in current_value]
def serialize_multiselect(value):
return _check_and_convert_to_indices(opt, value)
current_value, set_frontend_value = register_widget(
"multiselect",
multiselect_proto,
user_key=key,
on_change_handler=on_change,
args=args,
kwargs=kwargs,
deserializer=deserialize_multiselect,
serializer=serialize_multiselect,
)
if set_frontend_value:
multiselect_proto.value[:] = _check_and_convert_to_indices(
opt, current_value
)
multiselect_proto.set_value = True
self.dg._enqueue("multiselect", multiselect_proto)
return cast(List[str], current_value)
@property
def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
return cast("streamlit.delta_generator.DeltaGenerator", self)
| true | true |
1c33566bd3a34dff0c3f58954874dafac386d95b | 9,622 | py | Python | dask_ml/linear_model/glm.py | laprej/dask-ml | 78b1d942eae14db442a744f8812c3e94a8f31272 | [
"BSD-3-Clause"
] | 3 | 2017-06-13T22:36:45.000Z | 2017-09-20T16:08:47.000Z | dask_ml/linear_model/glm.py | laprej/dask-ml | 78b1d942eae14db442a744f8812c3e94a8f31272 | [
"BSD-3-Clause"
] | null | null | null | dask_ml/linear_model/glm.py | laprej/dask-ml | 78b1d942eae14db442a744f8812c3e94a8f31272 | [
"BSD-3-Clause"
] | 1 | 2019-12-03T13:23:52.000Z | 2019-12-03T13:23:52.000Z | # -*- coding: utf-8 -*-
"""Generalized Linear Models for large datasets."""
import textwrap
from dask_glm import algorithms, families
from dask_glm.utils import (accuracy_score, add_intercept, dot, exp,
mean_squared_error, poisson_deviance, sigmoid)
from sklearn.base import BaseEstimator
# register multipledispatch
from . import utils # noqa
from ..utils import check_array
_base_doc = textwrap.dedent("""\
Esimator for {regression_type}.
Parameters
----------
penalty : str or Regularizer, default 'l2'
Regularizer to use. Only relevant for the 'admm', 'lbfgs' and
'proximal_grad' solvers.
For string values, only 'l1' or 'l2' are valid.
dual : bool
Ignored
tol : float, default 1e-4
The tolerance for convergence.
C : float
Regularization strength. Note that ``dask-glm`` solvers use
the parameterization :math:`\lambda = 1 / C`
fit_intercept : bool, default True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : bool
Ignored
class_weight : dict or 'balanced'
Ignored
random_state : int, RandomState, or None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by np.random. Used when solver == ‘sag’ or ‘liblinear’.
solver : {{'admm', 'gradient_descent', 'newton', 'lbfgs', 'proximal_grad'}}
Solver to use. See :ref:`api.algorithms` for details
multiclass : str, default 'ovr'
Ignored. Multiclass solvers not currently supported.
verbose : int, default 0
Ignored
warm_start : bool, default False
Ignored
n_jobs : int, default 1
Ignored
solver_kwargs : dict, optional, default None
Extra keyword arguments to pass through to the solver.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
The learned value for the model's coefficients
intercept_ : float of None
The learned value for the intercept, if one was added
to the model
Examples
--------
{examples}
""")
class _GLM(BaseEstimator):
@property
def family(self):
"""
The family this estimator is for.
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1.0, class_weight=None,
random_state=None, solver='admm', multiclass='ovr', verbose=0,
warm_start=False, n_jobs=1, max_iter=100, solver_kwargs=None):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.multiclass = multiclass
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
self.max_iter = max_iter
self.solver_kwargs = solver_kwargs
def _get_solver_kwargs(self):
fit_kwargs = {'max_iter': self.max_iter,
'family': self.family,
'tol': self.tol,
'regularizer': self.penalty,
'lamduh': 1 / self.C}
if self.solver in ('gradient_descent', 'newton'):
fit_kwargs.pop('regularizer')
fit_kwargs.pop('lamduh')
if self.solver == 'admm':
fit_kwargs.pop('tol') # uses reltol / abstol instead
if self.solver_kwargs:
fit_kwargs.update(self.solver_kwargs)
solvers = {'admm', 'proximal_grad', 'lbfgs', 'newton',
'proximal_grad', 'gradient_descent'}
if self.solver not in solvers:
msg = ("'solver' must be {}. Got '{}' instead".format(solvers,
self.solver))
raise ValueError(msg)
return fit_kwargs
def fit(self, X, y=None):
"""Fit the model on the training data
Parameters
----------
X: array-like, shape (n_samples, n_features)
y : array-like, shape (n_samples,)
Returns
-------
self : objectj
"""
X = self._check_array(X)
solver_kwargs = self._get_solver_kwargs()
self._coef = algorithms._solvers[self.solver](X, y, **solver_kwargs)
if self.fit_intercept:
self.coef_ = self._coef[:-1]
self.intercept_ = self._coef[-1]
else:
self.coef_ = self._coef
return self
def _check_array(self, X):
if self.fit_intercept:
X = add_intercept(X)
return check_array(X, accept_unknown_chunks=True)
class LogisticRegression(_GLM):
__doc__ = _base_doc.format(
regression_type='logistic_regression',
examples=textwrap.dedent("""
>>> from dask_glm.datasets import make_classification
>>> X, y = make_classification()
>>> lr = LogisticRegression()
>>> lr.fit(X, y)
>>> lr.predict(X)
>>> lr.predict_proba(X)
>>> est.score(X, y)"""))
@property
def family(self):
return families.Logistic
def predict(self, X):
"""Predict class labels for samples in X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples,]
Predicted class labels for each sample
"""
return self.predict_proba(X) > .5 # TODO: verify, multiclass broken
def predict_proba(self, X):
"""Probability estimates for samples in X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
The probability of the sample for each class in the model.
"""
X_ = self._check_array(X)
return sigmoid(dot(X_, self._coef))
def score(self, X, y):
"""The mean accuracy on the given data and labels
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Test samples.
y : array-like, shape = [n_samples,]
Test labels.
Returns
-------
score : float
Mean accuracy score
"""
return accuracy_score(y, self.predict(X))
class LinearRegression(_GLM):
__doc__ = _base_doc.format(
regression_type='linear_regression',
examples=textwrap.dedent("""
>>> from dask_glm.datasets import make_regression
>>> X, y = make_regression()
>>> lr = LinearRegression()
>>> lr.fit(X, y)
>>> lr.predict(X)
>>> lr.predict(X)
>>> est.score(X, y)"""))
@property
def family(self):
return families.Normal
def predict(self, X):
"""Predict values for samples in X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples,]
Predicted value for each sample
"""
X_ = self._check_array(X)
return dot(X_, self._coef)
def score(self, X, y):
"""Returns the coefficient of determination R^2 of the prediction.
The coefficient R^2 is defined as (1 - u/v), where u is the residual
sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
sum of squares ((y_true - y_true.mean()) ** 2).sum().
The best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True values for X.
Returns
-------
score : float
R^2 of self.predict(X) wrt. y.
"""
return mean_squared_error(y, self.predict(X))
class PoissonRegression(_GLM):
__doc__ = _base_doc.format(
regression_type='poisson_regression',
examples=textwrap.dedent("""
>>> from dask_glm.datasets import make_counts
>>> X, y = make_counts()
>>> lr = PoissonRegression()
>>> lr.fit(X, y)
>>> lr.predict(X)
>>> lr.predict(X)
>>> lr.get_deviance(X, y)"""))
@property
def family(self):
return families.Poisson
def predict(self, X):
"""Predict count for samples in X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples,]
Predicted count for each sample
"""
X_ = self._check_array(X)
return exp(dot(X_, self._coef))
def get_deviance(self, X, y):
return poisson_deviance(y, self.predict(X))
| 29.515337 | 79 | 0.568177 |
import textwrap
from dask_glm import algorithms, families
from dask_glm.utils import (accuracy_score, add_intercept, dot, exp,
mean_squared_error, poisson_deviance, sigmoid)
from sklearn.base import BaseEstimator
from . import utils
from ..utils import check_array
_base_doc = textwrap.dedent("""\
Esimator for {regression_type}.
Parameters
----------
penalty : str or Regularizer, default 'l2'
Regularizer to use. Only relevant for the 'admm', 'lbfgs' and
'proximal_grad' solvers.
For string values, only 'l1' or 'l2' are valid.
dual : bool
Ignored
tol : float, default 1e-4
The tolerance for convergence.
C : float
Regularization strength. Note that ``dask-glm`` solvers use
the parameterization :math:`\lambda = 1 / C`
fit_intercept : bool, default True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : bool
Ignored
class_weight : dict or 'balanced'
Ignored
random_state : int, RandomState, or None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by np.random. Used when solver == ‘sag’ or ‘liblinear’.
solver : {{'admm', 'gradient_descent', 'newton', 'lbfgs', 'proximal_grad'}}
Solver to use. See :ref:`api.algorithms` for details
multiclass : str, default 'ovr'
Ignored. Multiclass solvers not currently supported.
verbose : int, default 0
Ignored
warm_start : bool, default False
Ignored
n_jobs : int, default 1
Ignored
solver_kwargs : dict, optional, default None
Extra keyword arguments to pass through to the solver.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
The learned value for the model's coefficients
intercept_ : float of None
The learned value for the intercept, if one was added
to the model
Examples
--------
{examples}
""")
class _GLM(BaseEstimator):
    """Base class for the dask-glm backed estimators.

    Stores scikit-learn style hyper-parameters verbatim (a sklearn
    convention), translates them into ``dask_glm`` solver keyword
    arguments, and runs the chosen solver in :meth:`fit`.  Subclasses
    provide :attr:`family` plus prediction/scoring methods.
    """

    @property
    def family(self):
        """The ``dask_glm.families`` family this estimator optimizes."""
        # Fix: this property previously had no body (a syntax error in the
        # extracted source); make the subclass contract explicit instead.
        raise NotImplementedError("subclasses must define the `family` property")

    def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
                 fit_intercept=True, intercept_scaling=1.0, class_weight=None,
                 random_state=None, solver='admm', multiclass='ovr', verbose=0,
                 warm_start=False, n_jobs=1, max_iter=100, solver_kwargs=None):
        self.penalty = penalty
        self.dual = dual
        self.tol = tol
        self.C = C
        self.fit_intercept = fit_intercept
        self.intercept_scaling = intercept_scaling
        self.class_weight = class_weight
        self.random_state = random_state
        self.solver = solver
        self.multiclass = multiclass
        self.verbose = verbose
        self.warm_start = warm_start
        self.n_jobs = n_jobs
        self.max_iter = max_iter
        self.solver_kwargs = solver_kwargs

    def _get_solver_kwargs(self):
        """Translate the stored hyper-parameters into ``dask_glm`` solver kwargs.

        Raises
        ------
        ValueError
            If ``self.solver`` is not one of the supported solver names.
        """
        # Validate the solver name up front, before touching `self.family`
        # or building keyword arguments for an unknown solver.
        solvers = {'admm', 'proximal_grad', 'lbfgs', 'newton',
                   'gradient_descent'}
        if self.solver not in solvers:
            msg = ("'solver' must be {}. Got '{}' instead".format(solvers,
                                                                  self.solver))
            raise ValueError(msg)
        fit_kwargs = {'max_iter': self.max_iter,
                      'family': self.family,
                      'tol': self.tol,
                      'regularizer': self.penalty,
                      # dask-glm spells the regularization strength "lamduh".
                      'lamduh': 1 / self.C}
        if self.solver in ('gradient_descent', 'newton'):
            # These solvers are unregularized.
            fit_kwargs.pop('regularizer')
            fit_kwargs.pop('lamduh')
        if self.solver == 'admm':
            fit_kwargs.pop('tol')  # uses reltol / abstol instead
        if self.solver_kwargs:
            fit_kwargs.update(self.solver_kwargs)
        return fit_kwargs

    def fit(self, X, y=None):
        """Fit the coefficients with the configured dask-glm solver.

        Returns ``self`` (sklearn convention).
        """
        X = self._check_array(X)
        solver_kwargs = self._get_solver_kwargs()
        self._coef = algorithms._solvers[self.solver](X, y, **solver_kwargs)
        if self.fit_intercept:
            # ``add_intercept`` appends the intercept column last.
            self.coef_ = self._coef[:-1]
            self.intercept_ = self._coef[-1]
        else:
            self.coef_ = self._coef
        return self

    def _check_array(self, X):
        """Validate ``X``, appending an intercept column when configured."""
        if self.fit_intercept:
            X = add_intercept(X)
        return check_array(X, accept_unknown_chunks=True)
class LogisticRegression(_GLM):
    __doc__ = _base_doc.format(
        regression_type='logistic_regression',
        examples=textwrap.dedent("""
        >>> from dask_glm.datasets import make_classification
        >>> X, y = make_classification()
        >>> lr = LogisticRegression()
        >>> lr.fit(X, y)
        >>> lr.predict(X)
        >>> lr.predict_proba(X)
        >>> est.score(X, y)"""))

    @property
    def family(self):
        # Binary classification corresponds to the logistic family.
        return families.Logistic

    def predict_proba(self, X):
        """Return P(y == 1) for every row of ``X``."""
        design = self._check_array(X)
        return sigmoid(dot(design, self._coef))

    def predict(self, X):
        """Return hard boolean predictions (probability threshold 0.5)."""
        probabilities = self.predict_proba(X)
        # TODO: verify, multiclass broken
        return probabilities > .5

    def score(self, X, y):
        """Mean accuracy of ``predict(X)`` against ``y``."""
        return accuracy_score(y, self.predict(X))
class LinearRegression(_GLM):
    __doc__ = _base_doc.format(
        regression_type='linear_regression',
        examples=textwrap.dedent("""
        >>> from dask_glm.datasets import make_regression
        >>> X, y = make_regression()
        >>> lr = LinearRegression()
        >>> lr.fit(X, y)
        >>> lr.predict(X)
        >>> lr.predict(X)
        >>> est.score(X, y)"""))

    @property
    def family(self):
        # Ordinary least squares corresponds to the Normal family.
        return families.Normal

    def predict(self, X):
        """Return the linear predictor ``X @ coef`` for every row."""
        design = self._check_array(X)
        return dot(design, self._coef)

    def score(self, X, y):
        """Mean squared error of the predictions against ``y``."""
        return mean_squared_error(y, self.predict(X))
class PoissonRegression(_GLM):
    __doc__ = _base_doc.format(
        regression_type='poisson_regression',
        examples=textwrap.dedent("""
        >>> from dask_glm.datasets import make_counts
        >>> X, y = make_counts()
        >>> lr = PoissonRegression()
        >>> lr.fit(X, y)
        >>> lr.predict(X)
        >>> lr.predict(X)
        >>> lr.get_deviance(X, y)"""))

    @property
    def family(self):
        # Count data corresponds to the Poisson family.
        return families.Poisson

    def predict(self, X):
        """Return predicted counts: ``exp(X @ coef)`` (log link)."""
        design = self._check_array(X)
        return exp(dot(design, self._coef))

    def get_deviance(self, X, y):
        """Poisson deviance of the predictions against ``y``."""
        return poisson_deviance(y, self.predict(X))
| true | true |
1c3357a25cce6c5948007ae0ef84e1c93beb4b30 | 697 | py | Python | fixture/application.py | VictorKazankov/Python_training_mantis | 02b85302079388e626a78e8289bfb979af3e7ed4 | [
"Apache-2.0"
] | null | null | null | fixture/application.py | VictorKazankov/Python_training_mantis | 02b85302079388e626a78e8289bfb979af3e7ed4 | [
"Apache-2.0"
] | null | null | null | fixture/application.py | VictorKazankov/Python_training_mantis | 02b85302079388e626a78e8289bfb979af3e7ed4 | [
"Apache-2.0"
] | null | null | null | from selenium.webdriver.firefox.webdriver import WebDriver
from fixture.session import SessionHelper
from fixture.project import ProjectHelper
from fixture.soap import SoapHelper
class Application:
def __init__(self):
self.wd = WebDriver()
self.wd.implicitly_wait(5)
self.session = SessionHelper(self)
self.project = ProjectHelper(self)
self.soap = SoapHelper(self)
def is_valid(self):
try:
self.wd.current_url
return True
except:
return False
def open_home_page(self):
wd = self.wd
wd.get("http://localhost/mantisbt-1.2.19/")
def destroy(self):
self.wd.quit() | 25.814815 | 58 | 0.642755 | from selenium.webdriver.firefox.webdriver import WebDriver
from fixture.session import SessionHelper
from fixture.project import ProjectHelper
from fixture.soap import SoapHelper
class Application:
def __init__(self):
self.wd = WebDriver()
self.wd.implicitly_wait(5)
self.session = SessionHelper(self)
self.project = ProjectHelper(self)
self.soap = SoapHelper(self)
def is_valid(self):
try:
self.wd.current_url
return True
except:
return False
def open_home_page(self):
wd = self.wd
wd.get("http://localhost/mantisbt-1.2.19/")
def destroy(self):
self.wd.quit() | true | true |
1c33584b6448febf032bed615847b0e83b564b17 | 6,610 | py | Python | mobula/build.py | mgno32/MobulaOP | a58c06216ee6768cf4c46610b802c8b96bf3240d | [
"MIT"
] | null | null | null | mobula/build.py | mgno32/MobulaOP | a58c06216ee6768cf4c46610b802c8b96bf3240d | [
"MIT"
] | null | null | null | mobula/build.py | mgno32/MobulaOP | a58c06216ee6768cf4c46610b802c8b96bf3240d | [
"MIT"
] | null | null | null | """Building Implementation"""
import sys
import multiprocessing
try:
from .build_utils import *
except Exception:
from build_utils import *
NUM_CPU_CORE = multiprocessing.cpu_count()
HOST_NUM_THREADS = config.HOST_NUM_THREADS if config.HOST_NUM_THREADS > 0 else NUM_CPU_CORE
COMMON_FLAGS = Flags().add_definition('HOST_NUM_THREADS', HOST_NUM_THREADS)
if config.USING_OPTIMIZATION:
COMMON_FLAGS.add_string('-O3')
if config.DEBUG:
COMMON_FLAGS.add_string('-g')
COMMON_FLAGS.add_definition('USING_CBLAS', config.USING_CBLAS)
INC_PATHS.append('inc')
for path in INC_PATHS:
p = os.path.join(ENV_PATH, path)
if p:
COMMON_FLAGS.add_string('-I{}'.format(p))
CFLAGS = Flags('-std=c++11').add_definition('USING_CUDA', 0).\
add_definition('USING_HIP', 0).add_definition('USING_OPENMP', config.USING_OPENMP).\
add_string(COMMON_FLAGS)
if not OS_IS_WINDOWS:
CFLAGS.add_string('-fPIC')
LDFLAGS = Flags('-lpthread -shared')
if config.USING_CBLAS:
LDFLAGS.add_string('-lopenblas')
CU_FLAGS = Flags('-std=c++11 -x cu -Wno-deprecated-gpu-targets -dc \
--expt-extended-lambda').\
add_definition('USING_CUDA', 1).\
add_definition('USING_HIP', 0).\
add_string(COMMON_FLAGS)
if not OS_IS_WINDOWS:
CU_FLAGS.add_string('--compiler-options "-fPIC"')
CU_LDFLAGS = Flags('-shared -Wno-deprecated-gpu-targets \
-L%s/lib64 -lcuda -lcudart' % config.CUDA_DIR)
if config.USING_CBLAS:
CU_LDFLAGS.add_string('-lcublas')
HIP_FLAGS = Flags('-std=c++11 -Wno-deprecated-gpu-targets -Wno-deprecated-declarations -dc \
--expt-extended-lambda').\
add_definition('USING_CUDA', 0).\
add_definition('USING_HIP', 1).\
add_string(COMMON_FLAGS)
if not OS_IS_WINDOWS:
HIP_FLAGS.add_string('--compiler-options "-fPIC"')
HIP_LDFLAGS = Flags('-shared -Wno-deprecated-gpu-targets')
if config.USING_CBLAS:
HIP_LDFLAGS.add_string('-lhipblas')
if config.USING_OPENMP:
CFLAGS.add_string('-fopenmp')
LDFLAGS.add_string('-fopenmp')
if config.USING_HIGH_LEVEL_WARNINGS:
CFLAGS.add_string('-Werror -Wall -Wextra -pedantic -Wcast-align -Wcast-qual \
-Wctor-dtor-privacy -Wdisabled-optimization -Wformat=2 -Winit-self -Wmissing-include-dirs \
-Wold-style-cast -Woverloaded-virtual -Wredundant-decls -Wshadow \
-Wsign-promo -Wundef -fdiagnostics-show-option')
def source_to_o(build_path, src_obj, compiler=config.CXX, cflags=CFLAGS):
    """Compile each ``(source, object)`` pair into ``build_path``.

    Sources whose object file is already up to date are skipped.
    Returns True when at least one file was (re)compiled.
    """
    mkdir(build_path)
    existed_dirs = set()
    updated = False
    commands = []
    for src, obj in src_obj:
        dir_name = os.path.dirname(obj)
        build_dir_name = os.path.join(build_path, dir_name)
        build_name = os.path.join(build_path, obj)
        # Skip unchanged sources that already have an object file.
        if file_is_latest(src) and os.path.exists(build_name):
            continue
        updated = True
        if build_dir_name not in existed_dirs:
            mkdir(build_dir_name)
            existed_dirs.add(build_dir_name)
        if OS_IS_WINDOWS and not command_exists(compiler):
            # Fall back to MSVC's ``cl``: rebuild the include and -D flags
            # in cl-compatible syntax (other flags are dropped).
            inc_flags = Flags()
            for path in INC_PATHS:
                p = os.path.join(ENV_PATH, path)
                inc_flags.add_string('-I{}'.format(p))
            cflags_sp = str(cflags).split()
            def_flags = ' '.join(
                [s for s in cflags_sp if len(s) > 2 and s[:2] == '-D'])
            command = 'cl /O2 %s %s -c %s -Fo%s' % (
                def_flags, inc_flags, src, build_name)
        else:
            command = '%s %s %s -c -o %s' % (compiler, src, cflags, build_name)
        commands.append(command)
    run_command_parallel(commands)
    return updated
def o_to_so(target_name, objs, linker, ldflags=LDFLAGS):
    """Link the object files ``objs`` into the shared library ``target_name``."""
    joined_objs = ' '.join(objs)
    # On Windows without the requested linker, fall back to MSVC's ``link``.
    if OS_IS_WINDOWS and not command_exists(linker):
        command = 'link -DLL %s -out:%s' % (joined_objs, target_name)
    else:
        command = '%s %s %s -o %s' % (linker, joined_objs, ldflags, target_name)
    run_command(command)
def source_to_so(build_path, srcs, target_name, compiler, cflags, ldflags, buildin_o=None):
    """Compile ``srcs`` and link them (plus optional prebuilt objects) into ``target_name``."""
    object_files = change_exts(srcs, [('cpp', 'o')])
    # Compile first; relink when anything changed or the library is missing.
    recompiled = source_to_o(build_path, zip(srcs, object_files), compiler, cflags)
    if recompiled or not os.path.exists(target_name):
        if buildin_o is not None:
            object_files.extend(buildin_o)
        o_to_so(target_name, add_path(build_path, object_files), compiler, ldflags)
# Per-context (compiler, compile flags, link flags) triples; used by
# ``source_to_so_ctx`` to pick the right toolchain.
BUILD_FLAGS = dict(
    cpu=(config.CXX, CFLAGS, LDFLAGS),
    cuda=(config.NVCC, CU_FLAGS, CU_LDFLAGS),
    hip=(config.HIPCC, HIP_FLAGS, HIP_LDFLAGS)
)
def source_to_so_ctx(build_path, srcs, target_name, ctx_name, buildin_cpp=None):
    """Build ``target_name`` from ``srcs`` for one context ('cpu', 'cuda' or 'hip').

    ``buildin_cpp`` names prebuilt MobulaOP sources whose object files must
    already exist under ``<BUILD_PATH>/<ctx_name>``.

    Raises
    ------
    ValueError
        If ``ctx_name`` is not a known build context.
    FileNotFoundError
        If a required prebuilt object file is missing.
    """
    # Fix: ``assert cond, SomeError(...)`` is stripped under ``python -O``
    # and raised AssertionError anyway; raise the intended exceptions directly.
    if ctx_name not in BUILD_FLAGS:
        raise ValueError('Unsupported Context: {} -('.format(ctx_name))
    buildin_o = []
    if buildin_cpp is not None:
        buildin_path = os.path.join(ENV_PATH, config.BUILD_PATH, ctx_name)
        buildin_o = [os.path.join(buildin_path, fname) for fname in
                     change_exts(buildin_cpp, [('cpp', 'o')])]
        for fname in buildin_o:
            if not os.path.exists(fname):
                raise FileNotFoundError(
                    'File {} not found, please rebuild MobulaOP :-('.format(fname))
    flags = BUILD_FLAGS[ctx_name] + (buildin_o, )
    source_to_so(build_path, srcs, target_name, *flags)
def cpu_func():
    """Build the CPU shared library ``<BUILD_PATH>/<TARGET>_cpu.so``."""
    build_path = os.path.join(config.BUILD_PATH, 'cpu')
    target_name = os.path.join(config.BUILD_PATH, '%s_cpu.so' % config.TARGET)
    source_to_so_ctx(build_path, SRCS, target_name, 'cpu')
def cuda_func():
    """Build the CUDA shared library ``<BUILD_PATH>/<TARGET>_cuda.so``."""
    build_path = os.path.join(config.BUILD_PATH, 'cuda')
    target_name = os.path.join(config.BUILD_PATH, '%s_cuda.so' % config.TARGET)
    source_to_so_ctx(build_path, SRCS, target_name, 'cuda')
def hip_func():
    """Build the HIP (ROCm) shared library ``<BUILD_PATH>/<TARGET>_hip.so``."""
    build_path = os.path.join(config.BUILD_PATH, 'hip')
    target_name = os.path.join(config.BUILD_PATH, '%s_hip.so' % config.TARGET)
    source_to_so_ctx(build_path, SRCS, target_name, 'hip')
def clean_func():
    """Delete the whole build directory."""
    rmdir(config.BUILD_PATH)
def all_func():
    """Build the CPU library, then the first available GPU backend (CUDA preferred)."""
    cpu_func()
    for gpu_compiler, gpu_build in ((config.NVCC, cuda_func), (config.HIPCC, hip_func)):
        if command_exists(gpu_compiler):
            gpu_build()
            break
# Maps a command-line rule name (``python build.py <rule>``) to its action.
RULES = dict(
    all=all_func,
    cpu=cpu_func,
    cuda=cuda_func,
    hip=hip_func,
    clean=clean_func,
)
def run_rule(name):
    """Execute the build rule called ``name`` (see ``RULES``).

    Raises
    ------
    ValueError
        If ``name`` is not a known rule.
    """
    # Fix: ``assert cond, ValueError(...)`` is stripped under ``python -O``
    # and raised AssertionError rather than ValueError; raise directly.
    if name not in RULES:
        raise ValueError("No rule to make target '{}'".format(name))
    RULES[name]()
if __name__ == '__main__':
    # Usage: ``python build.py <rule> [extra args]``; extra args are handed
    # to ``pass_argv`` from build_utils.
    assert len(sys.argv) >= 2, AssertionError(
        'Please add building flag, e.g. python build.py all\nValid flags: {}'.format(' | '.join(BUILD_FLAGS.keys())))
    pass_argv(sys.argv)
    # Collect every .cpp under src/ and run the requested rule.
    SRCS = wildcard(['src'], 'cpp')
    with build_context():
        run_rule(sys.argv[1])
| 33.72449 | 117 | 0.663843 | import sys
import multiprocessing
try:
from .build_utils import *
except Exception:
from build_utils import *
NUM_CPU_CORE = multiprocessing.cpu_count()
HOST_NUM_THREADS = config.HOST_NUM_THREADS if config.HOST_NUM_THREADS > 0 else NUM_CPU_CORE
COMMON_FLAGS = Flags().add_definition('HOST_NUM_THREADS', HOST_NUM_THREADS)
if config.USING_OPTIMIZATION:
COMMON_FLAGS.add_string('-O3')
if config.DEBUG:
COMMON_FLAGS.add_string('-g')
COMMON_FLAGS.add_definition('USING_CBLAS', config.USING_CBLAS)
INC_PATHS.append('inc')
for path in INC_PATHS:
p = os.path.join(ENV_PATH, path)
if p:
COMMON_FLAGS.add_string('-I{}'.format(p))
CFLAGS = Flags('-std=c++11').add_definition('USING_CUDA', 0).\
add_definition('USING_HIP', 0).add_definition('USING_OPENMP', config.USING_OPENMP).\
add_string(COMMON_FLAGS)
if not OS_IS_WINDOWS:
CFLAGS.add_string('-fPIC')
LDFLAGS = Flags('-lpthread -shared')
if config.USING_CBLAS:
LDFLAGS.add_string('-lopenblas')
CU_FLAGS = Flags('-std=c++11 -x cu -Wno-deprecated-gpu-targets -dc \
--expt-extended-lambda').\
add_definition('USING_CUDA', 1).\
add_definition('USING_HIP', 0).\
add_string(COMMON_FLAGS)
if not OS_IS_WINDOWS:
CU_FLAGS.add_string('--compiler-options "-fPIC"')
CU_LDFLAGS = Flags('-shared -Wno-deprecated-gpu-targets \
-L%s/lib64 -lcuda -lcudart' % config.CUDA_DIR)
if config.USING_CBLAS:
CU_LDFLAGS.add_string('-lcublas')
HIP_FLAGS = Flags('-std=c++11 -Wno-deprecated-gpu-targets -Wno-deprecated-declarations -dc \
--expt-extended-lambda').\
add_definition('USING_CUDA', 0).\
add_definition('USING_HIP', 1).\
add_string(COMMON_FLAGS)
if not OS_IS_WINDOWS:
HIP_FLAGS.add_string('--compiler-options "-fPIC"')
HIP_LDFLAGS = Flags('-shared -Wno-deprecated-gpu-targets')
if config.USING_CBLAS:
HIP_LDFLAGS.add_string('-lhipblas')
if config.USING_OPENMP:
CFLAGS.add_string('-fopenmp')
LDFLAGS.add_string('-fopenmp')
if config.USING_HIGH_LEVEL_WARNINGS:
CFLAGS.add_string('-Werror -Wall -Wextra -pedantic -Wcast-align -Wcast-qual \
-Wctor-dtor-privacy -Wdisabled-optimization -Wformat=2 -Winit-self -Wmissing-include-dirs \
-Wold-style-cast -Woverloaded-virtual -Wredundant-decls -Wshadow \
-Wsign-promo -Wundef -fdiagnostics-show-option')
def source_to_o(build_path, src_obj, compiler=config.CXX, cflags=CFLAGS):
mkdir(build_path)
existed_dirs = set()
updated = False
commands = []
for src, obj in src_obj:
dir_name = os.path.dirname(obj)
build_dir_name = os.path.join(build_path, dir_name)
build_name = os.path.join(build_path, obj)
if file_is_latest(src) and os.path.exists(build_name):
continue
updated = True
if build_dir_name not in existed_dirs:
mkdir(build_dir_name)
existed_dirs.add(build_dir_name)
if OS_IS_WINDOWS and not command_exists(compiler):
inc_flags = Flags()
for path in INC_PATHS:
p = os.path.join(ENV_PATH, path)
inc_flags.add_string('-I{}'.format(p))
cflags_sp = str(cflags).split()
def_flags = ' '.join(
[s for s in cflags_sp if len(s) > 2 and s[:2] == '-D'])
command = 'cl /O2 %s %s -c %s -Fo%s' % (
def_flags, inc_flags, src, build_name)
else:
command = '%s %s %s -c -o %s' % (compiler, src, cflags, build_name)
commands.append(command)
run_command_parallel(commands)
return updated
def o_to_so(target_name, objs, linker, ldflags=LDFLAGS):
if OS_IS_WINDOWS and not command_exists(linker):
command = 'link -DLL %s -out:%s' % (' '.join(objs), target_name)
else:
command = '%s %s %s -o %s' % (linker,
' '.join(objs), ldflags, target_name)
run_command(command)
def source_to_so(build_path, srcs, target_name, compiler, cflags, ldflags, buildin_o=None):
objs = change_exts(srcs, [('cpp', 'o')])
if source_to_o(build_path, zip(srcs, objs), compiler, cflags) or\
not os.path.exists(target_name):
if buildin_o is not None:
objs.extend(buildin_o)
abs_objs = add_path(build_path, objs)
o_to_so(target_name, abs_objs, compiler, ldflags)
BUILD_FLAGS = dict(
cpu=(config.CXX, CFLAGS, LDFLAGS),
cuda=(config.NVCC, CU_FLAGS, CU_LDFLAGS),
hip=(config.HIPCC, HIP_FLAGS, HIP_LDFLAGS)
)
def source_to_so_ctx(build_path, srcs, target_name, ctx_name, buildin_cpp=None):
assert ctx_name in BUILD_FLAGS, ValueError(
'Unsupported Context: {} -('.format(ctx_name))
buildin_o = []
if buildin_cpp is not None:
buildin_path = os.path.join(ENV_PATH, config.BUILD_PATH, ctx_name)
buildin_o = [os.path.join(buildin_path, fname) for fname in
change_exts(buildin_cpp, [('cpp', 'o')])]
for fname in buildin_o:
assert os.path.exists(fname),\
Exception(
'File {} not found, please rebuild MobulaOP :-('.format(fname))
flags = BUILD_FLAGS[ctx_name] + (buildin_o, )
source_to_so(build_path, srcs, target_name, *flags)
def cpu_func():
build_path = os.path.join(config.BUILD_PATH, 'cpu')
target_name = os.path.join(config.BUILD_PATH, '%s_cpu.so' % config.TARGET)
source_to_so_ctx(build_path, SRCS, target_name, 'cpu')
def cuda_func():
build_path = os.path.join(config.BUILD_PATH, 'cuda')
target_name = os.path.join(config.BUILD_PATH, '%s_cuda.so' % config.TARGET)
source_to_so_ctx(build_path, SRCS, target_name, 'cuda')
def hip_func():
build_path = os.path.join(config.BUILD_PATH, 'hip')
target_name = os.path.join(config.BUILD_PATH, '%s_hip.so' % config.TARGET)
source_to_so_ctx(build_path, SRCS, target_name, 'hip')
def clean_func():
rmdir(config.BUILD_PATH)
def all_func():
cpu_func()
if command_exists(config.NVCC):
cuda_func()
elif command_exists(config.HIPCC):
hip_func()
RULES = dict(
all=all_func,
cpu=cpu_func,
cuda=cuda_func,
hip=hip_func,
clean=clean_func,
)
def run_rule(name):
assert name in RULES, ValueError(
"No rule to make target '{}'".format(name))
RULES[name]()
if __name__ == '__main__':
assert len(sys.argv) >= 2, AssertionError(
'Please add building flag, e.g. python build.py all\nValid flags: {}'.format(' | '.join(BUILD_FLAGS.keys())))
pass_argv(sys.argv)
SRCS = wildcard(['src'], 'cpp')
with build_context():
run_rule(sys.argv[1])
| true | true |
1c33588cd2cfd239421257da498b156f986374d1 | 3,629 | py | Python | DeepLearningExamples/TensorFlow/Segmentation/UNet_Industrial/model/layers/array_ops.py | puririshi98/benchmark | 79f554f1e1cf36f62994c78e0e6e5b360f554022 | [
"BSD-3-Clause"
] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | DeepLearningExamples/TensorFlow/Segmentation/UNet_Industrial/model/layers/array_ops.py | puririshi98/benchmark | 79f554f1e1cf36f62994c78e0e6e5b360f554022 | [
"BSD-3-Clause"
] | 3 | 2021-03-31T20:15:40.000Z | 2022-02-09T23:50:46.000Z | built-in/TensorFlow/Official/cv/image_segmentation/UNet_Industrial_for_TensorFlow/model/layers/array_ops.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
from model.layers.utils import _log_hparams
__all__ = ['concat', 'flatten', 'reshape', 'squeeze', 'upscale_2d']
def concat(values, axis, name='concat'):
    """Concatenate ``values`` along ``axis`` and log the layer hyper-parameters."""
    output = tf.concat(values=values, axis=axis, name=name)
    _log_hparams(
        classname='Concat',
        layername=output.name,
        axis=axis,
        out_shape=str(output.get_shape()),
        out_dtype=output.dtype,
    )
    return output
def flatten(inputs, name='flatten'):
    """Flatten ``inputs`` to 2-D (batch, features) and log the layer hyper-parameters."""
    output = tf.layers.flatten(inputs, name=name)
    _log_hparams(
        classname='Flatten',
        layername=output.name,
        out_shape=str(output.get_shape()),
        out_dtype=output.dtype,
    )
    return output
def reshape(tensor, shape, name='reshape'):
    """Reshape ``tensor`` to ``shape`` and log the layer hyper-parameters."""
    output = tf.reshape(tensor, shape=shape, name=name)
    _log_hparams(
        classname='Reshape',
        layername=output.name,
        shape=shape,
        out_shape=str(output.get_shape()),
        out_dtype=output.dtype,
    )
    return output
def squeeze(tensor, axis, name='squeeze'):
    """Drop the size-1 dimension(s) ``axis`` from ``tensor`` and log the layer hyper-parameters."""
    output = tf.squeeze(tensor, axis=axis, name=name)
    _log_hparams(
        classname='Squeeze',
        layername=output.name,
        axis=axis,
        out_shape=str(output.get_shape()),
        out_dtype=output.dtype,
    )
    return output
def upscale_2d(inputs, size, is_scale=True, method=0, align_corners=True, data_format='NHWC', name='upsample2d_layer'):
    """Resize a rank-3 (HWC) or rank-4 (NHWC/NCHW) tensor with ``tf.image.resize_images``.

    Parameters
    ----------
    inputs : tf.Tensor
        Rank-3 single image or rank-4 batch of images.
    size : list or tuple of two ints
        Per-axis (height, width) scale factors when ``is_scale`` is True,
        otherwise the absolute output size.
    is_scale : bool
        Interpret ``size`` as scale factors instead of absolute sizes.
    method : int
        ``tf.image.ResizeMethod`` value (0 = bilinear).
    align_corners : bool
        Forwarded to ``tf.image.resize_images``.
    data_format : {'NHWC', 'NCHW'}
        Layout of a rank-4 input; NCHW inputs are transposed for resizing
        and transposed back afterwards.
    """
    # Fix: the original check (`not isinstance(...) and len(size) == 2`) only
    # rejected non-sequences that happened to have length 2; require a
    # two-element list/tuple outright.
    if not (isinstance(size, (list, tuple)) and len(size) == 2):
        raise AssertionError("`size` must be a list/tuple of two ints, got %r" % (size,))
    if data_format not in ['NHWC', 'NCHW']:
        raise ValueError("Unknown data format received: `%s` (allowed: `NHWC`, `NCHW`)" % data_format)
    input_shape = inputs.get_shape()
    if len(input_shape) == 3:
        if is_scale:
            # Rank-3 input is HWC: dims 0/1 are height/width.
            size_h = size[0] * int(input_shape[0])
            size_w = size[1] * int(input_shape[1])
            _size = [size_h, size_w]
        else:
            _size = size
    elif len(input_shape) == 4:
        if data_format == 'NCHW':
            inputs = tf.transpose(inputs, [0, 2, 3, 1])  # NCHW => NHWC
        if is_scale:
            # After the optional transpose the layout is NHWC: dims 1/2 are H/W.
            size_h = size[0] * int(inputs.get_shape()[1])
            size_w = size[1] * int(inputs.get_shape()[2])
            _size = [size_h, size_w]
        else:
            _size = size
    else:
        raise Exception("Do not support shape %s" % str(input_shape))
    with tf.variable_scope(name):
        net = tf.image.resize_images(inputs, size=_size, method=method, align_corners=align_corners)
        if data_format == 'NCHW' and len(input_shape) == 4:
            net = tf.transpose(net, [0, 3, 1, 2])  # NHWC => NCHW
    _log_hparams(
        classname='Upscale2D',
        layername=net.name,
        size=size,
        is_scale=is_scale,
        method=method,
        align_corners=align_corners,
        data_format=data_format,
        input_shape=str(input_shape),
        out_shape=str(net.get_shape()),
        out_dtype=net.dtype
    )
    return net
| 29.991736 | 120 | 0.618903 |
import tensorflow as tf
from model.layers.utils import _log_hparams
__all__ = ['concat', 'flatten', 'reshape', 'squeeze', 'upscale_2d']
def concat(values, axis, name='concat'):
net = tf.concat(values=values, axis=axis, name=name)
_log_hparams(classname='Concat', layername=net.name, axis=axis, out_shape=str(net.get_shape()), out_dtype=net.dtype)
return net
def flatten(inputs, name='flatten'):
net = tf.layers.flatten(inputs, name=name)
_log_hparams(classname='Flatten', layername=net.name, out_shape=str(net.get_shape()), out_dtype=net.dtype)
return net
def reshape(tensor, shape, name='reshape'):
net = tf.reshape(tensor, shape=shape, name=name)
_log_hparams(
classname='Reshape', layername=net.name, shape=shape, out_shape=str(net.get_shape()), out_dtype=net.dtype
)
return net
def squeeze(tensor, axis, name='squeeze'):
net = tf.squeeze(tensor, axis=axis, name=name)
_log_hparams(
classname='Squeeze', layername=net.name, axis=axis, out_shape=str(net.get_shape()), out_dtype=net.dtype
)
return net
def upscale_2d(inputs, size, is_scale=True, method=0, align_corners=True, data_format='NHWC', name='upsample2d_layer'):
if not isinstance(size, (list, tuple)) and len(size) == 2:
raise AssertionError()
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format received: `%s` (allowed: `NHWC`, `NCHW`)" % data_format)
input_shape = inputs.get_shape()
if len(inputs.get_shape()) == 3:
if is_scale:
size_h = size[0] * int(inputs.get_shape()[0])
size_w = size[1] * int(inputs.get_shape()[1])
_size = [size_h, size_w]
else:
_size = size
elif len(inputs.get_shape()) == 4:
if data_format == 'NCHW':
inputs = tf.transpose(inputs, [0, 2, 3, 1])
if is_scale:
size_h = size[0] * int(inputs.get_shape()[1])
size_w = size[1] * int(inputs.get_shape()[2])
_size = [size_h, size_w]
else:
_size = size
else:
raise Exception("Do not support shape %s" % str(inputs.get_shape()))
with tf.variable_scope(name):
net = tf.image.resize_images(inputs, size=_size, method=method, align_corners=align_corners)
if data_format == 'NCHW' and len(inputs.get_shape()) == 4:
net = tf.transpose(net, [0, 3, 1, 2])
_log_hparams(
classname='Upscale2D',
layername=net.name,
size=size,
is_scale=is_scale,
method=method,
align_corners=align_corners,
data_format=data_format,
input_shape=str(input_shape),
out_shape=str(net.get_shape()),
out_dtype=net.dtype
)
return net
| true | true |
1c3359893ff7287a55b182731ef1b9f1b460a2db | 3,104 | py | Python | python/examples/scan_external_sources_custom_path_data_rules.py | fvaleye/metadata-guardian | ab5d6cada67785c3cfd98112f68e8fdb193d1617 | [
"Apache-2.0"
] | 9 | 2021-12-31T20:32:35.000Z | 2022-02-18T17:51:49.000Z | python/examples/scan_external_sources_custom_path_data_rules.py | fvaleye/metadata-guardian | ab5d6cada67785c3cfd98112f68e8fdb193d1617 | [
"Apache-2.0"
] | 1 | 2022-02-25T16:35:04.000Z | 2022-02-28T21:08:53.000Z | python/examples/scan_external_sources_custom_path_data_rules.py | fvaleye/metadata-guardian | ab5d6cada67785c3cfd98112f68e8fdb193d1617 | [
"Apache-2.0"
] | null | null | null | import argparse
import os
from metadata_guardian import (
AvailableCategory,
ColumnScanner,
DataRules,
ExternalMetadataSource,
)
from metadata_guardian.source import (
AthenaSource,
BigQuerySource,
DeltaTableSource,
GlueSource,
KafkaSchemaRegistrySource,
MySQLSource,
SnowflakeSource,
)
def get_snowflake() -> ExternalMetadataSource:
    """Build a Snowflake source from the ``SNOWFLAKE_*`` environment variables."""
    env = os.environ
    return SnowflakeSource(
        sf_account=env["SNOWFLAKE_ACCOUNT"],
        sf_user=env["SNOWFLAKE_USER"],
        sf_password=env["SNOWFLAKE_PASSWORD"],
        warehouse=env["SNOWFLAKE_WAREHOUSE"],
        schema_name=env["SNOWFLAKE_SCHEMA_NAME"],
    )
def get_gcp_bigquery() -> ExternalMetadataSource:
    """Build a BigQuery source from the ``BIGQUERY_*`` environment variables."""
    env = os.environ
    return BigQuerySource(
        service_account_json_path=env["BIGQUERY_SERVICE_ACCOUNT"],
        project=env["BIGQUERY_PROJECT"],
        location=env["BIGQUERY_LOCATION"],
    )
def get_kafka_schema_registry() -> ExternalMetadataSource:
    """Build a Kafka Schema Registry source from ``KAFKA_SCHEMA_REGISTRY_URL``."""
    return KafkaSchemaRegistrySource(url=os.environ["KAFKA_SCHEMA_REGISTRY_URL"])
def get_delta_table() -> ExternalMetadataSource:
    """Build a Delta table source from ``DELTA_TABLE_URI``."""
    return DeltaTableSource(uri=os.environ["DELTA_TABLE_URI"])
def get_mysql() -> ExternalMetadataSource:
    """Build a MySQL source from the ``MYSQL_*`` environment variables."""
    env = os.environ
    return MySQLSource(
        user=env["MYSQL_USER"],
        password=env["MYSQL_PASSWORD"],
        host=env["MYSQL_HOST"],
    )
if __name__ == "__main__":
    # CLI entry point: scan one external metadata source with a ColumnScanner
    # built from a user-supplied data-rules YAML file.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data-rules-path",
        required=True,
        help="The Data Rules specification yaml file path to use for creating the Data Rules",
    )
    parser.add_argument(
        "--external-source",
        choices=[
            "Snowflake",
            "GCP BigQuery",
            "Kafka Schema Registry",
            "Delta Table",
            "MySQL",
        ],
        required=True,
        help="The External Metadata Source to use",
    )
    # NOTE(review): --scanner is parsed but never read below; ColumnScanner
    # is always used.
    parser.add_argument(
        "--scanner", choices=["ColumnScanner"], help="The scanner to use"
    )
    parser.add_argument(
        "--database_name", required=True, help="The database name to scan"
    )
    parser.add_argument(
        "--include_comments", default=True, help="Include the comments in the scan"
    )
    args = parser.parse_args()
    data_rules = DataRules.from_path(path=args.data_rules_path)
    column_scanner = ColumnScanner(
        data_rules=data_rules, progression_bar_disabled=False
    )
    # Map the chosen CLI name to its source factory (argparse `choices`
    # guarantees exactly one branch matches).
    if args.external_source == "Snowflake":
        source = get_snowflake()
    elif args.external_source == "GCP BigQuery":
        source = get_gcp_bigquery()
    elif args.external_source == "Kafka Schema Registry":
        source = get_kafka_schema_registry()
    elif args.external_source == "Delta Table":
        source = get_delta_table()
    elif args.external_source == "MySQL":
        source = get_mysql()
    # The source context manager opens/closes the underlying connection.
    with source:
        report = column_scanner.scan_external(
            source,
            database_name=args.database_name,
            include_comment=args.include_comments,
        )
        report.to_console()
| 29.009346 | 94 | 0.664948 | import argparse
import os
from metadata_guardian import (
AvailableCategory,
ColumnScanner,
DataRules,
ExternalMetadataSource,
)
from metadata_guardian.source import (
AthenaSource,
BigQuerySource,
DeltaTableSource,
GlueSource,
KafkaSchemaRegistrySource,
MySQLSource,
SnowflakeSource,
)
def get_snowflake() -> ExternalMetadataSource:
return SnowflakeSource(
sf_account=os.environ["SNOWFLAKE_ACCOUNT"],
sf_user=os.environ["SNOWFLAKE_USER"],
sf_password=os.environ["SNOWFLAKE_PASSWORD"],
warehouse=os.environ["SNOWFLAKE_WAREHOUSE"],
schema_name=os.environ["SNOWFLAKE_SCHEMA_NAME"],
)
def get_gcp_bigquery() -> ExternalMetadataSource:
return BigQuerySource(
service_account_json_path=os.environ["BIGQUERY_SERVICE_ACCOUNT"],
project=os.environ["BIGQUERY_PROJECT"],
location=os.environ["BIGQUERY_LOCATION"],
)
def get_kafka_schema_registry() -> ExternalMetadataSource:
return KafkaSchemaRegistrySource(url=os.environ["KAFKA_SCHEMA_REGISTRY_URL"])
def get_delta_table() -> ExternalMetadataSource:
return DeltaTableSource(uri=os.environ["DELTA_TABLE_URI"])
def get_mysql() -> ExternalMetadataSource:
return MySQLSource(
user=os.environ["MYSQL_USER"],
password=os.environ["MYSQL_PASSWORD"],
host=os.environ["MYSQL_HOST"],
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--data-rules-path",
required=True,
help="The Data Rules specification yaml file path to use for creating the Data Rules",
)
parser.add_argument(
"--external-source",
choices=[
"Snowflake",
"GCP BigQuery",
"Kafka Schema Registry",
"Delta Table",
"MySQL",
],
required=True,
help="The External Metadata Source to use",
)
parser.add_argument(
"--scanner", choices=["ColumnScanner"], help="The scanner to use"
)
parser.add_argument(
"--database_name", required=True, help="The database name to scan"
)
parser.add_argument(
"--include_comments", default=True, help="Include the comments in the scan"
)
args = parser.parse_args()
data_rules = DataRules.from_path(path=args.data_rules_path)
column_scanner = ColumnScanner(
data_rules=data_rules, progression_bar_disabled=False
)
if args.external_source == "Snowflake":
source = get_snowflake()
elif args.external_source == "GCP BigQuery":
source = get_gcp_bigquery()
elif args.external_source == "Kafka Schema Registry":
source = get_kafka_schema_registry()
elif args.external_source == "Delta Table":
source = get_delta_table()
elif args.external_source == "MySQL":
source = get_mysql()
with source:
report = column_scanner.scan_external(
source,
database_name=args.database_name,
include_comment=args.include_comments,
)
report.to_console()
| true | true |
1c335a4a3f949a93d650c47d0aff470cf8e0234b | 5,901 | py | Python | app/galleries.py | atm08e/amiller.im-py3 | ddbc80d233d9bd2c645eb7cb2af73e951515fca4 | [
"MIT"
] | null | null | null | app/galleries.py | atm08e/amiller.im-py3 | ddbc80d233d9bd2c645eb7cb2af73e951515fca4 | [
"MIT"
] | null | null | null | app/galleries.py | atm08e/amiller.im-py3 | ddbc80d233d9bd2c645eb7cb2af73e951515fca4 | [
"MIT"
] | null | null | null | import json
# TODO async json
import logging
import os
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger(__name__)
def gallery_json_loader(path):
    """Load and return the JSON document stored at ``path``.

    Raises whatever ``open``/``json.load`` raise on a missing or invalid file.
    """
    # Fix: open read-only ('r' instead of 'r+'); loading never writes, and
    # 'r+' fails on read-only files. Log lazily with %-args.
    logging.getLogger(__name__).debug('Trying to open file: %s', path)
    with open(path, 'r', encoding='utf-8') as f:
        return json.load(f)
# Manifest of gallery JSON files, grouped by category and year. Entries are
# numbered "1", "2", ... in list order by ``setup_galleries``.
_GALLERY_FILES = {
    'snowboarding': {
        '2016': ['wreckbreck2016.json', 'familybreck2016.json'],
        '2017': ['wreckbreck2017.json', 'shredapalooza2017.json',
                 'ibelieveinspringbreak2017.json'],
        '2018': ['alpinemeadows2018.json', 'senditinvitational2018.json',
                 'sierraattahoe2018.json', 'whistlerblackcomb2018.json',
                 'wreckbreck2018.json'],
        '2019': ['whistler-dec-2019.json', 'colorado-solo-2019.json',
                 'wreck-breck-2019.json', 'shred-a-palooza-2019.json',
                 'heavenly-spring-2019.json', 'kirkwood-spring-2019.json'],
    },
    'boating': {
        '2016': ['fishing-jan-2016.json', 'airshow-2016.json',
                 'fishing-dad-grandpa-july-2016.json', 'miniseason-2016.json'],
        '2017': ['fishing-june-2017.json', 'keys-july-2017.json'],
    },
}


def setup_galleries(path_to_static):
    """Load every gallery JSON under ``<path_to_static>/galleries``.

    Returns the same nested mapping as before:
    ``{category: {year: {"1": <json>, "2": <json>, ...}}}``.

    (Replaces the previous hand-written literal — the file manifest now
    lives in ``_GALLERY_FILES`` and the structure is built in one place.)
    """
    return {
        category: {
            year: {
                str(index): gallery_json_loader(
                    os.path.join(path_to_static, 'galleries', category, year, filename))
                for index, filename in enumerate(filenames, start=1)
            }
            for year, filenames in years.items()
        }
        for category, years in _GALLERY_FILES.items()
    }
| 50.435897 | 120 | 0.401457 | import json
import logging
import os
import sys
# Route all module logging to stdout at DEBUG level so file-load attempts
# (see gallery_json_loader) are visible when the app runs.
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger(__name__)  # module-level logger
def gallery_json_loader(path):
    """Load and return the parsed contents of one gallery JSON file.

    Args:
        path: Filesystem path to a UTF-8 encoded JSON file.

    Returns:
        The deserialized JSON payload (typically a dict or list).
    """
    logging.getLogger(__name__).debug('Trying to open file: {}'.format(path))
    # Open read-only: this loader never writes, and 'r+' would both fail on
    # read-only deployments and invite accidental modification of galleries.
    with open(path, 'r', encoding='utf-8') as f:
        return json.load(f)
# Gallery JSON files grouped by category and year. Order matters: each
# filename's 1-based position within its year becomes the gallery key
# ('1', '2', ...), reproducing the original hand-written mapping exactly.
_GALLERY_FILES = {
    'snowboarding': {
        '2016': ['wreckbreck2016.json', 'familybreck2016.json'],
        '2017': ['wreckbreck2017.json', 'shredapalooza2017.json',
                 'ibelieveinspringbreak2017.json'],
        '2018': ['alpinemeadows2018.json', 'senditinvitational2018.json',
                 'sierraattahoe2018.json', 'whistlerblackcomb2018.json',
                 'wreckbreck2018.json'],
        '2019': ['whistler-dec-2019.json', 'colorado-solo-2019.json',
                 'wreck-breck-2019.json', 'shred-a-palooza-2019.json',
                 'heavenly-spring-2019.json', 'kirkwood-spring-2019.json'],
    },
    'boating': {
        '2016': ['fishing-jan-2016.json', 'airshow-2016.json',
                 'fishing-dad-grandpa-july-2016.json', 'miniseason-2016.json'],
        '2017': ['fishing-june-2017.json', 'keys-july-2017.json'],
    },
}


def setup_galleries(path_to_static):
    """Load every gallery JSON file found under ``path_to_static``.

    Args:
        path_to_static: Root static directory containing the ``galleries``
            subtree (``galleries/<category>/<year>/<file>.json``).

    Returns:
        Nested dict: category -> year -> gallery number (string, starting
        at '1') -> parsed JSON content of that gallery file.
    """
    return {
        category: {
            year: {
                str(number): gallery_json_loader(
                    os.path.join(path_to_static, 'galleries', category,
                                 year, filename))
                for number, filename in enumerate(filenames, start=1)
            }
            for year, filenames in years.items()
        }
        for category, years in _GALLERY_FILES.items()
    }
| true | true |
1c335a5cb2b2657745ae992aecdd688526eb9297 | 2,611 | py | Python | planning/domains/hiking/Generatorhikingproblem.py | xlbandy/fape | 8a00f9d4c20f722930c11d88b60e0e82f523a439 | [
"BSD-2-Clause"
] | 14 | 2017-01-09T23:25:12.000Z | 2022-02-16T12:08:48.000Z | planning/domains/hiking/Generatorhikingproblem.py | xlbandy/fape | 8a00f9d4c20f722930c11d88b60e0e82f523a439 | [
"BSD-2-Clause"
] | 7 | 2018-05-18T08:27:03.000Z | 2022-03-23T06:39:42.000Z | planning/domains/hiking/Generatorhikingproblem.py | xlbandy/fape | 8a00f9d4c20f722930c11d88b60e0e82f523a439 | [
"BSD-2-Clause"
] | 8 | 2016-12-09T13:31:43.000Z | 2022-02-16T12:08:50.000Z | from __future__ import division
import itertools
import json
import math
import os
import random
import shutil
import subprocess
import sys
# Default problem size; each value can be overridden from the command
# line (argv[1..3]) by main().
loc = 3
hiker =2
car = 2
def main():
    """Parse optional CLI arguments and write both ANML problem files.

    Usage: script.py [n_locations [n_hikers [n_cars]]]
    Each argument overrides the corresponding module-level default.
    """
    global loc
    global hiker
    global car
    if len(sys.argv) > 1:
        loc = int(sys.argv[1])
    if len(sys.argv) > 2:
        hiker = int(sys.argv[2])
    if len(sys.argv) > 3:
        car = int(sys.argv[3])
    # Explicit check instead of `assert`: assertions are stripped when
    # Python runs with -O, which would silently skip this validation.
    if loc <= 1:
        raise SystemExit("not enough locations")
    printProblem()
def printProblem():
    """Write the flat and hierarchical problem files for the current ``loc``."""
    generators = (("hikingflat", problemflat), ("hikinghier", problemhier))
    for prefix, build in generators:
        out_name = "%s.p%s.pb.anml" % (prefix, loc)
        with open(out_name, "w") as out_file:
            out_file.write(build())
def problemflat(n_loc=None, n_hiker=None, n_car=None):
    """Build the flat (non-hierarchical) ANML problem description.

    Args:
        n_loc: Number of locations; defaults to the module-level ``loc``.
        n_hiker: Number of hikers; defaults to the module-level ``hiker``.
        n_car: Number of cars; defaults to the module-level ``car``.

    Returns:
        The problem instance as an ANML string.
    """
    # Fall back to the module globals so the existing no-argument caller
    # (printProblem) keeps its behaviour.
    n_loc = loc if n_loc is None else n_loc
    n_hiker = hiker if n_hiker is None else n_hiker
    n_car = car if n_car is None else n_car

    parts = []
    # Object declarations: l0..lN, c0..cN, h0..hN.
    parts.append("instance Location l0")
    for i in range(1, n_loc):
        parts.append(", l" + str(i))
    parts.append(";\n")
    parts.append("instance Car c0")
    for i in range(1, n_car):
        parts.append(", c" + str(i))
    parts.append(";\n")
    parts.append("instance Hiker h0")
    for i in range(1, n_hiker):
        parts.append(", h" + str(i))
    parts.append(";\n")
    # One walk and one sleep action per hiker for every leg of the trip.
    parts.append("[all] contains {\n")
    for l in range(n_loc - 1):
        for h in range(n_hiker):
            parts.append("\tw%ih%i : walk(h%i,l%i,l%i);\n" % (l, h, h, l, l + 1))
            parts.append("\ts%ih%i : sleep(h%i,tent,l%i);\n" % (l, h, h, l + 1))
    parts.append("};\n")
    # All hikers start each leg together ...
    for l in range(n_loc - 1):
        for h in range(1, n_hiker):
            parts.append("start(w%ih0) = start(w%ih%i);\n" % (l, l, h))
    # ... and a leg only starts after the previous one has ended.
    for l in range(1, n_loc - 1):
        parts.append("end(w%ih0) < start(w%ih0);\n" % (l - 1, l))
    # Initial state: everything at l0, every hiker able to walk.
    parts.append("[start] {\n\ttent.at := l0;\n")
    for i in range(n_car):
        parts.append("\tc%i.at := l0;\n" % i)
    for i in range(n_hiker):
        parts.append("\th%i.at := l0;\n" % i)
        parts.append("\th%i.canWalk := true;\n" % i)
    parts.append("};\n")
    return "".join(parts)
def problemhier(n_loc=None, n_hiker=None, n_car=None):
    """Build the hierarchical ANML problem description.

    Note: the generated oneStep tasks hardcode two hikers (hik0, hik1) and
    two cars (car0, car1), alternating which car is listed first on each
    step; ``n_hiker``/``n_car`` only affect the instance declarations and
    the initial state, exactly as in the original global-based version.

    Args:
        n_loc: Number of places; defaults to the module-level ``loc``.
        n_hiker: Number of hikers; defaults to the module-level ``hiker``.
        n_car: Number of cars; defaults to the module-level ``car``.

    Returns:
        The problem instance as an ANML string.
    """
    n_loc = loc if n_loc is None else n_loc
    n_hiker = hiker if n_hiker is None else n_hiker
    n_car = car if n_car is None else n_car

    parts = []
    # Object declarations: loc0..locN, car0..carN, hik0..hikN.
    parts.append("instance Place loc0")
    for i in range(1, n_loc):
        parts.append(", loc" + str(i))
    parts.append(";\n")
    parts.append("instance Car car0")
    for i in range(1, n_car):
        parts.append(", car" + str(i))
    parts.append(";\n")
    parts.append("instance Hiker hik0")
    for i in range(1, n_hiker):
        parts.append(", hik" + str(i))
    parts.append(";\n")
    # Initial state: all cars but the last start at loc0; the last car is
    # pre-positioned at loc1.
    parts.append("[start] {\n\ttent.at := loc0;\n")
    for i in range(n_car - 1):
        parts.append("\tcar%i.at := loc0;\n" % i)
    parts.append("\tcar%i.at := loc1;\n" % (n_car - 1))
    for i in range(n_hiker):
        parts.append("\thik%i.at := loc0;\n" % i)
        parts.append("\thik%i.canWalk := true;\n" % i)
    parts.append("};\n")
    # One high-level oneStep task per leg, alternating the car order.
    parts.append("[all] contains {\n")
    for i in range(n_loc - 2):
        cars = "car0,car1" if i % 2 == 0 else "car1,car0"
        parts.append("\to%i : oneStep(tent,loc%i,loc%i,loc%i,hik0,hik1,%s);\n"
                     % (i, i, i + 1, i + 2, cars))
    parts.append("};\n")
    # Steps are executed strictly in sequence.
    for i in range(n_loc - 3):
        parts.append("end(o%i) = start(o%i);\n" % (i, i + 1))
    return "".join(parts)
if __name__ == "__main__":
main()
| 25.105769 | 121 | 0.516277 | from __future__ import division
import itertools
import json
import math
import os
import random
import shutil
import subprocess
import sys
loc = 3
hiker =2
car = 2
def main():
global loc
global hiker
global car
if len(sys.argv) > 1:
loc = int(sys.argv[1])
if len(sys.argv) > 2:
hiker = int(sys.argv[2])
if len(sys.argv) > 3:
car = int(sys.argv[3])
assert (loc > 1) ,"not enough locations"
printProblem()
def printProblem():
with open("hikingflat.p" + str(loc) + ".pb.anml", "w") as f:
f.write(problemflat())
with open("hikinghier.p" + str(loc) + ".pb.anml", "w") as f:
f.write(problemhier())
def problemflat():
f=""
f+="instance Location l0"
for i in range(1,loc):
f+=", l" + str(i)
f+=";\n"
f+="instance Car c0"
for i in range(1,car):
f+=", c" + str(i)
f+=";\n"
f+="instance Hiker h0"
for i in range(1,hiker):
f+=", h" + str(i)
f+=";\n"
f+="[all] contains {\n"
for l in range(loc-1):
for h in range (hiker):
f+="\tw" + str(l) + "h" + str(h) + " : walk(h" + str(h) + ",l" + str(l) + ",l" + str(l+1) + ");\n"
f+="\ts" + str(l) + "h" + str(h) + " : sleep(h" + str(h) + ",tent,l" + str(l+1) + ");\n"
f+="};\n"
for l in range(loc-1):
for h in range (1,hiker):
f+="start(w" + str(l) + "h0) = start(w" + str(l) + "h" + str(h) + ");\n"
for l in range(1,loc-1):
f+="end(w" + str(l-1) + "h0) < start(w" + str(l) + "h0);\n"
f+="[start] {\n\ttent.at := l0;\n"
for i in range (car):
f+="\tc" + str(i) + ".at := l0;\n"
for i in range (hiker):
f+="\th" + str(i) + ".at := l0;\n"
f+="\th" + str(i) + ".canWalk := true;\n"
f+="};\n"
return f
def problemhier():
f=""
f+="instance Place loc0"
for i in range(1,loc):
f+=", loc" + str(i)
f+=";\n"
f+="instance Car car0"
for i in range(1,car):
f+=", car" + str(i)
f+=";\n"
f+="instance Hiker hik0"
for i in range(1,hiker):
f+=", hik" + str(i)
f+=";\n"
f+="[start] {\n\ttent.at := loc0;\n"
for i in range (car-1):
f+="\tcar" + str(i) + ".at := loc0;\n"
f+="\tcar" + str(car-1) + ".at := loc1;\n"
for i in range (hiker):
f+="\thik" + str(i) + ".at := loc0;\n"
f+="\thik" + str(i) + ".canWalk := true;\n"
f+="};\n"
f+="[all] contains {\n"
for i in range(loc-2):
if (i%2 == 0):
f+="\to" + str(i) + " : oneStep(tent,loc" + str(i) + ",loc" + str(i+1) + ",loc" + str(i+2) +",hik0,hik1,car0,car1);\n"
else:
f+="\to" + str(i) + " : oneStep(tent,loc" + str(i) + ",loc" + str(i+1) + ",loc" + str(i+2) +",hik0,hik1,car1,car0);\n"
f+="};\n"
for i in range (loc-3):
f+="end(o" + str(i) + ") = start(o" + str(i+1) + ");\n"
return f
if __name__ == "__main__":
main()
| true | true |
1c335b506bbcd73c2a19432394cb2b936afb8669 | 17,202 | py | Python | python/analysis/habernal_comparison/analyse_features.py | UKPLab/tacl2018-preference-convincing | 65eb1cd3bf76f8068889880e0f80178e790350ce | [
"Apache-2.0"
] | 13 | 2019-03-01T19:40:23.000Z | 2022-01-10T05:53:47.000Z | python/analysis/habernal_comparison/analyse_features.py | UKPLab/tacl2018-preference-convincing | 65eb1cd3bf76f8068889880e0f80178e790350ce | [
"Apache-2.0"
] | 12 | 2020-11-13T17:54:01.000Z | 2022-02-09T23:39:11.000Z | python/analysis/habernal_comparison/analyse_features.py | UKPLab/tacl2018-preference-convincing | 65eb1cd3bf76f8068889880e0f80178e790350ce | [
"Apache-2.0"
] | 5 | 2019-02-06T12:08:20.000Z | 2022-01-10T20:40:22.000Z | '''
Created on 1 Jun 2017
Load the learned feature length-scales from a good run that used 'both'
feature types, sort the features by length-scale, and plot the resulting
distribution. Colours or markers on the plot identify each feature's type,
and a zoomed-in variant is provided for the best 25 features.
@author: simpson
'''
import os, pickle
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from tests import load_embeddings, get_fold_data, TestRunner
from data_loader import load_train_test_data, load_ling_features
from matplotlib.ticker import MaxNLocator
if __name__ == '__main__':
# expt_folder_name = 'crowdsourcing_argumentation_opt/'
expt_folder_name = 'crowdsourcing_argumentation_expts/'
dataset = 'UKPConvArgStrict' # 'UKPConvArgAll_evalMACE'
methods = ['SinglePrefGP_weaksprior'] # ['SinglePrefGP_weaksprior_1104']
feature_type = 'both'
embeddings_type = 'word_mean'
di = 0.00
selected_folds_all = [[0, 1, 6, 12, 13]]
original_fold_order_file = './results/feature_analysis/foldorder_old.txt'
o_fold_order = np.genfromtxt(os.path.expanduser(original_fold_order_file), dtype=str)
mean_ls = None
for m, method in enumerate(methods):
data_root_dir = os.path.expanduser("~/data/personalised_argumentation/")
resultsfile_template = 'habernal_%s_%s_%s_%s_acc%.2f_di%.2f'
resultsfile = data_root_dir + 'outputdata/' + expt_folder_name + \
resultsfile_template % (dataset, method,
feature_type, embeddings_type, 1.0, di) + '_test.pkl'
resultsdir = data_root_dir + 'outputdata/' + expt_folder_name + \
resultsfile_template % (dataset, method,
feature_type, embeddings_type, 1.0, di)
foldorderfile = None
if foldorderfile is not None:
fold_order = np.genfromtxt(os.path.expanduser(foldorderfile),
dtype=str)
elif os.path.isfile(resultsdir + '/foldorder.txt'):
fold_order = np.genfromtxt(os.path.expanduser(resultsdir + '/foldorder.txt'),
dtype=str)
else:
fold_order = None
selected_folds = selected_folds_all[m]
nFolds = len(selected_folds)
if os.path.isfile(resultsfile):
with open(resultsfile, 'r') as fh:
data = pickle.load(fh)
if nFolds < 1:
nFolds = len(data[0])
else:
data = None
min_folds = 0
# Sort the features by their ID.
# If we have discarded some features that were all 0s, the current index will not be the original feature idx.
# How to map them back? Reload the original data and find out which features were discarded.
folds, _, folds_regression, word_index_to_embeddings_map, word_to_indices_map, index_to_word_map = load_train_test_data(dataset)
word_embeddings = load_embeddings(word_index_to_embeddings_map)
ling_feat_spmatrix, docids = load_ling_features(dataset)
#default_ls_value = compute_lengthscale_heuristic(feature_type, embeddings_type, word_embeddings,
# ling_feat_spmatrix, docids, folds, index_to_word_map)
for o_foldidx, o_fold in enumerate(o_fold_order):
if o_foldidx not in selected_folds:
continue
if fold_order is None: # fall back to the order on the current machine
foldidx = np.argwhere(np.array(list(folds.keys())) == o_fold)[0][0]
fold = list(folds.keys())[foldidx]
else:
foldidx = np.argwhere(fold_order == o_fold)[0][0]
fold = fold_order[foldidx]
if fold[-2] == "'" and fold[0] == "'":
fold = fold[1:-2]
elif fold[-1] == "'" and fold[0] == "'":
fold = fold[1:-1]
fold_order[foldidx] = fold
# look for new-style data in separate files for each fold. Prefer new-style if both are found.
foldfile = resultsdir + '/fold%i.pkl' % foldidx
if os.path.isfile(foldfile):
with open(foldfile, 'rb') as fh:
data_f = pickle.load(fh, encoding='latin1')
else: # convert the old stuff to new stuff
if data is None:
min_folds = foldidx+1
print('Skipping fold with no data %i' % foldidx)
print("Skipping results for %s, %s, %s, %s" % (method,
dataset,
feature_type,
embeddings_type))
print("Skipped filename was: %s, old-style results file would be %s" % (foldfile,
resultsfile))
continue
if not os.path.isdir(resultsdir):
os.mkdir(resultsdir)
data_f = []
for thing in data:
if foldidx in thing:
data_f.append(thing[foldidx])
else:
data_f.append(thing)
with open(foldfile, 'wb') as fh:
pickle.dump(data_f, fh)
trainids_a1, trainids_a2, prefs_train, personIDs_train, testids_a1, testids_a2, prefs_test, personIDs_test, \
X, uids, utexts, _ = get_fold_data(folds, fold, docids)
# get the embedding values for the test data -- need to find embeddings of the whole piece of text
runner = TestRunner('crowdsourcing_argumentation_expts_first_submission', [dataset], [feature_type],
[embeddings_type], [method], 0)
runner.embeddings = word_embeddings
runner.X = X
runner.ling_feat_spmatrix = ling_feat_spmatrix
runner.load_features(feature_type, embeddings_type, trainids_a1, trainids_a2, uids)
items_feat = runner.items_feat
valid_feats = runner.valid_feats
min_vals = np.min(items_feat, axis=0)
max_vals = np.max(items_feat, axis=0)
nfeats = len(valid_feats)
# take the mean ls for each feature across the folds
if mean_ls is None:
mean_ls = np.zeros(nfeats, dtype=float)
totals = np.zeros(nfeats, dtype=int)
#print "Warning: not computing means."
learned_ls = data_f[7]
initial_ls = data_f[5] #/ float(len(valid_feats)) # we want the data relative to the median -- the initial LS were also scaled by no. features
mean_ls[valid_feats] += learned_ls / initial_ls # normalisation in original drafts
norm_ls = learned_ls / (max_vals - min_vals)
#mean_ls[valid_feats] += norm_ls
print("Max normed l: %f" % np.max(norm_ls))
totals[valid_feats] += 1
#mean_ls = mean_ls[valid_feats]
#totals = totals[valid_feats]
mean_ls[totals != 0] = mean_ls[totals != 0] / totals[totals != 0]
if feature_type == 'debug':
feat_cats = np.array(['one', 'two', 'three'])
featnames = feat_cats
col = np.array(['r', 'lightgreen', 'b'])
marks = np.array(['2', 'p', '^'])
nembeddings = 3
else:
# assign category labels to each feature
feat_cats = np.empty(nfeats, dtype=object)
nembeddings = word_embeddings.shape[1]
feat_cats[:nembeddings] = "embeddings"
catnames = np.array(['embeddings', '_pos_ngram', 'ProductionRule', 'Rate', 'CONTEXTUALITY_MEASURE_FN',
'ExclamationRatio', 'upperCaseRatio', 'Ratio', 'DependencyTreeDepth', 'Modal',
'sentiment', 'oovWordsCount', 'spell_skill', '_length', 'word_more', 'Ending', 'ner.type.', '_'])
special_catnames = np.array(['flesch', 'coleman', 'ari'])
marks = np.array(['2', 'p', '^', 'H', 'x', ',', 'D', '<', '>', 'v', ',', '8', '1', 'o', '*'])
col = np.array(['r', 'lightgreen', 'b', 'y', 'purple', 'black', 'darkgoldenrod', 'magenta', 'darkgreen', 'darkblue',
'brown', 'darkgray', 'orange', 'dodgerblue', 'lightgray', 'cyan', ])
with open(data_root_dir + "/tempdata/feature_names_all3.txt", 'r') as fh:
lines = fh.readlines()
featnames = lines[0].strip()
featidxs = lines[1].strip()
if featnames[-1] == ']':
featnames = featnames[:-1]
if featnames[0] == '[':
featnames = featnames[1:]
featidxs = np.fromstring(featidxs, dtype=int, sep=',') + nembeddings
featnames = np.array(featnames.split(', '), dtype=str)
for f, fname in enumerate(featnames):
featnames[f] = featnames[f][2:] # skip the a1 bit at the start
for catname in special_catnames:
if catname == fname:
print("%i, Recognised %s as special cat %s" % (f, fname, catname))
feat_cats[nembeddings + f] = catname
for catname in catnames:
if catname in fname:
print("%i, Recognised %s as type %s" % (f, fname, catname))
feat_cats[nembeddings + f] = catname
break
if not feat_cats[nembeddings + f]:
print("%i, Unrecognised language feature: %s" % (f, fname))
feat_cats[nembeddings + f] = 'ngram'
for catname in catnames:
print("No. features in category %s = %i" % (catname, np.sum(feat_cats == catname)))
feat_cats[feat_cats=='_'] = 'ngram'
# readability
feat_cats[feat_cats=='ari'] = 'vocab/surface'
feat_cats[feat_cats=='coleman'] = 'vocab/surface'
feat_cats[feat_cats=='flesch'] = 'vocab/surface'
feat_cats[feat_cats=='Rate'] = 'other'
feat_cats[feat_cats=='Ratio'] = 'other'
feat_cats[feat_cats=='Modal'] = 'other'
feat_cats[feat_cats=='CONTEXTUALITY_MEASURE_FN'] = 'other'
feat_cats[feat_cats == 'Ending'] = 'other'
feat_cats[feat_cats=='_pos_ngram'] = 'POS'
feat_cats[feat_cats=='_length'] = 'other'
feat_cats[feat_cats=='word_more'] = 'other'
feat_cats[feat_cats=='upperCaseRatio'] = 'other'
feat_cats[feat_cats=='oovWordsCount'] = 'other'
feat_cats[feat_cats=='spell_skill'] = 'other'
feat_cats[feat_cats=='ExclamationRatio'] = 'other'
feat_cats[feat_cats=='DependencyTreeDepth'] = 'other'
feat_cats[feat_cats=='ProductionRule'] = 'prod. rule'
feat_cats[feat_cats=='ner.type.'] = 'other'
feat_cats[feat_cats=='sentiment'] = 'other'
# for f in range(len(feat_cats)):
# feat_cats[f] = feat_cats[f].lower()
print("After combining some categories.............................")
for catname in np.unique(feat_cats):
print("No. features in category %s = %i" % (catname, np.sum(feat_cats == catname)))
# sort by length scale
sorted_idxs = np.argsort(mean_ls)
sorted_vals = mean_ls[sorted_idxs]
# ignore those that were not valid
sorted_vals = sorted_vals[totals[sorted_idxs]>0]
sorted_idxs = sorted_idxs[totals[sorted_idxs]>0]
sorted_cats = feat_cats[sorted_idxs]
sorted_cats = sorted_cats[totals[sorted_idxs]>0]
embeddingnames = np.empty(nembeddings, dtype=object)
for e in range(nembeddings):
embeddingnames[e] = 'Emb_dimension_%i' % e
featnames = np.concatenate((embeddingnames, featnames))
sorted_featnames = featnames[sorted_idxs]
sorted_featnames = sorted_featnames[totals[sorted_idxs]>0]
'''
An alternative to plotting the distributions would be to list the top ten most important and least important features.
'''
figure_path = os.path.expanduser('./documents/pref_learning_for_convincingness/figures/features2/')
np.savetxt(figure_path + '/feature_table.tex', np.concatenate((sorted_featnames[:, None], sorted_vals[:, None]),
axis=1), fmt='%s & %.5f \\nonumber\\\\')
cat_arr = []
labels = []
for c, cat in enumerate(np.unique(feat_cats)):
clengthscales = sorted_vals[sorted_cats == cat]
cat_arr.append(clengthscales)
labels.append(cat)
# # Try a histogram instead? For each length-scale band, how many features of each type are there?
# plt.figure()
#
# plt.hist(cat_arr, label=labels, color=col[:len(labels)], histtype='bar',
# bins=np.logspace(np.log10(1), np.log10(100000), 18), density=True) # density=True causes the values to be normalised
# plt.xlabel('length-scale')
# plt.ylabel('log_10 no. features')
# plt.legend(loc='best')
# plt.gca().set_xscale('log')
#
# plt.savefig(figure_path + 'hist.pdf')
# produce content for a latex table
matplotlib.rcParams.update({'font.size': 16})
plt.figure(figsize=(10,3))
meds = []
low = []
high = []
mins = []
maxs = []
vals = []
for c, cat in enumerate(np.unique(feat_cats)):
clengthscales = sorted_vals[sorted_cats == cat]
#print '%s & %s & %s' & (cat, np.median(clengthscales), np.percentile(clengthscales, 25), np.percentile(clengthscales, 75))
#meds.append(np.median(clengthscales))
#low.append(np.percentile(clengthscales, 25))
#high.append(np.percentile(clengthscales, 75))
#mins.append(np.min(clengthscales))
#maxs.append(np.max(clengthscales))
vals.append(clengthscales)
ax = plt.subplot(1, len(np.unique(feat_cats)), c+1)
#plt.xlim(0, 20)
plt.hist(clengthscales, label=labels[c], color='blue', histtype='bar',
#bins=np.logspace(np.log10(100), np.log10(100000), 24), density=False, orientation='horizontal')
#bins = np.logspace(np.log10(5500), np.log10(34000), 24), density = False, orientation = 'horizontal')
bins=np.arange(30) * 0.02 + 0.52, density=False, orientation='horizontal')
# ax.set_yscale('log')
#
if c == 0:
plt.ylabel('length-scale')# x10^3')
#ax.get_yaxis().set_ticks([6e3, 1e4, 2e4, 3e4])
#ax.get_yaxis().set_ticklabels(['6', '10', '20', '30'])
else:
ax.get_yaxis().set_ticks([])
ax.get_yaxis().set_ticklabels([])
#ax.get_xaxis().set_ticks([]) # write the x axis limits in the caption!!!
plt.title(cat)
#plt.gca().yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
# for i, v in enumerate(vals):
# vals[i] = np.log10(v)
#bp = plt.boxplot(vals, labels=labels, notch=0, whiskerprops={'linestyle':'solid'},
# patch_artist=True)
#plt.setp(bp['boxes'], color='black')
#plt.setp(bp['whiskers'], color='black')
#for patch in bp['boxes']:
# patch.set_facecolor('tan')
# yrange = np.arange(-2, 3)
# plt.gca().set_yticks(yrange)
# plt.gca().set_yticklabels(10.0**yrange)
# plt.gca().set_axisbelow(True)
#plt.ylim(0,3)
plt.savefig(figure_path + 'boxplot.pdf')
############
# plt.figure()
#
# rowsize = 5
#
# for c, cat in enumerate(np.unique(feat_cats)):
# clengthscales = sorted_vals[sorted_cats == cat]
# #plt.scatter(clengthscales, np.zeros(len(clengthscales)) + (1+c)*1000, marker=marks[c], color=col[c])
# ax = plt.subplot(len(labels)/rowsize + 1, rowsize, c+1)
# plt.plot(clengthscales, color=col[c], label=cat, marker=marks[c], linewidth=0)
# plt.title(cat)
# plt.ylim(np.min(sorted_vals), np.max(sorted_vals))
#
# frame1 = plt.gca()
# if np.mod(c, rowsize):
# frame1.axes.get_yaxis().set_ticks([])
# else:
# plt.ylabel('length-scale')
# ax.xaxis.set_major_locator(MaxNLocator(nbins=2))
#
# plt.xlabel('features')
# plt.show()
output = np.concatenate((sorted_cats[:, None], featnames[sorted_idxs][:, None], sorted_vals[:, None]), axis=1)
np.savetxt("./results/feature_analysis/features.tsv", output, fmt='%s\t%s\t%s\t', delimiter='\t', header='category, feature_name, length-scale')
# repeat this but make a separate sorted file by category
for catname in np.unique(sorted_cats):
catidxs = sorted_cats == catname
output = np.concatenate((sorted_cats[catidxs, None], featnames[sorted_idxs][catidxs, None],
sorted_vals[catidxs, None]), axis=1)
np.savetxt("./results/feature_analysis/features_%s.tsv" % catname, output, fmt='%s\t%s\t%s\t', delimiter='\t',
header='category, feature_name, length-scale')
print('all done.') | 41.651332 | 154 | 0.578421 |
import os, pickle
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from tests import load_embeddings, get_fold_data, TestRunner
from data_loader import load_train_test_data, load_ling_features
from matplotlib.ticker import MaxNLocator
if __name__ == '__main__':
expt_folder_name = 'crowdsourcing_argumentation_expts/'
dataset = 'UKPConvArgStrict'
methods = ['SinglePrefGP_weaksprior']
feature_type = 'both'
embeddings_type = 'word_mean'
di = 0.00
selected_folds_all = [[0, 1, 6, 12, 13]]
original_fold_order_file = './results/feature_analysis/foldorder_old.txt'
o_fold_order = np.genfromtxt(os.path.expanduser(original_fold_order_file), dtype=str)
mean_ls = None
for m, method in enumerate(methods):
data_root_dir = os.path.expanduser("~/data/personalised_argumentation/")
resultsfile_template = 'habernal_%s_%s_%s_%s_acc%.2f_di%.2f'
resultsfile = data_root_dir + 'outputdata/' + expt_folder_name + \
resultsfile_template % (dataset, method,
feature_type, embeddings_type, 1.0, di) + '_test.pkl'
resultsdir = data_root_dir + 'outputdata/' + expt_folder_name + \
resultsfile_template % (dataset, method,
feature_type, embeddings_type, 1.0, di)
foldorderfile = None
if foldorderfile is not None:
fold_order = np.genfromtxt(os.path.expanduser(foldorderfile),
dtype=str)
elif os.path.isfile(resultsdir + '/foldorder.txt'):
fold_order = np.genfromtxt(os.path.expanduser(resultsdir + '/foldorder.txt'),
dtype=str)
else:
fold_order = None
selected_folds = selected_folds_all[m]
nFolds = len(selected_folds)
if os.path.isfile(resultsfile):
with open(resultsfile, 'r') as fh:
data = pickle.load(fh)
if nFolds < 1:
nFolds = len(data[0])
else:
data = None
min_folds = 0
folds, _, folds_regression, word_index_to_embeddings_map, word_to_indices_map, index_to_word_map = load_train_test_data(dataset)
word_embeddings = load_embeddings(word_index_to_embeddings_map)
ling_feat_spmatrix, docids = load_ling_features(dataset)
for o_foldidx, o_fold in enumerate(o_fold_order):
if o_foldidx not in selected_folds:
continue
if fold_order is None:
foldidx = np.argwhere(np.array(list(folds.keys())) == o_fold)[0][0]
fold = list(folds.keys())[foldidx]
else:
foldidx = np.argwhere(fold_order == o_fold)[0][0]
fold = fold_order[foldidx]
if fold[-2] == "'" and fold[0] == "'":
fold = fold[1:-2]
elif fold[-1] == "'" and fold[0] == "'":
fold = fold[1:-1]
fold_order[foldidx] = fold
foldfile = resultsdir + '/fold%i.pkl' % foldidx
if os.path.isfile(foldfile):
with open(foldfile, 'rb') as fh:
data_f = pickle.load(fh, encoding='latin1')
else:
if data is None:
min_folds = foldidx+1
print('Skipping fold with no data %i' % foldidx)
print("Skipping results for %s, %s, %s, %s" % (method,
dataset,
feature_type,
embeddings_type))
print("Skipped filename was: %s, old-style results file would be %s" % (foldfile,
resultsfile))
continue
if not os.path.isdir(resultsdir):
os.mkdir(resultsdir)
data_f = []
for thing in data:
if foldidx in thing:
data_f.append(thing[foldidx])
else:
data_f.append(thing)
with open(foldfile, 'wb') as fh:
pickle.dump(data_f, fh)
trainids_a1, trainids_a2, prefs_train, personIDs_train, testids_a1, testids_a2, prefs_test, personIDs_test, \
X, uids, utexts, _ = get_fold_data(folds, fold, docids)
runner = TestRunner('crowdsourcing_argumentation_expts_first_submission', [dataset], [feature_type],
[embeddings_type], [method], 0)
runner.embeddings = word_embeddings
runner.X = X
runner.ling_feat_spmatrix = ling_feat_spmatrix
runner.load_features(feature_type, embeddings_type, trainids_a1, trainids_a2, uids)
items_feat = runner.items_feat
valid_feats = runner.valid_feats
min_vals = np.min(items_feat, axis=0)
max_vals = np.max(items_feat, axis=0)
nfeats = len(valid_feats)
if mean_ls is None:
mean_ls = np.zeros(nfeats, dtype=float)
totals = np.zeros(nfeats, dtype=int)
learned_ls = data_f[7]
initial_ls = data_f[5] ls / (max_vals - min_vals)
print("Max normed l: %f" % np.max(norm_ls))
totals[valid_feats] += 1
mean_ls[totals != 0] = mean_ls[totals != 0] / totals[totals != 0]
if feature_type == 'debug':
feat_cats = np.array(['one', 'two', 'three'])
featnames = feat_cats
col = np.array(['r', 'lightgreen', 'b'])
marks = np.array(['2', 'p', '^'])
nembeddings = 3
else:
feat_cats = np.empty(nfeats, dtype=object)
nembeddings = word_embeddings.shape[1]
feat_cats[:nembeddings] = "embeddings"
catnames = np.array(['embeddings', '_pos_ngram', 'ProductionRule', 'Rate', 'CONTEXTUALITY_MEASURE_FN',
'ExclamationRatio', 'upperCaseRatio', 'Ratio', 'DependencyTreeDepth', 'Modal',
'sentiment', 'oovWordsCount', 'spell_skill', '_length', 'word_more', 'Ending', 'ner.type.', '_'])
special_catnames = np.array(['flesch', 'coleman', 'ari'])
marks = np.array(['2', 'p', '^', 'H', 'x', ',', 'D', '<', '>', 'v', ',', '8', '1', 'o', '*'])
col = np.array(['r', 'lightgreen', 'b', 'y', 'purple', 'black', 'darkgoldenrod', 'magenta', 'darkgreen', 'darkblue',
'brown', 'darkgray', 'orange', 'dodgerblue', 'lightgray', 'cyan', ])
with open(data_root_dir + "/tempdata/feature_names_all3.txt", 'r') as fh:
lines = fh.readlines()
featnames = lines[0].strip()
featidxs = lines[1].strip()
if featnames[-1] == ']':
featnames = featnames[:-1]
if featnames[0] == '[':
featnames = featnames[1:]
featidxs = np.fromstring(featidxs, dtype=int, sep=',') + nembeddings
featnames = np.array(featnames.split(', '), dtype=str)
for f, fname in enumerate(featnames):
featnames[f] = featnames[f][2:]
for catname in special_catnames:
if catname == fname:
print("%i, Recognised %s as special cat %s" % (f, fname, catname))
feat_cats[nembeddings + f] = catname
for catname in catnames:
if catname in fname:
print("%i, Recognised %s as type %s" % (f, fname, catname))
feat_cats[nembeddings + f] = catname
break
if not feat_cats[nembeddings + f]:
print("%i, Unrecognised language feature: %s" % (f, fname))
feat_cats[nembeddings + f] = 'ngram'
for catname in catnames:
print("No. features in category %s = %i" % (catname, np.sum(feat_cats == catname)))
feat_cats[feat_cats=='_'] = 'ngram'
feat_cats[feat_cats=='ari'] = 'vocab/surface'
feat_cats[feat_cats=='coleman'] = 'vocab/surface'
feat_cats[feat_cats=='flesch'] = 'vocab/surface'
feat_cats[feat_cats=='Rate'] = 'other'
feat_cats[feat_cats=='Ratio'] = 'other'
feat_cats[feat_cats=='Modal'] = 'other'
feat_cats[feat_cats=='CONTEXTUALITY_MEASURE_FN'] = 'other'
feat_cats[feat_cats == 'Ending'] = 'other'
feat_cats[feat_cats=='_pos_ngram'] = 'POS'
feat_cats[feat_cats=='_length'] = 'other'
feat_cats[feat_cats=='word_more'] = 'other'
feat_cats[feat_cats=='upperCaseRatio'] = 'other'
feat_cats[feat_cats=='oovWordsCount'] = 'other'
feat_cats[feat_cats=='spell_skill'] = 'other'
feat_cats[feat_cats=='ExclamationRatio'] = 'other'
feat_cats[feat_cats=='DependencyTreeDepth'] = 'other'
feat_cats[feat_cats=='ProductionRule'] = 'prod. rule'
feat_cats[feat_cats=='ner.type.'] = 'other'
feat_cats[feat_cats=='sentiment'] = 'other'
print("After combining some categories.............................")
for catname in np.unique(feat_cats):
print("No. features in category %s = %i" % (catname, np.sum(feat_cats == catname)))
sorted_idxs = np.argsort(mean_ls)
sorted_vals = mean_ls[sorted_idxs]
sorted_vals = sorted_vals[totals[sorted_idxs]>0]
sorted_idxs = sorted_idxs[totals[sorted_idxs]>0]
sorted_cats = feat_cats[sorted_idxs]
sorted_cats = sorted_cats[totals[sorted_idxs]>0]
embeddingnames = np.empty(nembeddings, dtype=object)
for e in range(nembeddings):
embeddingnames[e] = 'Emb_dimension_%i' % e
featnames = np.concatenate((embeddingnames, featnames))
sorted_featnames = featnames[sorted_idxs]
sorted_featnames = sorted_featnames[totals[sorted_idxs]>0]
figure_path = os.path.expanduser('./documents/pref_learning_for_convincingness/figures/features2/')
np.savetxt(figure_path + '/feature_table.tex', np.concatenate((sorted_featnames[:, None], sorted_vals[:, None]),
axis=1), fmt='%s & %.5f \\nonumber\\\\')
cat_arr = []
labels = []
for c, cat in enumerate(np.unique(feat_cats)):
clengthscales = sorted_vals[sorted_cats == cat]
cat_arr.append(clengthscales)
labels.append(cat)
meds = []
low = []
high = []
mins = []
maxs = []
vals = []
for c, cat in enumerate(np.unique(feat_cats)):
clengthscales = sorted_vals[sorted_cats == cat]
vals.append(clengthscales)
ax = plt.subplot(1, len(np.unique(feat_cats)), c+1)
plt.hist(clengthscales, label=labels[c], color='blue', histtype='bar',
bins=np.arange(30) * 0.02 + 0.52, density=False, orientation='horizontal')
if c == 0:
plt.ylabel('length-scale')
#ax.get_yaxis().set_ticks([6e3, 1e4, 2e4, 3e4])
#ax.get_yaxis().set_ticklabels(['6', '10', '20', '30'])
else:
ax.get_yaxis().set_ticks([])
ax.get_yaxis().set_ticklabels([])
#ax.get_xaxis().set_ticks([]) # write the x axis limits in the caption!!!
plt.title(cat)
#plt.gca().yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
# for i, v in enumerate(vals):
# vals[i] = np.log10(v)
#bp = plt.boxplot(vals, labels=labels, notch=0, whiskerprops={'linestyle':'solid'},
# patch_artist=True)
#plt.setp(bp['boxes'], color='black')
#plt.setp(bp['whiskers'], color='black')
#for patch in bp['boxes']:
# patch.set_facecolor('tan')
# yrange = np.arange(-2, 3)
# plt.gca().set_yticks(yrange)
# plt.gca().set_yticklabels(10.0**yrange)
# plt.gca().set_axisbelow(True)
#plt.ylim(0,3)
plt.savefig(figure_path + 'boxplot.pdf')
############
# plt.figure()
#
# rowsize = 5
#
# for c, cat in enumerate(np.unique(feat_cats)):
# clengthscales = sorted_vals[sorted_cats == cat]
# #plt.scatter(clengthscales, np.zeros(len(clengthscales)) + (1+c)*1000, marker=marks[c], color=col[c])
# ax = plt.subplot(len(labels)/rowsize + 1, rowsize, c+1)
# plt.plot(clengthscales, color=col[c], label=cat, marker=marks[c], linewidth=0)
# plt.title(cat)
# plt.ylim(np.min(sorted_vals), np.max(sorted_vals))
#
# frame1 = plt.gca()
# if np.mod(c, rowsize):
# frame1.axes.get_yaxis().set_ticks([])
# else:
# plt.ylabel('length-scale')
# ax.xaxis.set_major_locator(MaxNLocator(nbins=2))
#
# plt.xlabel('features')
# plt.show()
output = np.concatenate((sorted_cats[:, None], featnames[sorted_idxs][:, None], sorted_vals[:, None]), axis=1)
np.savetxt("./results/feature_analysis/features.tsv", output, fmt='%s\t%s\t%s\t', delimiter='\t', header='category, feature_name, length-scale')
# repeat this but make a separate sorted file by category
for catname in np.unique(sorted_cats):
catidxs = sorted_cats == catname
output = np.concatenate((sorted_cats[catidxs, None], featnames[sorted_idxs][catidxs, None],
sorted_vals[catidxs, None]), axis=1)
np.savetxt("./results/feature_analysis/features_%s.tsv" % catname, output, fmt='%s\t%s\t%s\t', delimiter='\t',
header='category, feature_name, length-scale')
print('all done.') | true | true |
1c335c7a96e78dad784537a49f59c108c83557f7 | 1,642 | py | Python | placethings/ilp/method.py | kumokay/placethings | c1fa9aace89be5766e7aa24e4df8bc5d5ca8fa66 | [
"Apache-2.0"
] | 2 | 2019-09-28T09:36:29.000Z | 2019-11-14T20:48:24.000Z | placethings/ilp/method.py | kumokay/placethings | c1fa9aace89be5766e7aa24e4df8bc5d5ca8fa66 | [
"Apache-2.0"
] | 1 | 2019-07-24T20:54:46.000Z | 2019-08-09T20:07:43.000Z | placethings/ilp/method.py | kumokay/placethings | c1fa9aace89be5766e7aa24e4df8bc5d5ca8fa66 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from copy import deepcopy
import logging
import pulp
# NOTE: to use glpk solver, sudo apt-get install glpk-utils
from placethings.graph_gen import task_graph
from placethings.ilp import utils, solver
log = logging.getLogger()
def get_max_latency(Gt, Gd, result_mapping, use_assigned_latency=True):
src_list, dst_list, all_paths = utils.find_all_simple_path(Gt)
max_latency = 0
for path in all_paths:
path_length = solver.get_path_length(
path, Gt, Gd, result_mapping, use_assigned_latency)
max_latency = max(path_length, max_latency)
return max_latency
def place_things(
Gt_ro, Gnd_ro, is_export, export_suffix='', use_assigned_latency=True):
Gt = deepcopy(Gt_ro)
Gnd = deepcopy(Gnd_ro)
status, result_mapping, result_latency = solver.solve(
Gt, Gnd, use_assigned_latency)
assert status == pulp.constants.LpStatusOptimal
log.info('solver status: {}'.format(pulp.LpStatus[status]))
log.info('check solution for all simple path from src to dst')
max_latency = get_max_latency(
Gt, Gnd, result_mapping, use_assigned_latency=use_assigned_latency)
log.info('max_latency={}'.format(max_latency))
log.info('result_mapping={}'.format(result_mapping))
log.info('result_latency={}'.format(result_latency))
# update mapping and gen node labels
Gt = task_graph.update_graph(
result_mapping, result_latency, Gt, Gnd, is_export, export_suffix)
return Gt, result_mapping
| 34.93617 | 79 | 0.744823 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from copy import deepcopy
import logging
import pulp
from placethings.graph_gen import task_graph
from placethings.ilp import utils, solver
log = logging.getLogger()
def get_max_latency(Gt, Gd, result_mapping, use_assigned_latency=True):
src_list, dst_list, all_paths = utils.find_all_simple_path(Gt)
max_latency = 0
for path in all_paths:
path_length = solver.get_path_length(
path, Gt, Gd, result_mapping, use_assigned_latency)
max_latency = max(path_length, max_latency)
return max_latency
def place_things(
Gt_ro, Gnd_ro, is_export, export_suffix='', use_assigned_latency=True):
Gt = deepcopy(Gt_ro)
Gnd = deepcopy(Gnd_ro)
status, result_mapping, result_latency = solver.solve(
Gt, Gnd, use_assigned_latency)
assert status == pulp.constants.LpStatusOptimal
log.info('solver status: {}'.format(pulp.LpStatus[status]))
log.info('check solution for all simple path from src to dst')
max_latency = get_max_latency(
Gt, Gnd, result_mapping, use_assigned_latency=use_assigned_latency)
log.info('max_latency={}'.format(max_latency))
log.info('result_mapping={}'.format(result_mapping))
log.info('result_latency={}'.format(result_latency))
Gt = task_graph.update_graph(
result_mapping, result_latency, Gt, Gnd, is_export, export_suffix)
return Gt, result_mapping
| true | true |
1c335e4f3a102a7a5ae4cc8c6bb0efa75cb6e72d | 2,468 | py | Python | 5. Probability and Statistics/statistics-intermediate/The Mode-307.py | bibekuchiha/dataquest | c7d8a2966fe2eee864442a59d64309033ea9993e | [
"MIT"
] | null | null | null | 5. Probability and Statistics/statistics-intermediate/The Mode-307.py | bibekuchiha/dataquest | c7d8a2966fe2eee864442a59d64309033ea9993e | [
"MIT"
] | null | null | null | 5. Probability and Statistics/statistics-intermediate/The Mode-307.py | bibekuchiha/dataquest | c7d8a2966fe2eee864442a59d64309033ea9993e | [
"MIT"
] | null | null | null | ## 1. Introduction ##
import pandas as pd
houses = pd.read_table('AmesHousing_1.txt')
print(houses[['Land Slope','Roof Style','Kitchen AbvGr']])
scale_land = 'ordinal'
scale_roof = 'nominal'
kitchen_variable = 'discrete'
## 2. The Mode for Ordinal Variables ##
def mode(array):
counts = {}
for value in array:
if value in counts:
counts[value] += 1
else:
counts[value] = 1
return max(counts, key = counts.get)
mode_function = mode(houses['Land Slope'])
mode_method = houses['Land Slope'].mode()
same = (mode_function == mode_method)
## 3. The Mode for Nominal Variables ##
# The function we wrote (you can copy-paste yours from the previous screen)
def mode(array):
counts = {}
for value in array:
if value in counts:
counts[value] += 1
else:
counts[value] = 1
return max(counts, key = counts.get)
def mode(array):
counts = {}
for value in array:
if value in counts:
counts[value] += 1
else:
counts[value] = 1
return (max(counts, key = counts.get),
counts
)
mode, value_counts = mode(houses['Roof Style'])
## 4. The Mode for Discrete Variables ##
bedroom_variable = 'discrete'
bedroom_mode = houses['Bedroom AbvGr'].mode()
price_variable = 'continuous'
## 5. Special Cases ##
intervals = pd.interval_range(start = 0, end = 800000, freq = 100000)
gr_freq_table = pd.Series([0,0,0,0,0,0,0,0], index = intervals)
for value in houses['SalePrice']:
for interval in intervals:
if value in interval:
gr_freq_table.loc[interval] += 1
break
print(gr_freq_table)
mode = 150000
mean = houses['SalePrice'].mean()
median = houses['SalePrice'].median()
sentence_1 = True
sentence_2 = True
## 6. Skewed Distributions ##
distribution_1 = {'mean': 3021 , 'median': 3001, 'mode': 2947}
distribution_2 = {'median': 924 , 'mode': 832, 'mean': 962}
distribution_3 = {'mode': 202, 'mean': 143, 'median': 199}
shape_1 = 'right skew'
shape_2 = 'right skew'
shape_3 = 'left skew'
## 7. Symmetrical Distributions ##
houses['Mo Sold'].plot.kde(xlim = [1,12])
import matplotlib.pyplot as plt
plt.axvline(houses['Mo Sold'].mode()[0], color = 'Green', label = 'Mode')
plt.axvline(houses['Mo Sold'].median(), color = 'Orange', label = 'Median')
plt.axvline(houses['Mo Sold'].mean(), color = 'Black', label = 'Mean')
plt.legend() | 23.961165 | 75 | 0.623987 | houses = pd.read_table('AmesHousing_1.txt')
print(houses[['Land Slope','Roof Style','Kitchen AbvGr']])
scale_land = 'ordinal'
scale_roof = 'nominal'
kitchen_variable = 'discrete'
for value in array:
if value in counts:
counts[value] += 1
else:
counts[value] = 1
return max(counts, key = counts.get)
mode_function = mode(houses['Land Slope'])
mode_method = houses['Land Slope'].mode()
same = (mode_function == mode_method)
for value in array:
if value in counts:
counts[value] += 1
else:
counts[value] = 1
return max(counts, key = counts.get)
def mode(array):
counts = {}
for value in array:
if value in counts:
counts[value] += 1
else:
counts[value] = 1
return (max(counts, key = counts.get),
counts
)
mode, value_counts = mode(houses['Roof Style'])
de = houses['Bedroom AbvGr'].mode()
price_variable = 'continuous'
l_range(start = 0, end = 800000, freq = 100000)
gr_freq_table = pd.Series([0,0,0,0,0,0,0,0], index = intervals)
for value in houses['SalePrice']:
for interval in intervals:
if value in interval:
gr_freq_table.loc[interval] += 1
break
print(gr_freq_table)
mode = 150000
mean = houses['SalePrice'].mean()
median = houses['SalePrice'].median()
sentence_1 = True
sentence_2 = True
1 , 'median': 3001, 'mode': 2947}
distribution_2 = {'median': 924 , 'mode': 832, 'mean': 962}
distribution_3 = {'mode': 202, 'mean': 143, 'median': 199}
shape_1 = 'right skew'
shape_2 = 'right skew'
shape_3 = 'left skew'
[1,12])
import matplotlib.pyplot as plt
plt.axvline(houses['Mo Sold'].mode()[0], color = 'Green', label = 'Mode')
plt.axvline(houses['Mo Sold'].median(), color = 'Orange', label = 'Median')
plt.axvline(houses['Mo Sold'].mean(), color = 'Black', label = 'Mean')
plt.legend() | true | true |
1c33613869982dc2d894924cc6286d86815c454a | 1,417 | py | Python | tests/lit/lit/formats/alivetest.py | rutgers-apl/alive-loops | 2ce0d2f02f4c1554451f9741520ffe7e92b4b50e | [
"Apache-2.0"
] | 4 | 2016-04-07T17:30:00.000Z | 2018-12-02T00:06:28.000Z | tests/lit/lit/formats/alivetest.py | rutgers-apl/alive-loops | 2ce0d2f02f4c1554451f9741520ffe7e92b4b50e | [
"Apache-2.0"
] | null | null | null | tests/lit/lit/formats/alivetest.py | rutgers-apl/alive-loops | 2ce0d2f02f4c1554451f9741520ffe7e92b4b50e | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
import re, string, subprocess, signal
import lit.Test
from .base import FileBasedTest
def executeCommand(command, input):
p = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p.stdin.write(input)
out,err = p.communicate()
exitCode = p.wait()
# Detect Ctrl-C in subprocess.
if exitCode == -signal.SIGINT:
raise KeyboardInterrupt
# Ensure the resulting output is always of string type.
try:
out = str(out.decode('ascii'))
except:
out = str(out)
try:
err = str(err.decode('ascii'))
except:
err = str(err)
return out, err, exitCode
def readFile(path):
fd = open(path, 'r')
return fd.read()
class AliveTest(FileBasedTest):
def __init__(self):
self.regex = re.compile(r";\s*(ERROR:.*)")
def execute(self, test, litConfig):
test = test.getSourcePath()
cmd = ['python', 'alive.py']
input = readFile(test)
out, err, exitCode = executeCommand(cmd, input)
m = self.regex.search(input)
if m == None:
if exitCode == 0 and string.find(out, 'Optimization is correct!') != -1:
return lit.Test.PASS, ''
return lit.Test.FAIL, out + err
if exitCode == 255 and string.find(out, m.group(1)) != -1:
return lit.Test.PASS, ''
return lit.Test.FAIL, out + err
| 24.431034 | 78 | 0.625265 | from __future__ import absolute_import
import re, string, subprocess, signal
import lit.Test
from .base import FileBasedTest
def executeCommand(command, input):
p = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p.stdin.write(input)
out,err = p.communicate()
exitCode = p.wait()
if exitCode == -signal.SIGINT:
raise KeyboardInterrupt
try:
out = str(out.decode('ascii'))
except:
out = str(out)
try:
err = str(err.decode('ascii'))
except:
err = str(err)
return out, err, exitCode
def readFile(path):
fd = open(path, 'r')
return fd.read()
class AliveTest(FileBasedTest):
def __init__(self):
self.regex = re.compile(r";\s*(ERROR:.*)")
def execute(self, test, litConfig):
test = test.getSourcePath()
cmd = ['python', 'alive.py']
input = readFile(test)
out, err, exitCode = executeCommand(cmd, input)
m = self.regex.search(input)
if m == None:
if exitCode == 0 and string.find(out, 'Optimization is correct!') != -1:
return lit.Test.PASS, ''
return lit.Test.FAIL, out + err
if exitCode == 255 and string.find(out, m.group(1)) != -1:
return lit.Test.PASS, ''
return lit.Test.FAIL, out + err
| true | true |
1c33622dace13df17153cec3a22ea9743fed0d6e | 1,447 | py | Python | tests/test_fast/test_deadlock.py | ponty/EasyProcess | 4b3f5ab487ec46133e361958d6061262bfad91c3 | [
"BSD-2-Clause"
] | 86 | 2015-02-17T11:41:18.000Z | 2022-03-05T08:05:29.000Z | tests/test_fast/test_deadlock.py | ponty/EasyProcess | 4b3f5ab487ec46133e361958d6061262bfad91c3 | [
"BSD-2-Clause"
] | 18 | 2015-02-11T21:03:13.000Z | 2022-03-20T14:32:51.000Z | tests/test_fast/test_deadlock.py | ponty/EasyProcess | 4b3f5ab487ec46133e361958d6061262bfad91c3 | [
"BSD-2-Clause"
] | 22 | 2015-02-11T20:47:00.000Z | 2021-11-01T15:26:23.000Z | import os
import sys
import threading
from time import sleep
import pytest
from pyvirtualdisplay.display import Display
from easyprocess import EasyProcess
python = sys.executable
# requirement: apt install imagemagick
# deadlock
# popen.communicate() hangs
# no deadlock with temp_files
PROG = """
from PIL import Image
Image.new("RGB",(99, 99)).show()
"""
EASYPROCESS_USE_TEMP_FILES = os.environ.get("EASYPROCESS_USE_TEMP_FILES")
def test_dummy():
pass
# skip these tests for Windows/Mac
# and when 'use_temp_files' is forced by env variable
if sys.platform.startswith("linux") and not EASYPROCESS_USE_TEMP_FILES:
def test_has_imagemagick():
assert EasyProcess(["display", "-version"]).call().return_code == 0
@pytest.mark.timeout(10)
def test_deadlock_temp_files():
with Display():
p = EasyProcess([python, "-c", PROG,], use_temp_files=True,)
p.start()
sleep(1)
# hangs with pipes
p.stop()
@pytest.mark.timeout(10)
def test_deadlock_pipe():
with Display():
p = EasyProcess([python, "-c", PROG,], use_temp_files=False,)
p.start()
sleep(1)
def start():
# hangs with pipes
p.stop()
thread = threading.Thread(target=start)
thread.start()
sleep(3)
assert thread.is_alive()
thread.join()
| 22.609375 | 75 | 0.621977 | import os
import sys
import threading
from time import sleep
import pytest
from pyvirtualdisplay.display import Display
from easyprocess import EasyProcess
python = sys.executable
PROG = """
from PIL import Image
Image.new("RGB",(99, 99)).show()
"""
EASYPROCESS_USE_TEMP_FILES = os.environ.get("EASYPROCESS_USE_TEMP_FILES")
def test_dummy():
pass
if sys.platform.startswith("linux") and not EASYPROCESS_USE_TEMP_FILES:
def test_has_imagemagick():
assert EasyProcess(["display", "-version"]).call().return_code == 0
@pytest.mark.timeout(10)
def test_deadlock_temp_files():
with Display():
p = EasyProcess([python, "-c", PROG,], use_temp_files=True,)
p.start()
sleep(1)
p.stop()
@pytest.mark.timeout(10)
def test_deadlock_pipe():
with Display():
p = EasyProcess([python, "-c", PROG,], use_temp_files=False,)
p.start()
sleep(1)
def start():
p.stop()
thread = threading.Thread(target=start)
thread.start()
sleep(3)
assert thread.is_alive()
thread.join()
| true | true |
1c3363e3946059d0e9af3446782a8ac9ca9eb2ca | 1,105 | py | Python | Ar_Script/past/密码安全等级检查.py | archerckk/PyTest | 610dd89df8d70c096f4670ca11ed2f0ca3196ca5 | [
"MIT"
] | null | null | null | Ar_Script/past/密码安全等级检查.py | archerckk/PyTest | 610dd89df8d70c096f4670ca11ed2f0ca3196ca5 | [
"MIT"
] | 1 | 2020-01-19T01:19:57.000Z | 2020-01-19T01:19:57.000Z | Ar_Script/past/密码安全等级检查.py | archerckk/PyTest | 610dd89df8d70c096f4670ca11ed2f0ca3196ca5 | [
"MIT"
] | null | null | null |
psw=input('请输入需要检查的密码组合:')
zimu=('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','A','B','C'
, 'D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z')
shuzi='1234567890'
fuhao='~!@#$%^&*()_+[]{}:;"|\<,>.?//*-+.'
length=len(psw)
while psw.isspace()or length==0:
print('你输入的密码为空(只包含空格),请重新输入:',end='')
psw=input()
length=len(psw)
if length<=8:
num_level=1
elif 8<length<16:
num_level=2
else:
num_level=3
str_level=0
for i in psw:
if i in zimu:
str_level+=1
break
for i in psw:
if i in fuhao:
str_level += 1
break
for i in psw:
if i in shuzi:
str_level += 1
break
while 1:
if num_level==1 and str_level==1:
print('安全等级为:低')
elif num_level==2 or str_level==2 :
print('安全等级为:中')
elif (num_level==3 or str_level==3)and not psw.startswith(zimu):
print('安全等级为:较高')
else:
print('安全等级为:高\n请继续保持')
break
print('1.密码必须由数字、字母、特殊字符三种组合\n2.密码长度不能低与16位\n3.请以字母开头')
break
| 22.55102 | 117 | 0.506787 |
psw=input('请输入需要检查的密码组合:')
zimu=('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','A','B','C'
, 'D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z')
shuzi='1234567890'
fuhao='~!@#$%^&*()_+[]{}:;"|\<,>.?//*-+.'
length=len(psw)
while psw.isspace()or length==0:
print('你输入的密码为空(只包含空格),请重新输入:',end='')
psw=input()
length=len(psw)
if length<=8:
num_level=1
elif 8<length<16:
num_level=2
else:
num_level=3
str_level=0
for i in psw:
if i in zimu:
str_level+=1
break
for i in psw:
if i in fuhao:
str_level += 1
break
for i in psw:
if i in shuzi:
str_level += 1
break
while 1:
if num_level==1 and str_level==1:
print('安全等级为:低')
elif num_level==2 or str_level==2 :
print('安全等级为:中')
elif (num_level==3 or str_level==3)and not psw.startswith(zimu):
print('安全等级为:较高')
else:
print('安全等级为:高\n请继续保持')
break
print('1.密码必须由数字、字母、特殊字符三种组合\n2.密码长度不能低与16位\n3.请以字母开头')
break
| true | true |
1c3364a3c9abb5f56a012e9bb0585f043b4e08cc | 8,171 | py | Python | orc8r/gateway/python/magma/magmad/proxy_client.py | remo5000/magma | 1d1dd9a23800a8e07b1ce016776d93e12430ec15 | [
"BSD-3-Clause"
] | 3 | 2019-08-16T17:03:09.000Z | 2019-08-23T21:57:48.000Z | orc8r/gateway/python/magma/magmad/proxy_client.py | remo5000/magma | 1d1dd9a23800a8e07b1ce016776d93e12430ec15 | [
"BSD-3-Clause"
] | 14 | 2019-11-15T12:01:18.000Z | 2019-12-12T14:37:42.000Z | orc8r/gateway/python/magma/magmad/proxy_client.py | 119Vik/magma-1 | 107a7b374466a837fc0a49b283ba9d6ff1d702e3 | [
"BSD-3-Clause"
] | 3 | 2019-11-15T15:56:25.000Z | 2019-11-21T10:34:59.000Z | """
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import asyncio
import logging
import aioh2
import h2.events
from orc8r.protos.sync_rpc_service_pb2 import GatewayResponse, SyncRPCResponse
from magma.common.service_registry import ServiceRegistry
class ControlProxyHttpClient(object):
"""
ControlProxyHttpClient is a httpclient sending request
to the control proxy local port. It's used in SyncRPCClient
for forwarding GatewayRequests from the cloud, and gets a GatewayResponse.
"""
def __init__(self):
self._connection_table = {} # map req id -> client
async def send(self, gateway_request, req_id, sync_rpc_response_queue,
conn_closed_table):
"""
Forwards the given request to the service provided
in :authority and awaits a response. If a exception is
raised, log the error and enqueue an empty SyncRPCResponse.
Else, enqueue SyncRPCResponse(s) that contains the GatewayResponse.
Args:
gateway_request: gateway_request: A GatewayRequest that is
defined in the sync_rpc_service.proto. It has fields gwId, authority,
path, headers, and payload.
req_id: request id that's associated with the response
sync_rpc_response_queue: the response queue that responses
will be put in
conn_closed_table: table that maps req ids to if the conn is closed
Returns: None.
"""
client = await self._get_client(gateway_request.authority)
# Small hack to set PingReceived to no-op because the log gets spammed
# with KeyError messages since aioh2 doesn't have a handler for
# PingReceived. Remove if future versions support it.
# pylint: disable=protected-access
if hasattr(h2.events, "PingReceived"):
# Need the hasattr here because some older versions of h2 may not
# have the PingReceived event
client._event_handlers[h2.events.PingReceived] = lambda _: None
# pylint: enable=protected-access
if req_id in self._connection_table:
logging.error("[SyncRPC] proxy_client is already handling "
"request ID %s", req_id)
sync_rpc_response_queue.put(
SyncRPCResponse(
heartBeat=False,
reqId=req_id,
respBody=GatewayResponse(
err=str("request ID {} is already being handled"
.format(req_id))
)
)
)
client.close_connection()
return
self._connection_table[req_id] = client
try:
await client.wait_functional()
req_headers = self._get_req_headers(gateway_request.headers,
gateway_request.path,
gateway_request.authority)
body = gateway_request.payload
stream_id = await client.start_request(req_headers)
await self._await_gateway_response(client, stream_id, body,
req_id, sync_rpc_response_queue,
conn_closed_table)
except ConnectionAbortedError:
logging.error("[SyncRPC] proxy_client connection "
"terminated by cloud")
except Exception as e: # pylint: disable=broad-except
logging.error("[SyncRPC] Exception in proxy_client: %s", e)
sync_rpc_response_queue.put(
SyncRPCResponse(heartBeat=False, reqId=req_id,
respBody=GatewayResponse(err=str(e))))
finally:
del self._connection_table[req_id]
client.close_connection()
def close_all_connections(self):
for _, client in self._connection_table.items():
client.close_connection()
self._connection_table.clear()
@staticmethod
async def _get_client(service):
(ip, port) = ServiceRegistry.get_service_address(service)
return await aioh2.open_connection(ip, port)
async def _await_gateway_response(self, client, stream_id, body,
req_id, response_queue,
conn_closed_table):
await client.send_data(stream_id, body, end_stream=True)
resp_headers = await client.recv_response(stream_id)
status = self._get_resp_status(resp_headers)
curr_payload = await self._read_stream(client, stream_id, req_id,
response_queue,
conn_closed_table)
next_payload = await self._read_stream(client, stream_id, req_id,
response_queue,
conn_closed_table)
while True:
trailers = await client.recv_trailers(stream_id) \
if not next_payload else []
headers = self._get_resp_headers(resp_headers, trailers)
res = GatewayResponse(status=status, headers=headers,
payload=curr_payload)
response_queue.put(
SyncRPCResponse(heartBeat=False, reqId=req_id, respBody=res))
if not next_payload:
break
curr_payload = next_payload
next_payload = await self._read_stream(client, stream_id, req_id,
response_queue,
conn_closed_table)
@staticmethod
def _get_req_headers(raw_req_headers, path, authority):
headers = [(":method", "POST"),
(":scheme", "http"),
(":path", path),
(":authority", authority)]
for key, val in raw_req_headers.items():
headers.append((key, val))
return headers
@staticmethod
def _get_resp_status(raw_headers):
return dict(raw_headers)[":status"]
@staticmethod
def _get_resp_headers(raw_headers, raw_trailers):
"""
Concatenate raw_headers and raw_tailers into a new dict
raw_headers: a list of headers
raw_trailers: a dict of trailers
Return: a dict of headers and trailers
"""
headers_dict = dict(raw_headers)
headers_dict.update(raw_trailers)
return headers_dict
@staticmethod
async def _read_stream(client, stream_id, req_id, response_queue,
conn_closed_table):
"""
Attempt to read from the stream. If it times out, send a keepConnActive
response to the response queue. If it continues to time out after a
very long period of time, raise asyncio.TimeoutError. If the connection
is closed by the client, raise ConnectionAbortedError.
"""
async def try_read_stream():
while True:
try:
payload = await asyncio.wait_for(
client.read_stream(stream_id), timeout=10.0)
if conn_closed_table.get(req_id, False):
raise ConnectionAbortedError
return payload
except asyncio.TimeoutError:
if conn_closed_table.get(req_id, False):
raise ConnectionAbortedError
response_queue.put(
SyncRPCResponse(
heartBeat=False,
reqId=req_id,
respBody=GatewayResponse(keepConnActive=True)
)
)
return await asyncio.wait_for(try_read_stream(), timeout=120.0)
| 41.267677 | 79 | 0.582793 | import asyncio
import logging
import aioh2
import h2.events
from orc8r.protos.sync_rpc_service_pb2 import GatewayResponse, SyncRPCResponse
from magma.common.service_registry import ServiceRegistry
class ControlProxyHttpClient(object):
def __init__(self):
self._connection_table = {}
async def send(self, gateway_request, req_id, sync_rpc_response_queue,
conn_closed_table):
client = await self._get_client(gateway_request.authority)
# PingReceived. Remove if future versions support it.
# pylint: disable=protected-access
if hasattr(h2.events, "PingReceived"):
# Need the hasattr here because some older versions of h2 may not
# have the PingReceived event
client._event_handlers[h2.events.PingReceived] = lambda _: None
# pylint: enable=protected-access
if req_id in self._connection_table:
logging.error("[SyncRPC] proxy_client is already handling "
"request ID %s", req_id)
sync_rpc_response_queue.put(
SyncRPCResponse(
heartBeat=False,
reqId=req_id,
respBody=GatewayResponse(
err=str("request ID {} is already being handled"
.format(req_id))
)
)
)
client.close_connection()
return
self._connection_table[req_id] = client
try:
await client.wait_functional()
req_headers = self._get_req_headers(gateway_request.headers,
gateway_request.path,
gateway_request.authority)
body = gateway_request.payload
stream_id = await client.start_request(req_headers)
await self._await_gateway_response(client, stream_id, body,
req_id, sync_rpc_response_queue,
conn_closed_table)
except ConnectionAbortedError:
logging.error("[SyncRPC] proxy_client connection "
"terminated by cloud")
except Exception as e: # pylint: disable=broad-except
logging.error("[SyncRPC] Exception in proxy_client: %s", e)
sync_rpc_response_queue.put(
SyncRPCResponse(heartBeat=False, reqId=req_id,
respBody=GatewayResponse(err=str(e))))
finally:
del self._connection_table[req_id]
client.close_connection()
def close_all_connections(self):
for _, client in self._connection_table.items():
client.close_connection()
self._connection_table.clear()
@staticmethod
async def _get_client(service):
(ip, port) = ServiceRegistry.get_service_address(service)
return await aioh2.open_connection(ip, port)
async def _await_gateway_response(self, client, stream_id, body,
req_id, response_queue,
conn_closed_table):
await client.send_data(stream_id, body, end_stream=True)
resp_headers = await client.recv_response(stream_id)
status = self._get_resp_status(resp_headers)
curr_payload = await self._read_stream(client, stream_id, req_id,
response_queue,
conn_closed_table)
next_payload = await self._read_stream(client, stream_id, req_id,
response_queue,
conn_closed_table)
while True:
trailers = await client.recv_trailers(stream_id) \
if not next_payload else []
headers = self._get_resp_headers(resp_headers, trailers)
res = GatewayResponse(status=status, headers=headers,
payload=curr_payload)
response_queue.put(
SyncRPCResponse(heartBeat=False, reqId=req_id, respBody=res))
if not next_payload:
break
curr_payload = next_payload
next_payload = await self._read_stream(client, stream_id, req_id,
response_queue,
conn_closed_table)
@staticmethod
def _get_req_headers(raw_req_headers, path, authority):
headers = [(":method", "POST"),
(":scheme", "http"),
(":path", path),
(":authority", authority)]
for key, val in raw_req_headers.items():
headers.append((key, val))
return headers
@staticmethod
def _get_resp_status(raw_headers):
return dict(raw_headers)[":status"]
@staticmethod
def _get_resp_headers(raw_headers, raw_trailers):
headers_dict = dict(raw_headers)
headers_dict.update(raw_trailers)
return headers_dict
@staticmethod
async def _read_stream(client, stream_id, req_id, response_queue,
conn_closed_table):
async def try_read_stream():
while True:
try:
payload = await asyncio.wait_for(
client.read_stream(stream_id), timeout=10.0)
if conn_closed_table.get(req_id, False):
raise ConnectionAbortedError
return payload
except asyncio.TimeoutError:
if conn_closed_table.get(req_id, False):
raise ConnectionAbortedError
response_queue.put(
SyncRPCResponse(
heartBeat=False,
reqId=req_id,
respBody=GatewayResponse(keepConnActive=True)
)
)
return await asyncio.wait_for(try_read_stream(), timeout=120.0)
| true | true |
1c3365e3b522bc4f34c72ef66f784cc0cc49dbf1 | 10,574 | py | Python | tools/harness-automation/autothreadharness/open_thread_controller.py | ctan-g/openthread | 376f35a49e5c0a5b8170c117d7a930e3a8b3b210 | [
"BSD-3-Clause"
] | 1 | 2020-08-12T06:15:53.000Z | 2020-08-12T06:15:53.000Z | tools/harness-automation/autothreadharness/open_thread_controller.py | ctan-g/openthread | 376f35a49e5c0a5b8170c117d7a930e3a8b3b210 | [
"BSD-3-Clause"
] | null | null | null | tools/harness-automation/autothreadharness/open_thread_controller.py | ctan-g/openthread | 376f35a49e5c0a5b8170c117d7a930e3a8b3b210 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import re
import socket
import threading
import time
import serial
from . import settings
# Public API of this module.
__all__ = ['OpenThreadController']

# Module-level logger for this wrapper.
logger = logging.getLogger(__name__)

# Accept both CRLF and bare LF as line separators in device output.
linesepx = re.compile(r'\r\n|\n')
class OpenThreadController(threading.Thread):
    """A simple wrapper to communicate with an OpenThread device over
    a serial port or a TCP connection (e.g. ser2net)."""

    # NOTE(review): _lock is not referenced anywhere in this chunk;
    # presumably it guards shared access elsewhere -- confirm before removing.
    _lock = threading.Lock()

    # Run flag for the background reader thread: the run() loop keeps going
    # while this is True; close() flips it to False to stop the thread.
    viewing = False
def __init__(self, port, log=False):
"""Initialize the controller
Args:
port (str): serial port's path or name(windows)
"""
super(OpenThreadController, self).__init__()
self.port = port
self.handle = None
self.lines = []
self._log = log
self._is_net = False
self._init()
def _init(self):
self._connect()
if not self._log:
return
self.start()
def __del__(self):
self.close()
def close(self):
if self.is_alive():
self.viewing = False
self.join()
self._close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _close(self):
if self.handle:
self.handle.close()
self.handle = None
def _connect(self):
logger.debug('My port is %s', self.port)
if self.port.startswith('NET'):
portnum = settings.SER2NET_PORTBASE + int(self.port.split('NET')[1])
logger.debug('My port num is %d', portnum)
address = (settings.SER2NET_HOSTNAME, portnum)
self.handle = socket.create_connection(address)
self.handle.setblocking(0)
self._is_net = True
elif ':' in self.port:
host, port = self.port.split(':')
self.handle = socket.create_connection((host, port))
self.handle.setblocking(0)
self._is_net = True
else:
self.handle = serial.Serial(self.port, 115200, timeout=0, xonxoff=True)
self._is_net = False
def _read(self, size=512):
if self._is_net:
return self.handle.recv(size)
else:
return self.handle.read(size)
def _write(self, data):
if self._is_net:
self.handle.sendall(data)
else:
self.handle.write(data)
def _expect(self, expected, times=50):
"""Find the `expected` line within `times` trials.
Args:
expected str: the expected string
times int: number of trials
"""
logger.debug('[%s] Expecting [%s]', self.port, expected)
retry_times = 10
while times:
if not retry_times:
break
line = self._readline()
if line == expected:
return
if not line:
retry_times -= 1
time.sleep(0.1)
times -= 1
raise Exception('failed to find expected string[%s]' % expected)
def _readline(self):
"""Read exactly one line from the device, nonblocking.
Returns:
None on no data
"""
if len(self.lines) > 1:
return self.lines.pop(0)
tail = ''
if len(self.lines):
tail = self.lines.pop()
try:
tail += self._read()
except socket.error:
logging.exception('No new data')
time.sleep(0.1)
self.lines += linesepx.split(tail)
if len(self.lines) > 1:
return self.lines.pop(0)
def _sendline(self, line):
"""Send exactly one line to the device
Args:
line str: data send to device
"""
self.lines = []
try:
self._read()
except socket.error:
logging.debug('Nothing cleared')
logger.debug('sending [%s]', line)
self._write(line + '\r\n')
# wait for write to complete
time.sleep(0.5)
def _req(self, req):
"""Send command and wait for response.
The command will be repeated 3 times at most in case data loss of serial port.
Args:
req (str): Command to send, please do not include new line in the end.
Returns:
[str]: The output lines
"""
logger.debug('DUT> %s', req)
self._log and self.pause()
times = 3
res = None
while times:
times = times - 1
try:
self._sendline(req)
self._expect(req)
line = None
res = []
while True:
line = self._readline()
logger.debug('Got line %s', line)
if line == 'Done':
break
if line:
res.append(line)
break
except BaseException:
logger.exception('Failed to send command')
self.close()
self._init()
self._log and self.resume()
return res
def run(self):
"""Threading callback"""
self.viewing = True
while self.viewing and self._lock.acquire():
try:
line = self._readline()
except BaseException:
pass
else:
logger.info(line)
self._lock.release()
time.sleep(0)
def is_started(self):
"""check if openthread is started
Returns:
bool: started or not
"""
state = self._req('state')[0]
return state != 'disabled'
def start(self):
"""Start openthread
"""
self._req('ifconfig up')
self._req('thread start')
def stop(self):
"""Stop openthread
"""
self._req('thread stop')
self._req('ifconfig down')
def reset(self):
"""Reset openthread device, not equivalent to stop and start
"""
logger.debug('DUT> reset')
self._log and self.pause()
self._sendline('reset')
self._read()
self._log and self.resume()
def resume(self):
"""Start dumping logs"""
self._lock.release()
def pause(self):
"""Start dumping logs"""
self._lock.acquire()
@property
def networkname(self):
"""str: Thread network name."""
return self._req('networkname')[0]
@networkname.setter
def networkname(self, value):
self._req('networkname %s' % value)
@property
def mode(self):
"""str: Thread mode."""
return self._req('mode')[0]
@mode.setter
def mode(self, value):
self._req('mode %s' % value)
@property
def mac(self):
"""str: MAC address of the device"""
return self._req('extaddr')[0]
@property
def addrs(self):
"""[str]: IP addresses of the devices"""
return self._req('ipaddr')
@property
def short_addr(self):
"""str: Short address"""
return self._req('rloc16')[0]
@property
def channel(self):
"""int: Channel number of openthread"""
return int(self._req('channel')[0])
@channel.setter
def channel(self, value):
self._req('channel %d' % value)
@property
def panid(self):
"""str: Thread panid"""
return self._req('panid')[0]
@panid.setter
def panid(self, value):
self._req('panid %s' % value)
@property
def extpanid(self):
"""str: Thread extpanid"""
return self._req('extpanid')[0]
@extpanid.setter
def extpanid(self, value):
self._req('extpanid %s' % value)
@property
def child_timeout(self):
"""str: Thread child timeout in seconds"""
return self._req('childtimeout')[0]
@child_timeout.setter
def child_timeout(self, value):
self._req('childtimeout %d' % value)
@property
def version(self):
"""str: Open thread version"""
return self._req('version')[0]
def add_prefix(self, prefix, flags, prf):
"""Add network prefix.
Args:
prefix (str): network prefix.
flags (str): network prefix flags, please refer thread documentation for details
prf (str): network prf, please refer thread documentation for details
"""
self._req('prefix add %s %s %s' % (prefix, flags, prf))
time.sleep(1)
self._req('netdataregister')
def remove_prefix(self, prefix):
"""Remove network prefix.
"""
self._req('prefix remove %s' % prefix)
time.sleep(1)
self._req('netdataregister')
def enable_blacklist(self):
"""Enable blacklist feature"""
self._req('blacklist enable')
def add_blacklist(self, mac):
"""Add a mac address to blacklist"""
self._req('blacklist add %s' % mac)
| 27.322997 | 92 | 0.56847 |
import logging
import re
import socket
import threading
import time
import serial
from . import settings
__all__ = ['OpenThreadController']
logger = logging.getLogger(__name__)
linesepx = re.compile(r'\r\n|\n')
class OpenThreadController(threading.Thread):
_lock = threading.Lock()
viewing = False
def __init__(self, port, log=False):
super(OpenThreadController, self).__init__()
self.port = port
self.handle = None
self.lines = []
self._log = log
self._is_net = False
self._init()
def _init(self):
self._connect()
if not self._log:
return
self.start()
def __del__(self):
self.close()
def close(self):
if self.is_alive():
self.viewing = False
self.join()
self._close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _close(self):
if self.handle:
self.handle.close()
self.handle = None
def _connect(self):
logger.debug('My port is %s', self.port)
if self.port.startswith('NET'):
portnum = settings.SER2NET_PORTBASE + int(self.port.split('NET')[1])
logger.debug('My port num is %d', portnum)
address = (settings.SER2NET_HOSTNAME, portnum)
self.handle = socket.create_connection(address)
self.handle.setblocking(0)
self._is_net = True
elif ':' in self.port:
host, port = self.port.split(':')
self.handle = socket.create_connection((host, port))
self.handle.setblocking(0)
self._is_net = True
else:
self.handle = serial.Serial(self.port, 115200, timeout=0, xonxoff=True)
self._is_net = False
def _read(self, size=512):
if self._is_net:
return self.handle.recv(size)
else:
return self.handle.read(size)
def _write(self, data):
if self._is_net:
self.handle.sendall(data)
else:
self.handle.write(data)
def _expect(self, expected, times=50):
logger.debug('[%s] Expecting [%s]', self.port, expected)
retry_times = 10
while times:
if not retry_times:
break
line = self._readline()
if line == expected:
return
if not line:
retry_times -= 1
time.sleep(0.1)
times -= 1
raise Exception('failed to find expected string[%s]' % expected)
def _readline(self):
if len(self.lines) > 1:
return self.lines.pop(0)
tail = ''
if len(self.lines):
tail = self.lines.pop()
try:
tail += self._read()
except socket.error:
logging.exception('No new data')
time.sleep(0.1)
self.lines += linesepx.split(tail)
if len(self.lines) > 1:
return self.lines.pop(0)
def _sendline(self, line):
self.lines = []
try:
self._read()
except socket.error:
logging.debug('Nothing cleared')
logger.debug('sending [%s]', line)
self._write(line + '\r\n')
time.sleep(0.5)
def _req(self, req):
logger.debug('DUT> %s', req)
self._log and self.pause()
times = 3
res = None
while times:
times = times - 1
try:
self._sendline(req)
self._expect(req)
line = None
res = []
while True:
line = self._readline()
logger.debug('Got line %s', line)
if line == 'Done':
break
if line:
res.append(line)
break
except BaseException:
logger.exception('Failed to send command')
self.close()
self._init()
self._log and self.resume()
return res
def run(self):
self.viewing = True
while self.viewing and self._lock.acquire():
try:
line = self._readline()
except BaseException:
pass
else:
logger.info(line)
self._lock.release()
time.sleep(0)
def is_started(self):
state = self._req('state')[0]
return state != 'disabled'
def start(self):
self._req('ifconfig up')
self._req('thread start')
def stop(self):
self._req('thread stop')
self._req('ifconfig down')
def reset(self):
logger.debug('DUT> reset')
self._log and self.pause()
self._sendline('reset')
self._read()
self._log and self.resume()
def resume(self):
self._lock.release()
def pause(self):
self._lock.acquire()
@property
def networkname(self):
return self._req('networkname')[0]
@networkname.setter
def networkname(self, value):
self._req('networkname %s' % value)
@property
def mode(self):
return self._req('mode')[0]
@mode.setter
def mode(self, value):
self._req('mode %s' % value)
@property
def mac(self):
return self._req('extaddr')[0]
@property
def addrs(self):
return self._req('ipaddr')
@property
def short_addr(self):
return self._req('rloc16')[0]
@property
def channel(self):
return int(self._req('channel')[0])
@channel.setter
def channel(self, value):
self._req('channel %d' % value)
@property
def panid(self):
return self._req('panid')[0]
@panid.setter
def panid(self, value):
self._req('panid %s' % value)
@property
def extpanid(self):
return self._req('extpanid')[0]
@extpanid.setter
def extpanid(self, value):
self._req('extpanid %s' % value)
@property
def child_timeout(self):
return self._req('childtimeout')[0]
@child_timeout.setter
def child_timeout(self, value):
self._req('childtimeout %d' % value)
@property
def version(self):
return self._req('version')[0]
def add_prefix(self, prefix, flags, prf):
self._req('prefix add %s %s %s' % (prefix, flags, prf))
time.sleep(1)
self._req('netdataregister')
def remove_prefix(self, prefix):
self._req('prefix remove %s' % prefix)
time.sleep(1)
self._req('netdataregister')
def enable_blacklist(self):
self._req('blacklist enable')
def add_blacklist(self, mac):
self._req('blacklist add %s' % mac)
| true | true |
1c336671addc0cbf4baad8279578295bfaff1469 | 396 | py | Python | examples/16_helix_origami_rectangle_no_seq_no_twist.py | scadnano-test-user/scadnano-python-package-1 | 9becb5a076579f6cbac1ebfeda514540bb84ab87 | [
"MIT"
] | null | null | null | examples/16_helix_origami_rectangle_no_seq_no_twist.py | scadnano-test-user/scadnano-python-package-1 | 9becb5a076579f6cbac1ebfeda514540bb84ab87 | [
"MIT"
] | null | null | null | examples/16_helix_origami_rectangle_no_seq_no_twist.py | scadnano-test-user/scadnano-python-package-1 | 9becb5a076579f6cbac1ebfeda514540bb84ab87 | [
"MIT"
] | null | null | null | import origami_rectangle as rect
import scadnano as sc
def main():
    """Build the 16-helix, 26-column origami rectangle design (no sequence assigned)."""
    design = rect.create(
        num_helices=16,
        num_cols=26,
        assign_seq=False,
        twist_correction_deletion_spacing=3,
        twist_correction_start_col=2,
    )
    return design
# Write the design to disk only when executed as a script and not running
# inside the browser (sc.in_browser()).
if not sc.in_browser() and __name__ == '__main__':
    design = main()
    design.write_scadnano_file(directory='output_designs')
| 28.285714 | 109 | 0.694444 | import origami_rectangle as rect
import scadnano as sc
def main():
design = rect.create(num_helices=16, num_cols=26, assign_seq=False, twist_correction_deletion_spacing=3,
twist_correction_start_col=2)
return design
if not sc.in_browser() and __name__ == '__main__':
design = main()
design.write_scadnano_file(directory='output_designs')
| true | true |
1c33698445117e404bad1744f55343c669082b4b | 5,825 | py | Python | solid_i18n/middleware.py | affan2/django-solid-i18n-urls | 3b858a84499261a3f86253e472f83cd79e90bab6 | [
"BSD-3-Clause"
] | null | null | null | solid_i18n/middleware.py | affan2/django-solid-i18n-urls | 3b858a84499261a3f86253e472f83cd79e90bab6 | [
"BSD-3-Clause"
] | null | null | null | solid_i18n/middleware.py | affan2/django-solid-i18n-urls | 3b858a84499261a3f86253e472f83cd79e90bab6 | [
"BSD-3-Clause"
] | 1 | 2020-01-09T10:22:39.000Z | 2020-01-09T10:22:39.000Z | from django import VERSION as DJANGO_VERSION
from django.conf import settings
from django.core.urlresolvers import (is_valid_path, get_resolver,
get_script_prefix)
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.utils.cache import patch_vary_headers
from django.utils import translation as trans
from django.utils.translation.trans_real import language_code_prefix_re
from django.middleware.locale import LocaleMiddleware
from .urlresolvers import SolidLocaleRegexURLResolver
from .memory import set_language_from_path
class SolidLocaleMiddleware(LocaleMiddleware):
    """
    Request without language prefix will use default language.
    Or, if settings.SOLID_I18N_USE_REDIRECTS is True, try to discover language.
    If language is not equal to default language, redirect to discovered
    language.
    If request contains language prefix, this language will be used immediately.
    In that case settings.SOLID_I18N_USE_REDIRECTS doesn't make sense.
    Default language is set in settings.LANGUAGE_CODE.
    """
    response_redirect_class = HttpResponseRedirect
    response_permanent_redirect_class = HttpResponsePermanentRedirect
    def __init__(self):
        # Detect once whether the root urlconf uses the solid i18n patterns.
        self._is_language_prefix_patterns_used = False
        for url_pattern in get_resolver(None).url_patterns:
            if isinstance(url_pattern, SolidLocaleRegexURLResolver):
                self._is_language_prefix_patterns_used = True
                break
    @property
    def use_redirects(self):
        # Opt-in via settings; defaults to False (no language-discovery redirects).
        return getattr(settings, 'SOLID_I18N_USE_REDIRECTS', False)
    @property
    def default_lang(self):
        # The site-wide default language code.
        return settings.LANGUAGE_CODE
    def process_request(self, request):
        """Activate the language for this request (URL prefix wins; else default/discovered)."""
        check_path = self.is_language_prefix_patterns_used()
        language_path = trans.get_language_from_path(request.path_info)
        if check_path and not self.use_redirects:
            # No-redirects mode: the URL prefix decides, otherwise the default.
            language = language_path or self.default_lang
        else:
            # Redirects mode: let Django discover from session/cookie/Accept-Language.
            language = trans.get_language_from_request(request, check_path)
        set_language_from_path(language_path)
        trans.activate(language)
        request.LANGUAGE_CODE = trans.get_language()
    def process_response(self, request, response):
        """Apply the optional redirects and set Vary / Content-Language headers."""
        language = trans.get_language()
        language_from_path = trans.get_language_from_path(request.path_info)
        if (getattr(settings, 'SOLID_I18N_DEFAULT_PREFIX_REDIRECT', False)
                and language_from_path == self.default_lang
                and self.is_language_prefix_patterns_used()):
            # URLs carrying the default-language prefix redirect to the unprefixed URL.
            redirect = self.perform_redirect(request, '', getattr(settings, 'SOLID_I18N_USE_PERMANENT_DEFAULT_PREFIX_REDIRECT', False))
            if redirect:
                return redirect
        elif self.use_redirects:
            # 404 on an unprefixed URL while a non-default language is active:
            # retry the same URL under that language's prefix.
            if (response.status_code == 404 and not language_from_path
                    and self.is_language_prefix_patterns_used()
                    and language != self.default_lang):
                redirect = self.perform_redirect(request, language, getattr(settings, 'SOLID_I18N_USE_PERMANENT_REDIRECTS', False))
                if redirect:
                    return redirect
        if not (self.is_language_prefix_patterns_used()
                and language_from_path):
            # The response depends on Accept-Language when no URL prefix fixed it.
            patch_vary_headers(response, ('Accept-Language',))
        if DJANGO_VERSION < (1, 6):
            trans.deactivate()
        if 'Content-Language' not in response:
            response['Content-Language'] = language
        return response
    def remove_lang_from_path(self, path):
        """Strip a leading '/<lang-code>' prefix from *path*, preserving a leading slash."""
        no_lang_tag_path = path
        regex_match = language_code_prefix_re.match(path)
        if regex_match:
            lang_code = regex_match.group(1)
            # Drop the leading '/' plus the language code itself.
            no_lang_tag_path = path[1 + len(lang_code):]
            if not no_lang_tag_path.startswith('/'):
                no_lang_tag_path = '/' + no_lang_tag_path
        return no_lang_tag_path
    def perform_redirect(self, request, language, permanent=False):
        """Redirect to the same URL under *language* ('' means no prefix).
        Returns a redirect response when the target path resolves; otherwise None.
        """
        # language can be empty string (in case of default language)
        path_info = request.path_info
        full_path = request.get_full_path()
        if not language:
            path_info = self.remove_lang_from_path(path_info)
            full_path = self.remove_lang_from_path(full_path)
        urlconf = getattr(request, 'urlconf', None)
        language_path = '%s%s' % (language, path_info)
        if not language_path.startswith('/'):
            language_path = '/' + language_path
        path_valid = is_valid_path(language_path, urlconf)
        if (not path_valid and settings.APPEND_SLASH
                and not language_path.endswith('/')):
            path_valid = is_valid_path("%s/" % language_path, urlconf)
        if path_valid:
            if DJANGO_VERSION >= (1, 7):
                scheme = request.scheme
            else:
                scheme = 'https' if request.is_secure() else 'http'
            script_prefix = get_script_prefix()
            language_url = "%s://%s%s" % (
                scheme,
                request.get_host(),
                # insert language after the script prefix and before the
                # rest of the URL
                full_path.replace(
                    script_prefix,
                    '%s%s/' % (script_prefix, language) if language else script_prefix,
                    1
                )
            )
            if permanent:
                return self.response_permanent_redirect_class(language_url)
            return self.response_redirect_class(language_url)
    def is_language_prefix_patterns_used(self):
        """
        Returns `True` if the `SolidLocaleRegexURLResolver` is used
        at root level of the urlpatterns, else it returns `False`.
        """
        return self._is_language_prefix_patterns_used
| 43.796992 | 135 | 0.664721 | from django import VERSION as DJANGO_VERSION
from django.conf import settings
from django.core.urlresolvers import (is_valid_path, get_resolver,
get_script_prefix)
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.utils.cache import patch_vary_headers
from django.utils import translation as trans
from django.utils.translation.trans_real import language_code_prefix_re
from django.middleware.locale import LocaleMiddleware
from .urlresolvers import SolidLocaleRegexURLResolver
from .memory import set_language_from_path
class SolidLocaleMiddleware(LocaleMiddleware):
response_redirect_class = HttpResponseRedirect
response_permanent_redirect_class = HttpResponsePermanentRedirect
def __init__(self):
self._is_language_prefix_patterns_used = False
for url_pattern in get_resolver(None).url_patterns:
if isinstance(url_pattern, SolidLocaleRegexURLResolver):
self._is_language_prefix_patterns_used = True
break
@property
def use_redirects(self):
return getattr(settings, 'SOLID_I18N_USE_REDIRECTS', False)
@property
def default_lang(self):
return settings.LANGUAGE_CODE
def process_request(self, request):
check_path = self.is_language_prefix_patterns_used()
language_path = trans.get_language_from_path(request.path_info)
if check_path and not self.use_redirects:
language = language_path or self.default_lang
else:
language = trans.get_language_from_request(request, check_path)
set_language_from_path(language_path)
trans.activate(language)
request.LANGUAGE_CODE = trans.get_language()
def process_response(self, request, response):
language = trans.get_language()
language_from_path = trans.get_language_from_path(request.path_info)
if (getattr(settings, 'SOLID_I18N_DEFAULT_PREFIX_REDIRECT', False)
and language_from_path == self.default_lang
and self.is_language_prefix_patterns_used()):
redirect = self.perform_redirect(request, '', getattr(settings, 'SOLID_I18N_USE_PERMANENT_DEFAULT_PREFIX_REDIRECT', False))
if redirect:
return redirect
elif self.use_redirects:
if (response.status_code == 404 and not language_from_path
and self.is_language_prefix_patterns_used()
and language != self.default_lang):
redirect = self.perform_redirect(request, language, getattr(settings, 'SOLID_I18N_USE_PERMANENT_REDIRECTS', False))
if redirect:
return redirect
if not (self.is_language_prefix_patterns_used()
and language_from_path):
patch_vary_headers(response, ('Accept-Language',))
if DJANGO_VERSION < (1, 6):
trans.deactivate()
if 'Content-Language' not in response:
response['Content-Language'] = language
return response
def remove_lang_from_path(self, path):
no_lang_tag_path = path
regex_match = language_code_prefix_re.match(path)
if regex_match:
lang_code = regex_match.group(1)
no_lang_tag_path = path[1 + len(lang_code):]
if not no_lang_tag_path.startswith('/'):
no_lang_tag_path = '/' + no_lang_tag_path
return no_lang_tag_path
def perform_redirect(self, request, language, permanent=False):
path_info = request.path_info
full_path = request.get_full_path()
if not language:
path_info = self.remove_lang_from_path(path_info)
full_path = self.remove_lang_from_path(full_path)
urlconf = getattr(request, 'urlconf', None)
language_path = '%s%s' % (language, path_info)
if not language_path.startswith('/'):
language_path = '/' + language_path
path_valid = is_valid_path(language_path, urlconf)
if (not path_valid and settings.APPEND_SLASH
and not language_path.endswith('/')):
path_valid = is_valid_path("%s/" % language_path, urlconf)
if path_valid:
if DJANGO_VERSION >= (1, 7):
scheme = request.scheme
else:
scheme = 'https' if request.is_secure() else 'http'
script_prefix = get_script_prefix()
language_url = "%s://%s%s" % (
scheme,
request.get_host(),
full_path.replace(
script_prefix,
'%s%s/' % (script_prefix, language) if language else script_prefix,
1
)
)
if permanent:
return self.response_permanent_redirect_class(language_url)
return self.response_redirect_class(language_url)
def is_language_prefix_patterns_used(self):
return self._is_language_prefix_patterns_used
| true | true |
1c3369cc581ad8d634b869346c35da6cd76f897b | 2,850 | py | Python | process.py | anirudhbelwadi/music-application-using-machine-learning | ef3514a59dd5d903bbd62ee0664d458098099d08 | [
"MIT"
] | 4 | 2021-07-01T12:38:53.000Z | 2021-11-13T09:21:14.000Z | process.py | anirudhbelwadi/music-application-using-machine-learning | ef3514a59dd5d903bbd62ee0664d458098099d08 | [
"MIT"
] | null | null | null | process.py | anirudhbelwadi/music-application-using-machine-learning | ef3514a59dd5d903bbd62ee0664d458098099d08 | [
"MIT"
] | null | null | null | import json
import os
import math
import librosa
# Extraction configuration: dataset root, output file, and audio parameters.
DATASET_PATH = "./test"
JSON_PATH = "top.json"
SAMPLE_RATE = 22050  # Hz
TRACK_DURATION = 30 # measured in seconds
SAMPLES_PER_TRACK = SAMPLE_RATE * TRACK_DURATION
def save_mfcc(dataset_path, json_path, num_mfcc=13, n_fft=2048, hop_length=512, num_segments=5):
    """Extracts MFCCs from music dataset and saves them into a json file along with genre labels.
    :param dataset_path (str): Path to dataset root; each sub-folder is one genre
    :param json_path (str): Path to json file used to save MFCCs
    :param num_mfcc (int): Number of coefficients to extract
    :param n_fft (int): Interval we consider to apply FFT. Measured in # of samples
    :param hop_length (int): Sliding window for FFT. Measured in # of samples
    :param: num_segments (int): Number of segments we want to divide sample tracks into
    :return: None. Results are written to ``json_path``.
    """
    # dictionary to store mapping, labels, and MFCCs
    data = {
        "mapping": [],
        "labels": [],
        "mfcc": []
    }
    samples_per_segment = int(SAMPLES_PER_TRACK / num_segments)
    num_mfcc_vectors_per_segment = math.ceil(samples_per_segment / hop_length)
    # loop through all genre sub-folders
    for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dataset_path)):
        # skip the dataset root itself; only genre sub-folders carry labels
        # (string equality, not `is not` -- identity comparison of str objects is fragile)
        if dirpath != dataset_path:
            # save genre label (i.e., sub-folder name) in the mapping;
            # os.path.basename is portable, unlike splitting on "/"
            semantic_label = os.path.basename(dirpath)
            data["mapping"].append(semantic_label)
            print("\nProcessing: {}".format(semantic_label))
            # process all audio files in genre sub-dir
            for f in filenames:
                # load audio file
                file_path = os.path.join(dirpath, f)
                signal, sample_rate = librosa.load(file_path, sr=SAMPLE_RATE)
                # process all segments of audio file
                for d in range(num_segments):
                    # calculate start and finish sample for current segment
                    start = samples_per_segment * d
                    finish = start + samples_per_segment
                    # extract mfcc (y/sr passed by keyword: they are keyword-only
                    # in librosa >= 0.10, and keywords also work on older versions)
                    mfcc = librosa.feature.mfcc(y=signal[start:finish], sr=sample_rate,
                                                n_mfcc=num_mfcc, n_fft=n_fft,
                                                hop_length=hop_length)
                    mfcc = mfcc.T
                    # store only mfcc feature with expected number of vectors
                    if len(mfcc) == num_mfcc_vectors_per_segment:
                        data["mfcc"].append(mfcc.tolist())
                        data["labels"].append(i - 1)
                        print("{}, segment:{}".format(file_path, d + 1))
    # save MFCCs to json file
    with open(json_path, "w") as fp:
        json.dump(data, fp, indent=4)
| 38.513514 | 112 | 0.600702 | import json
import os
import math
import librosa
DATASET_PATH = "./test"
JSON_PATH = "top.json"
SAMPLE_RATE = 22050
TRACK_DURATION = 30
SAMPLES_PER_TRACK = SAMPLE_RATE * TRACK_DURATION
def save_mfcc(dataset_path, json_path, num_mfcc=13, n_fft=2048, hop_length=512, num_segments=5):
data = {
"mapping": [],
"labels": [],
"mfcc": []
}
samples_per_segment = int(SAMPLES_PER_TRACK / num_segments)
num_mfcc_vectors_per_segment = math.ceil(samples_per_segment / hop_length)
for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dataset_path)):
if dirpath is not dataset_path:
# save genre label (i.e., sub-folder name) in the mapping
semantic_label = dirpath.split("/")[-1]
data["mapping"].append(semantic_label)
print("\nProcessing: {}".format(semantic_label))
# process all audio files in genre sub-dir
for f in filenames:
# load audio file
file_path = os.path.join(dirpath, f)
signal, sample_rate = librosa.load(file_path, sr=SAMPLE_RATE)
# process all segments of audio file
for d in range(num_segments):
# calculate start and finish sample for current segment
start = samples_per_segment * d
finish = start + samples_per_segment
# extract mfcc
mfcc = librosa.feature.mfcc(signal[start:finish], sample_rate, n_mfcc=num_mfcc, n_fft=n_fft,
hop_length=hop_length)
mfcc = mfcc.T
# store only mfcc feature with expected number of vectors
if len(mfcc) == num_mfcc_vectors_per_segment:
data["mfcc"].append(mfcc.tolist())
data["labels"].append(i - 1)
print("{}, segment:{}".format(file_path, d + 1))
# save MFCCs to json file
with open(json_path, "w") as fp:
json.dump(data, fp, indent=4)
| true | true |
1c336b9139628ad9f04b4d05454eb9f431d71374 | 2,735 | py | Python | mfdnres/descriptor_formats/mfdn_format_8.py | nd-nuclear-theory/slurp | 6b6c8faeda829b2e9b9751ae48ff7dbf69f2a7c6 | [
"MIT"
] | 1 | 2020-12-10T02:29:06.000Z | 2020-12-10T02:29:06.000Z | mfdnres/descriptor_formats/mfdn_format_8.py | nd-nuclear-theory/slurp | 6b6c8faeda829b2e9b9751ae48ff7dbf69f2a7c6 | [
"MIT"
] | null | null | null | mfdnres/descriptor_formats/mfdn_format_8.py | nd-nuclear-theory/slurp | 6b6c8faeda829b2e9b9751ae48ff7dbf69f2a7c6 | [
"MIT"
] | 1 | 2021-06-23T14:51:29.000Z | 2021-06-23T14:51:29.000Z | """ mfdn_format_8.py -- declares descriptor parser
Language: Python 3
Patrick J. Fasano
University of Notre Dame
03/18/18 (pjf): Initiated (based on mfdn_format_7_ho.py).
09/06/18 (pjf): Allow hyphens in interaction name.
"""
import re
# intra-package references
from .. import descriptor
# Map descriptor parity tokens to integer parity: g0 -> +1, g1 -> -1,
# gx -> 0 (presumably "either/unspecified parity" -- confirm against mfdn docs).
_parity_map = {"g0": +1, "g1": -1, "gx": 0}
def _truncation_parser(substr):
regex = re.compile(
r""
)
def parser(filename):
    """Parse results filename in format 8.
    Args:
        filename (string) : filename (as basename)
    Returns:
        (dict) : info parsed from filename
    Raises:
        ValueError: if the filename does not match the format-8 pattern
    """
    # One named group per descriptor field; the full descriptor is also
    # captured as a whole in the "descriptor" group.
    regex = re.compile(
        # prolog
        r"run(?P<run>\w+)"
        r"\-(?P<code_name>[^\-]+)"
        r"\-(?P<descriptor>"
        # descriptor contents
        r"Z(?P<Z>\d+)\-N(?P<N>\d+)"
        r"\-(?P<interaction>.+)\-coul(?P<coulomb>\d)"
        r"\-hw(?P<hw>[\d\.]+)"
        r"\-a_cm(?P<lawson>[\d\.]+)"
        r"\-an(?P<n_coeff>\d+\.\d{3})"
        r"\-bl(?P<l_coeff>\d+\.\d{3})"
        r"\-spWTmax(?P<sp_weight_max>\d+\.\d{3})"
        r"\-((?P<fci_flag>FCI)|WTmax(?P<mb_weight_max>\d+\.\d{3}))"
        r"\-(?P<parity_indicator>g.)"
        r"\-Mj(?P<Mj>[\d\.]+)"
        r"\-its(?P<max_iterations>\d+)"
        r"\-tol(?P<tolerance>\d+\.\d+[eE][+-]\d+)"
        r"((?P<natural_orbital_flag>\-natorb)\-no(?P<natural_orbital_iteration>\d+))?"
        # epilog
        r").res"
    )
    # Per-field converters applied to the raw captured strings.  Fields with
    # no entry here ("run", "code_name", "descriptor", "tolerance") are kept
    # as raw strings.
    conversions = {
        "Z": int,
        "N": int,
        "interaction" : str,
        "coulomb": int,
        "hw": float,
        "lawson": float,
        "n_coeff": float,
        "l_coeff": float,
        "sp_weight_max": float,
        # optional groups may be None when the FCI/natorb alternative matched
        "mb_weight_max": (lambda s: float(s) if (s is not None) else None),
        "fci_flag": (lambda s: (s == "FCI")),
        "parity_indicator": (lambda s: _parity_map[s]),
        "Mj": float,
        "max_iterations": int,
        "natural_orbital_flag": (lambda s: (s == "-natorb")),
        "natural_orbital_iteration": (lambda i: int(i) if (i is not None) else None)
    }
    match = regex.match(filename)
    if (match is None):
        raise ValueError("bad form for MFDn results filename: " + filename)
    info = match.groupdict()
    # convert fields
    for key in conversions:
        conversion = conversions[key]
        info[key] = conversion(info[key])
    return info
# Register this parser so descriptor.parse_res_filename can dispatch to it by name.
descriptor.register_filename_format("mfdn_format_8", parser)
if (__name__ == "__main__"):
    # Smoke test: parse a representative format-8 results filename and dump the fields.
    filename = r"run0000-mfdn15-Z2-N6-Daejeon16-coul1-hw10.000-a_cm0-an01.500-bl01.000-spWTmax12.000-WTmax15.000-g0-Mj0.0-its200-tol1.0e-06.res"
    info = descriptor.parse_res_filename(filename, filename_format="mfdn_format_8")
    print(filename)
    print(info)
| 27.35 | 144 | 0.558318 |
import re
from .. import descriptor
_parity_map = {"g0": +1, "g1": -1, "gx": 0}
def _truncation_parser(substr):
regex = re.compile(
r""
)
def parser(filename):
regex = re.compile(
r"run(?P<run>\w+)"
r"\-(?P<code_name>[^\-]+)"
r"\-(?P<descriptor>"
r"Z(?P<Z>\d+)\-N(?P<N>\d+)"
r"\-(?P<interaction>.+)\-coul(?P<coulomb>\d)"
r"\-hw(?P<hw>[\d\.]+)"
r"\-a_cm(?P<lawson>[\d\.]+)"
r"\-an(?P<n_coeff>\d+\.\d{3})"
r"\-bl(?P<l_coeff>\d+\.\d{3})"
r"\-spWTmax(?P<sp_weight_max>\d+\.\d{3})"
r"\-((?P<fci_flag>FCI)|WTmax(?P<mb_weight_max>\d+\.\d{3}))"
r"\-(?P<parity_indicator>g.)"
r"\-Mj(?P<Mj>[\d\.]+)"
r"\-its(?P<max_iterations>\d+)"
r"\-tol(?P<tolerance>\d+\.\d+[eE][+-]\d+)"
r"((?P<natural_orbital_flag>\-natorb)\-no(?P<natural_orbital_iteration>\d+))?"
r").res"
)
conversions = {
"Z": int,
"N": int,
"interaction" : str,
"coulomb": int,
"hw": float,
"lawson": float,
"n_coeff": float,
"l_coeff": float,
"sp_weight_max": float,
"mb_weight_max": (lambda s: float(s) if (s is not None) else None),
"fci_flag": (lambda s: (s == "FCI")),
"parity_indicator": (lambda s: _parity_map[s]),
"Mj": float,
"max_iterations": int,
"natural_orbital_flag": (lambda s: (s == "-natorb")),
"natural_orbital_iteration": (lambda i: int(i) if (i is not None) else None)
}
match = regex.match(filename)
if (match is None):
raise ValueError("bad form for MFDn results filename: " + filename)
info = match.groupdict()
for key in conversions:
conversion = conversions[key]
info[key] = conversion(info[key])
return info
descriptor.register_filename_format("mfdn_format_8", parser)
if (__name__ == "__main__"):
filename = r"run0000-mfdn15-Z2-N6-Daejeon16-coul1-hw10.000-a_cm0-an01.500-bl01.000-spWTmax12.000-WTmax15.000-g0-Mj0.0-its200-tol1.0e-06.res"
info = descriptor.parse_res_filename(filename, filename_format="mfdn_format_8")
print(filename)
print(info)
| true | true |
1c336c1207f698399536830e67b1a84561fc0177 | 10,452 | py | Python | scripts/OQMD2PyChemia.py | petavazohi/PyChemia | e779389418771c25c830aed360773c63bb069372 | [
"MIT"
] | 67 | 2015-01-31T07:44:55.000Z | 2022-03-21T21:43:34.000Z | scripts/OQMD2PyChemia.py | petavazohi/PyChemia | e779389418771c25c830aed360773c63bb069372 | [
"MIT"
] | 13 | 2016-06-03T19:07:51.000Z | 2022-03-31T04:20:40.000Z | scripts/OQMD2PyChemia.py | petavazohi/PyChemia | e779389418771c25c830aed360773c63bb069372 | [
"MIT"
] | 37 | 2015-01-22T15:37:23.000Z | 2022-03-21T15:38:10.000Z | #!/usr/bin/env python
import os
import sys
import time
import logging
import itertools
import argparse
from multiprocessing import Pool, cpu_count, Process
import pychemia
from pychemia.utils.periodic import atomic_symbol
try:
from qmpy import Entry
except ImportError:
Entry = None
print("Could not import 'qmpy' as needed to interface with the OQMD database")
exit(1)
def run_one(a):
    """Convert one OQMD ``Entry`` into a PyChemia structure plus properties.

    The best available calculation is chosen by a fixed priority
    ('standard' > 'fine_relax' > 'coarse_relax' > 'static' > 'relaxation',
    falling back to the last key in alphabetical order), and its geometry and
    energetics are packed into a ``properties`` dictionary suitable for
    insertion into a PyChemia database.

    :param a: OQMD Entry object (qmpy model instance)
    :return: tuple ``(structure, properties)``; ``(None, None)`` when neither
        a calculation nor a raw structure is available for this entry
    """
    # NOTE(review): this sentinel is never read; 'energy' is unconditionally
    # reassigned (or set to None) further below.
    energy = 1E10
    best_calculation = None
    # Pick the best calculation according to the fixed priority above.
    if a.calculation_set.count() > 0:
        if 'standard' in a.calculations:
            best_calculation = a.calculations['standard']
            calculation_name = 'standard'
        elif 'fine_relax' in a.calculations:
            best_calculation = a.calculations['fine_relax']
            calculation_name = 'fine_relax'
        elif 'coarse_relax' in a.calculations:
            best_calculation = a.calculations['coarse_relax']
            calculation_name = 'coarse_relax'
        elif 'static' in a.calculations:
            best_calculation = a.calculations['static']
            calculation_name = 'static'
        elif 'relaxation' in a.calculations:
            best_calculation = a.calculations['relaxation']
            calculation_name = 'relaxation'
        elif len(a.calculations) > 0:
            # None of the preferred names present: take the alphabetically
            # last calculation key.
            calculations = sorted(a.calculations.keys())
            print('Calculations found: %s, using the last one' % calculations)
            best_calculation = a.calculations[calculations[-1]]
            calculation_name = calculations[-1]
        else:
            print('ERROR: Count > 0 and no calculation found')

    # Select the geometry: prefer the relaxed output, fall back to the input.
    if best_calculation is not None:
        structure_name = None
        if best_calculation.output is not None:
            structure_used = best_calculation.output
            structure_id = best_calculation.output_id
            from_output = True
        elif best_calculation.input is not None:
            print(
                'WARNING: No data was found from the output of the calculation, using input geometries and leaving '
                'energetics empty')
            structure_used = best_calculation.input
            structure_id = best_calculation.input_id
            from_output = False
        # NOTE(review): if both output and input are None, 'structure_used'
        # and 'from_output' are never assigned and the code below raises
        # NameError -- confirm this cannot happen for OQMD calculations.
    else:
        # No calculation at all: fall back to the first raw structure.
        calculation_name = None
        if a.structures is not None and len(a.structures) > 0:
            struct_keys = sorted(a.structures.keys())
            print("WARNING: Calculation not found for %s. Structures found: %s using the first one " % (a, struct_keys))
            structure_used = a.structures[struct_keys[0]]
            structure_id = None
            from_output = False
            structure_name = struct_keys[0]
        else:
            print("ERROR: No calculation and no structure found for %s" % a)
            return None, None

    # Build the PyChemia structure from the qmpy geometry; the cell matrix is
    # transposed to match PyChemia's convention.
    cell = structure_used.cell.T
    symbols = atomic_symbol(structure_used.atomic_numbers)
    reduced = structure_used.coords
    structure = pychemia.Structure(cell=cell, symbols=symbols, reduced=reduced)
    entry_id = a.id
    # Energetics and metadata from the chosen calculation (None when absent).
    if best_calculation is not None:
        calculation_id = best_calculation.id
        energy_pa = best_calculation.energy_pa
        energy = best_calculation.energy
        band_gap = best_calculation.band_gap
        settings = best_calculation.settings
        try:
            spacegroup_number = best_calculation.output.spacegroup.number
        except AttributeError:
            # Calculation has no output (or no space group attached to it).
            spacegroup_number = None
    else:
        calculation_id = None
        energy_pa = None
        energy = None
        band_gap = None
        settings = None
        spacegroup_number = None
        from_output = False
    # Independently recompute the space group with PyChemia (symprec=1E-2).
    try:
        symm = pychemia.crystal.CrystalSymmetry(structure)
        sym2 = symm.number(1E-2)
    except ValueError:
        sym2 = None
    properties = {'oqmd': {'structure_id': structure_id,
                           'entry_id': entry_id,
                           'calculation_id': calculation_id,
                           'energy_pa': energy_pa,
                           'energy': energy,
                           'band_gap': band_gap,
                           'settings': settings,
                           'from_output': from_output,
                           'calculation_name': calculation_name,
                           'structure_name': structure_name,
                           'spacegroup_number': spacegroup_number},
                  'spacegroup_number': {'value': sym2, 'symprec': 1E-2}}
    return structure, properties
def getter(entry_ids, db_settings, current, start=0):
    """Return the OQMD entry ids from one chunk that are missing in PyChemia.

    ``entry_ids`` is split into chunks of ``jump`` (module-level constant) ids
    and this call scans chunk number ``start``.  ``current`` must be the
    *sorted* list of OQMD ids already stored in the PyChemia database; the
    forward-moving ``index`` below relies on that ordering.

    As a side effect, PyChemia entries duplicated for the same OQMD id are
    deduplicated (all but the first copy removed).

    :param entry_ids: list of all OQMD entry ids
    :param db_settings: settings dict for ``pychemia.db.get_database``
    :param current: sorted OQMD ids already present in the PyChemia DB
    :param start: chunk number processed by this call
    :return: list of ids in this chunk still needing insertion
    """
    pcdb = pychemia.db.get_database(db_settings)
    ret = []
    index = 0
    # NOTE(review): 'n' only mirrors len(ret) and is never read afterwards.
    n = 0
    initial = start * jump
    final = min(start * jump + jump, len(entry_ids))
    print('Process: %2d Processing from %6d to %6d total: %d' % (start, initial, final, len(entry_ids)))
    for a_id in entry_ids[initial:final]:
        # NOTE(review): the slice copies the tail of 'current' on every
        # iteration, making this loop effectively quadratic for large DBs.
        if a_id not in current[index:]:
            ret.append(a_id)
            n += 1
        else:
            index = current.index(a_id)
            # Removing duplicated entries
            if index + 1 < len(current) and current[index + 1] == a_id:
                print('We found at least one duplicate!')
                duplicate = False
                # Keep the first matching PyChemia entry, drop the rest.
                for entry in pcdb.db.pychemia_entries.find({'properties.oqmd.entry_id': a_id}):
                    if duplicate:
                        print('Removing PyChemiaDB entry: %s' % str(entry['_id']))
                        # NOTE(review): Collection.remove() is deprecated in
                        # modern pymongo; delete_one() is the replacement.
                        pcdb.db.pychemia_entries.remove({'_id': entry['_id']})
                    duplicate = True
    print('Process: %2d Entries missing: %3d' % (start, len(ret)))
    return ret
def setter(db_settings, to_insert):
    """Insert the given OQMD entries into the PyChemia database.

    Each OQMD id in ``to_insert`` is fetched through qmpy, converted by
    :func:`run_one`, and stored under a human-readable entry id of the form
    ``<nspecies>_<formula>_<zero-padded OQMD id>`` padded to 28 characters.

    :param db_settings: settings dict for ``pychemia.db.get_database``
    :param to_insert: iterable of OQMD entry ids to insert
    :return: always 0
    """
    print('Processing %d entries - ' % len(to_insert), end='')
    pcdb = pychemia.db.get_database(db_settings)
    if hasattr(os, 'getppid'):  # only available on Unix
        print('parent process: %d - ' % os.getppid(), end='')
    print('process id: %d' % os.getpid())
    index = 0
    for oqmd_id in to_insert:
        # Progress heartbeat every 2000 entries.
        if index % 2000 == 0:
            print(index, oqmd_id)
        index += 1
        structure = None
        properties = None
        a = Entry.objects.get(id=oqmd_id)
        structure, properties = run_one(a)
        if structure is not None:
            entry_id = '%d_%s_' % (structure.nspecies, structure.formula)
            n = len(entry_id)
            # Zero-pad the OQMD id so the full entry id is 28 characters wide.
            texto = '%0' + ('%d' % (28 - n)) + 'd'
            entry_id += texto % properties['oqmd']['entry_id']
            if n > 17:
                # Report unusually long species/formula prefixes.
                print("%2d - %s" % (28 - n, entry_id))
            pcdb.insert(structure, properties=properties, entry_id=entry_id)
    return 0
def getter_star(a_b):
    """Adapter for Pool.map: unpack the argument tuple and call ``getter``."""
    entry_ids, db_settings, current, start = a_b
    return getter(entry_ids, db_settings, current, start)
def setter_star(a_b):
    """Adapter for Pool.map: unpack the argument tuple and call ``setter``."""
    db_settings, to_insert = a_b
    return setter(db_settings, to_insert)
version = 0.1
# Number of OQMD entry ids assigned to each worker chunk.
jump = 10000

if __name__ == '__main__':

    parser = argparse.ArgumentParser(
        description='Create or Update a PyChemia Database from the OQMD Database (www.oqmd.org)')
    parser.add_argument('-dbname', metavar='<DATABASE>', type=str, help='Database Name', default='PyChemiaMasterDB')
    parser.add_argument('-port', metavar='<PORTNUMBER>', type=int, help='Port (default: 27017)', default=27017)
    parser.add_argument('-ssl', metavar='<SSL>', type=bool, help='Using SSL (default:no)', default=False)
    parser.add_argument('-user', metavar='<USERNAME>', type=str, help='Database Username', default=None)
    parser.add_argument('-host', metavar='<HOSTNAME>', type=str, help='Hostname (default: localhost)',
                        default='localhost')
    parser.add_argument('-nprocs', metavar='N', type=int,
                        help='Number of concurrent proccess (default: Number of CPUs)', default=None)
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger('pychemia')
    logger.addHandler(logging.NullHandler())
    logger.setLevel(logging.INFO)

    db_settings = {'name': args.dbname, 'host': args.host, 'port': args.port, 'ssl': args.ssl}
    if args.user is not None:
        passwd = input('Password: ')
        db_settings['user'] = args.user
        db_settings['passwd'] = passwd
    print('Database settings: \n%s\b' % db_settings)

    pcdb = pychemia.db.get_database(db_settings)
    nitems = pcdb.entries.count()
    print('Number of entries in the current PyChemia Database: %d' % nitems)

    # Sorted list of OQMD ids already present in PyChemia; `getter` relies on
    # this ordering for its forward scan.
    current = []
    for entry in pcdb.db.pychemia_entries.find({'properties.oqmd.entry_id': {'$exists': True}}):
        current.append(entry['properties']['oqmd']['entry_id'])
    current.sort()
    print('Number of entries coming from OQMD: %d' % len(current))

    print('Number of entries in OQMD...', end='')
    queryset = Entry.objects.all()
    entry_ids = [entry.id for entry in queryset]
    print('%d' % len(entry_ids))

    if args.nprocs is None:
        nprocs = cpu_count()
    else:
        nprocs = args.nprocs

    print('Creating a pool of %d processes for feeding the database' % nprocs)
    pool = Pool(processes=nprocs)
    # BUGFIX: use floor division -- '/' yields a float in Python 3 and range()
    # would raise TypeError (the file is Python 3: print(..., end=''), input()).
    a_args = range((len(entry_ids) // jump) + 1)
    # BUGFIX: itertools.izip does not exist in Python 3; the built-in zip is
    # already lazy and behaves the same here.
    to_insert = pool.map(getter_star, zip(itertools.repeat(entry_ids),
                                          itertools.repeat(db_settings),
                                          itertools.repeat(current), a_args), chunksize=1)
    pool.close()

    print(len(to_insert))
    print(db_settings)

    # QMPY does not support concurrent executions, so the insertion phase is
    # performed serially, one chunk at a time.
    for i in range(len(to_insert)):
        if len(to_insert[i]) > 0:
            setter(db_settings, to_insert[i])
| 35.55102 | 120 | 0.598833 |
import os
import sys
import time
import logging
import itertools
import argparse
from multiprocessing import Pool, cpu_count, Process
import pychemia
from pychemia.utils.periodic import atomic_symbol
try:
from qmpy import Entry
except ImportError:
Entry = None
print("Could not import 'qmpy' as needed to interface with the OQMD database")
exit(1)
def run_one(a):
energy = 1E10
best_calculation = None
if a.calculation_set.count() > 0:
if 'standard' in a.calculations:
best_calculation = a.calculations['standard']
calculation_name = 'standard'
elif 'fine_relax' in a.calculations:
best_calculation = a.calculations['fine_relax']
calculation_name = 'fine_relax'
elif 'coarse_relax' in a.calculations:
best_calculation = a.calculations['coarse_relax']
calculation_name = 'coarse_relax'
elif 'static' in a.calculations:
best_calculation = a.calculations['static']
calculation_name = 'static'
elif 'relaxation' in a.calculations:
best_calculation = a.calculations['relaxation']
calculation_name = 'relaxation'
elif len(a.calculations) > 0:
calculations = sorted(a.calculations.keys())
print('Calculations found: %s, using the last one' % calculations)
best_calculation = a.calculations[calculations[-1]]
calculation_name = calculations[-1]
else:
print('ERROR: Count > 0 and no calculation found')
if best_calculation is not None:
structure_name = None
if best_calculation.output is not None:
structure_used = best_calculation.output
structure_id = best_calculation.output_id
from_output = True
elif best_calculation.input is not None:
print(
'WARNING: No data was found from the output of the calculation, using input geometries and leaving '
'energetics empty')
structure_used = best_calculation.input
structure_id = best_calculation.input_id
from_output = False
else:
calculation_name = None
if a.structures is not None and len(a.structures) > 0:
struct_keys = sorted(a.structures.keys())
print("WARNING: Calculation not found for %s. Structures found: %s using the first one " % (a, struct_keys))
structure_used = a.structures[struct_keys[0]]
structure_id = None
from_output = False
structure_name = struct_keys[0]
else:
print("ERROR: No calculation and no structure found for %s" % a)
return None, None
cell = structure_used.cell.T
symbols = atomic_symbol(structure_used.atomic_numbers)
reduced = structure_used.coords
structure = pychemia.Structure(cell=cell, symbols=symbols, reduced=reduced)
entry_id = a.id
if best_calculation is not None:
calculation_id = best_calculation.id
energy_pa = best_calculation.energy_pa
energy = best_calculation.energy
band_gap = best_calculation.band_gap
settings = best_calculation.settings
try:
spacegroup_number = best_calculation.output.spacegroup.number
except AttributeError:
spacegroup_number = None
else:
calculation_id = None
energy_pa = None
energy = None
band_gap = None
settings = None
spacegroup_number = None
from_output = False
try:
symm = pychemia.crystal.CrystalSymmetry(structure)
sym2 = symm.number(1E-2)
except ValueError:
sym2 = None
properties = {'oqmd': {'structure_id': structure_id,
'entry_id': entry_id,
'calculation_id': calculation_id,
'energy_pa': energy_pa,
'energy': energy,
'band_gap': band_gap,
'settings': settings,
'from_output': from_output,
'calculation_name': calculation_name,
'structure_name': structure_name,
'spacegroup_number': spacegroup_number},
'spacegroup_number': {'value': sym2, 'symprec': 1E-2}}
return structure, properties
def getter(entry_ids, db_settings, current, start=0):
pcdb = pychemia.db.get_database(db_settings)
ret = []
index = 0
n = 0
initial = start * jump
final = min(start * jump + jump, len(entry_ids))
print('Process: %2d Processing from %6d to %6d total: %d' % (start, initial, final, len(entry_ids)))
for a_id in entry_ids[initial:final]:
if a_id not in current[index:]:
ret.append(a_id)
n += 1
else:
index = current.index(a_id)
if index + 1 < len(current) and current[index + 1] == a_id:
print('We found at least one duplicate!')
duplicate = False
for entry in pcdb.db.pychemia_entries.find({'properties.oqmd.entry_id': a_id}):
if duplicate:
print('Removing PyChemiaDB entry: %s' % str(entry['_id']))
pcdb.db.pychemia_entries.remove({'_id': entry['_id']})
duplicate = True
print('Process: %2d Entries missing: %3d' % (start, len(ret)))
return ret
def setter(db_settings, to_insert):
print('Processing %d entries - ' % len(to_insert), end='')
pcdb = pychemia.db.get_database(db_settings)
if hasattr(os, 'getppid'):
print('parent process: %d - ' % os.getppid(), end='')
print('process id: %d' % os.getpid())
index = 0
for oqmd_id in to_insert:
if index % 2000 == 0:
print(index, oqmd_id)
index += 1
structure = None
properties = None
a = Entry.objects.get(id=oqmd_id)
structure, properties = run_one(a)
if structure is not None:
entry_id = '%d_%s_' % (structure.nspecies, structure.formula)
n = len(entry_id)
texto = '%0' + ('%d' % (28 - n)) + 'd'
entry_id += texto % properties['oqmd']['entry_id']
if n > 17:
print("%2d - %s" % (28 - n, entry_id))
pcdb.insert(structure, properties=properties, entry_id=entry_id)
return 0
def getter_star(a_b):
return getter(*a_b)
def setter_star(a_b):
return setter(*a_b)
version = 0.1
jump = 10000
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Create or Update a PyChemia Database from the OQMD Database (www.oqmd.org)')
parser.add_argument('-dbname', metavar='<DATABASE>', type=str, help='Database Name', default='PyChemiaMasterDB')
parser.add_argument('-port', metavar='<PORTNUMBER>', type=int, help='Port (default: 27017)', default=27017)
parser.add_argument('-ssl', metavar='<SSL>', type=bool, help='Using SSL (default:no)', default=False)
parser.add_argument('-user', metavar='<USERNAME>', type=str, help='Database Username', default=None)
parser.add_argument('-host', metavar='<HOSTNAME>', type=str, help='Hostname (default: localhost)',
default='localhost')
parser.add_argument('-nprocs', metavar='N', type=int,
help='Number of concurrent proccess (default: Number of CPUs)', default=None)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('pychemia')
logger.addHandler(logging.NullHandler())
logger.setLevel(logging.INFO)
db_settings = {'name': args.dbname, 'host': args.host, 'port': args.port, 'ssl': args.ssl}
if args.user is not None:
passwd = input('Password: ')
db_settings['user'] = args.user
db_settings['passwd'] = passwd
print('Database settings: \n%s\b' % db_settings)
pcdb = pychemia.db.get_database(db_settings)
nitems = pcdb.entries.count()
print('Number of entries in the current PyChemia Database: %d' % nitems)
current = []
for entry in pcdb.db.pychemia_entries.find({'properties.oqmd.entry_id': {'$exists': True}}):
current.append(entry['properties']['oqmd']['entry_id'])
current.sort()
print('Number of entries coming from OQMD: %d' % len(current))
print('Number of entries in OQMD...', end='')
queryset = Entry.objects.all()
entry_ids = [entry.id for entry in queryset]
print('%d' % len(entry_ids))
if args.nprocs is None:
nprocs = cpu_count()
else:
nprocs = args.nprocs
print('Creating a pool of %d processes for feeding the database' % nprocs)
pool = Pool(processes=nprocs)
argus = []
a_args = range((len(entry_ids) / jump) + 1)
to_insert = pool.map(getter_star, itertools.izip(itertools.repeat(entry_ids),
itertools.repeat(db_settings),
itertools.repeat(current), a_args), chunksize=1)
pool.close()
print(len(to_insert))
print(db_settings)
ps = [None for x in range(nprocs)]
counter = 0
for i in range(len(to_insert)):
if len(to_insert[i]) > 0:
setter(db_settings, to_insert[i])
| true | true |
1c336c1dabe0cc6cdb07b243776fe77d86a4311f | 874 | py | Python | lib/cocoapi_windows/PythonAPI/setup.py | Shank2358/NPMMR-Det | 414d148ff2ba5edbe870a8dafb6336845fb9ffbb | [
"Apache-2.0"
] | 27 | 2021-01-09T07:35:45.000Z | 2022-02-06T03:18:54.000Z | lib/cocoapi_windows/PythonAPI/setup.py | Shank2358/NPMMR-Det | 414d148ff2ba5edbe870a8dafb6336845fb9ffbb | [
"Apache-2.0"
] | 4 | 2021-07-20T07:16:03.000Z | 2022-03-29T14:22:59.000Z | lib/cocoapi_windows/PythonAPI/setup.py | Shank2358/NPMMR-Det | 414d148ff2ba5edbe870a8dafb6336845fb9ffbb | [
"Apache-2.0"
] | 1 | 2021-12-26T09:12:08.000Z | 2021-12-26T09:12:08.000Z | from distutils.core import setup
from Cython.Build import cythonize
from distutils.extension import Extension
import numpy as np
# To install and compile to your anaconda/python site-packages, simply run:
# $ pip install git+https://github.com/philferriere/cocoapi.git#subdirectory=PythonAPI
# Note that the original compile flags below are GCC flags unsupported by the Visual C++ 2015 build tools.
# They can safely be removed.
# Build pycocotools._mask from the COCO C mask API plus its Cython wrapper.
ext_modules = [
    Extension(
        'pycocotools._mask',
        sources=['../common/maskApi.c', 'pycocotools/_mask.pyx'],
        include_dirs = [np.get_include(), '../common'],
        extra_compile_args=[] # originally was ['-std=c99'],
    )
]

# Package metadata; cythonize() transpiles the .pyx sources before compiling.
setup(name='pycocotools',
      packages=['pycocotools'],
      package_dir = {'pycocotools': 'pycocotools'},
      version='2.0',
      ext_modules=
      cythonize(ext_modules)
      )
| 32.37037 | 106 | 0.688787 | from distutils.core import setup
from Cython.Build import cythonize
from distutils.extension import Extension
import numpy as np
Extension(
'pycocotools._mask',
sources=['../common/maskApi.c', 'pycocotools/_mask.pyx'],
include_dirs = [np.get_include(), '../common'],
extra_compile_args=[]
)
]
setup(name='pycocotools',
packages=['pycocotools'],
package_dir = {'pycocotools': 'pycocotools'},
version='2.0',
ext_modules=
cythonize(ext_modules)
)
| true | true |
1c336dd763456ee791f4e197b59b28af345ec219 | 4,052 | py | Python | alipay/aop/api/request/AlipayMarketingCampaignDiscountBudgetQueryRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/request/AlipayMarketingCampaignDiscountBudgetQueryRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/request/AlipayMarketingCampaignDiscountBudgetQueryRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayMarketingCampaignDiscountBudgetQueryModel import AlipayMarketingCampaignDiscountBudgetQueryModel
class AlipayMarketingCampaignDiscountBudgetQueryRequest(object):
    """Request wrapper for the ``alipay.marketing.campaign.discount.budget.query`` API.

    Holds the business payload (``biz_content`` / ``biz_model``) together with
    the common gateway parameters, and serializes everything into the flat
    parameter dict expected by the Alipay gateway via :meth:`get_params`.
    """

    def __init__(self, biz_model=None):
        # Business payload and common gateway parameters; unset values stay
        # None and are omitted from get_params().
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict that is
        # converted through the model's from_alipay_dict factory.
        if isinstance(value, AlipayMarketingCampaignDiscountBudgetQueryModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayMarketingCampaignDiscountBudgetQueryModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # User-defined extra parameters must be a dict; other types are
        # silently ignored.
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Attach one extra text parameter to be sent with the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Serialize the request into the flat parameter dict for the gateway."""
        params = dict()
        params[P_METHOD] = 'alipay.marketing.campaign.discount.budget.query'
        params[P_VERSION] = self.version
        # biz_model takes precedence; biz_content may overwrite it below.
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """Return the file-upload parameters (none for this API)."""
        multipart_params = dict()
        return multipart_params
| 27.944828 | 148 | 0.65153 |
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayMarketingCampaignDiscountBudgetQueryModel import AlipayMarketingCampaignDiscountBudgetQueryModel
class AlipayMarketingCampaignDiscountBudgetQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayMarketingCampaignDiscountBudgetQueryModel):
self._biz_content = value
else:
self._biz_content = AlipayMarketingCampaignDiscountBudgetQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.marketing.campaign.discount.budget.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| true | true |
1c336e553ba984f8fa1f69078ffb7e7486ff9172 | 9,600 | py | Python | python/codegen/generate_vhdl.py | MaartenBaert/xormix | 2b649bd9c2defd00dbffefb59db4de0b40785714 | [
"BSL-1.0",
"Apache-2.0",
"MIT"
] | 17 | 2022-02-21T02:23:30.000Z | 2022-03-03T03:38:54.000Z | python/codegen/generate_vhdl.py | MaartenBaert/xormix | 2b649bd9c2defd00dbffefb59db4de0b40785714 | [
"BSL-1.0",
"Apache-2.0",
"MIT"
] | null | null | null | python/codegen/generate_vhdl.py | MaartenBaert/xormix | 2b649bd9c2defd00dbffefb59db4de0b40785714 | [
"BSL-1.0",
"Apache-2.0",
"MIT"
] | null | null | null | # Copyright (c) 2020-2021 Maarten Baert <info@maartenbaert.be>
# Available under the MIT License - see LICENSE.txt for details.
import math
import random
import xormix16
import xormix24
import xormix32
import xormix48
import xormix64
import xormix96
import xormix128
# Deterministic seed so the generated RTL/testbench sources are reproducible
# from run to run.
random.seed(0x75cf32031077f039)

# Map from xormix state width (bits) to the module providing its constants
# (feedback matrix, per-stream salts, output shuffle and shift offsets).
modules = {
    16: xormix16,
    24: xormix24,
    32: xormix32,
    48: xormix48,
    64: xormix64,
    96: xormix96,
    128: xormix128,
}
def generate_rtl(n, filename):
    """Write the synthesizable VHDL implementation of the n-bit xormix PRNG.

    Pulls the width-specific constants (feedback matrix, per-stream salts,
    output shuffle and shift offsets) from ``modules[n]`` and emits a complete
    ``xormix<n>`` entity/architecture pair to ``filename``.

    :param n: state width in bits (must be a key of ``modules``)
    :param filename: path of the VHDL file to (over)write
    """
    # Decimal field width used to right-align bit indices in the output.
    l = math.ceil(math.log10(n))
    matrix = modules[n].matrix
    salts = modules[n].salts
    shuffle = modules[n].shuffle
    shifts = modules[n].shifts
    with open(filename, 'w') as f:
        # --- file header, entity declaration, generics and ports ---
        f.write(f'-- Copyright (c) 2020-2021 Maarten Baert <info@maartenbaert.be>\n')
        f.write(f'-- Available under the MIT License - see LICENSE.txt for details.\n')
        f.write(f'\n')
        f.write(f'-- This file was generated by `generate_vhdl.py`.\n')
        f.write(f'\n')
        f.write(f'library ieee;\n')
        f.write(f'use ieee.std_logic_1164.all;\n')
        f.write(f'\n')
        f.write(f'entity xormix{n} is\n')
        f.write(f'    generic (\n')
        f.write(f'        streams : integer range 1 to {n} := 1\n')
        f.write(f'    );\n')
        f.write(f'    port (\n')
        f.write(f'        \n')
        f.write(f'        -- clock and synchronous reset\n')
        f.write(f'        clk : in std_logic;\n')
        f.write(f'        rst : in std_logic;\n')
        f.write(f'        \n')
        f.write(f'        -- configuration\n')
        f.write(f'        seed_x : in std_logic_vector({n - 1} downto 0);\n')
        f.write(f'        seed_y : in std_logic_vector({n} * streams - 1 downto 0);\n')
        f.write(f'        \n')
        f.write(f'        -- random number generator\n')
        f.write(f'        enable : in std_logic;\n')
        f.write(f'        result : out std_logic_vector({n} * streams - 1 downto 0)\n')
        f.write(f'        \n')
        f.write(f'    );\n')
        f.write(f'end xormix{n};\n')
        f.write(f'\n')
        # --- architecture: per-stream salt constants and state registers ---
        f.write(f'architecture rtl of xormix{n} is\n')
        f.write(f'    \n')
        f.write(f'    type salts_t is array(0 to {n - 1}) of std_logic_vector({n - 1} downto 0);\n')
        f.write(f'    constant salts : salts_t := (\n')
        # Four hex salt constants per output line.
        for i in range(0, n, 4):
            row = ', '.join(f'x"{x:0{n // 4}x}"' for x in salts[i : i + 4])
            f.write(f'        ' + row + ('\n' if i == n - 4 else ',\n'))
        f.write(f'    );\n')
        f.write(f'    \n')
        f.write(f'    signal r_state_x : std_logic_vector({n - 1} downto 0);\n')
        f.write(f'    signal r_state_y : std_logic_vector({n} * streams - 1 downto 0);\n')
        f.write(f'    \n')
        f.write(f'begin\n')
        f.write(f'    \n')
        f.write(f'    result <= r_state_y;\n')
        f.write(f'    \n')
        f.write(f'    process (clk)\n')
        f.write(f'        \n')
        f.write(f'        variable v_state_y : std_logic_vector({n} * streams - 1 downto 0);\n')
        f.write(f'        \n')
        f.write(f'        variable v_mixin : std_logic_vector({n - 1} downto 0);\n')
        f.write(f'        variable v_mixup : std_logic_vector({n - 1} downto 0);\n')
        f.write(f'        variable v_res : std_logic_vector({n // 2 - 1} downto 0);\n')
        f.write(f'        \n')
        f.write(f'    begin\n')
        f.write(f'        if rising_edge(clk) then\n')
        f.write(f'            if rst = \'1\' then\n')
        f.write(f'                \n')
        f.write(f'                r_state_x <= seed_x;\n')
        f.write(f'                r_state_y <= seed_y;\n')
        f.write(f'                \n')
        f.write(f'            elsif enable = \'1\' then\n')
        f.write(f'                \n')
        # LFSR-style update of the shared state x: each bit is the XOR of the
        # feedback-matrix taps for that row.
        for i in range(n):
            row = ' xor '.join(f'r_state_x({j:{l}})' for j in matrix[i])
            f.write(f'                r_state_x({i:{l}}) <= {row};\n')
        f.write(f'                \n')
        # First mixing pass over all streams (reads the registered state y,
        # writes the intermediate variable v_state_y).
        f.write(f'                for i in 0 to streams - 1 loop\n')
        f.write(f'                    v_mixin := r_state_x xor salts(i);\n')
        f.write(f'                    v_mixup := r_state_y({n} * ((i + 1) mod streams) + {n - 1} downto {n} * ((i + 1) mod streams));\n')
        for k in range(n // 2):
            f.write(f'                    v_res({k:{l}}) := v_mixup({k:{l}}) xor (v_mixup({k + shifts[0]:{l}}) and not v_mixup({k + shifts[1]:{l}})) xor v_mixup({k + shifts[2]:{l}}) xor v_mixup({k + shifts[3]:{l}}) xor v_mixin((i + {shuffle[k]:{l}}) mod {n});\n')
        f.write(f'                    v_state_y({n} * i + {n - 1} downto {n} * i) := v_res & r_state_y({n} * i + {n - 1} downto {n} * i + {n // 2});\n')
        f.write(f'                end loop;\n')
        f.write(f'                \n')
        # Second mixing pass (reads v_state_y, writes the register r_state_y;
        # uses the second half of the shuffle table).
        f.write(f'                for i in 0 to streams - 1 loop\n')
        f.write(f'                    v_mixin := r_state_x xor salts(i);\n')
        f.write(f'                    v_mixup := v_state_y({n} * ((i + 1) mod streams) + {n - 1} downto {n} * ((i + 1) mod streams));\n')
        for k in range(n // 2):
            f.write(f'                    v_res({k:{l}}) := v_mixup({k:{l}}) xor (v_mixup({k + shifts[0]:{l}}) and not v_mixup({k + shifts[1]:{l}})) xor v_mixup({k + shifts[2]:{l}}) xor v_mixup({k + shifts[3]:{l}}) xor v_mixin((i + {shuffle[k + n // 2]:{l}}) mod {n});\n')
        f.write(f'                    r_state_y({n} * i + {n - 1} downto {n} * i) <= v_res & v_state_y({n} * i + {n - 1} downto {n} * i + {n // 2});\n')
        f.write(f'                end loop;\n')
        f.write(f'                \n')
        f.write(f'            end if;\n')
        f.write(f'        end if;\n')
        f.write(f'    end process;\n')
        f.write(f'    \n')
        f.write(f'end rtl;\n')
        f.write(f'\n')
def generate_tb(n, filename):
    """Write a self-checking VHDL testbench for the n-bit xormix generator.

    Draws a random seed, runs the Python reference model
    (``modules[n].next_state``) for a fixed number of steps, and emits a
    testbench that drives the DUT with the same seed and compares every
    output word against the precomputed reference values.

    :param n: state width in bits (must be a key of ``modules``)
    :param filename: path of the VHDL testbench file to (over)write
    """
    test_streams = 4
    test_outputs = 100
    # seed[0] seeds state x, seed[1:] seed the per-stream state y words.
    seed = [random.getrandbits(n) for i in range(test_streams + 1)]
    state = seed.copy()
    output = []
    # Reference sequence: record the stream outputs before each state update.
    for i in range(test_outputs):
        output.append(state[1 :])
        modules[n].next_state(state)
    # NOTE(review): 'l' is computed but never used in this function.
    l = math.ceil(math.log10(n))
    with open(filename, 'w') as f:
        # --- file header and empty testbench entity ---
        f.write(f'-- Copyright (c) 2020-2021 Maarten Baert <info@maartenbaert.be>\n')
        f.write(f'-- Available under the MIT License - see LICENSE.txt for details.\n')
        f.write(f'\n')
        f.write(f'-- This file was generated by `generate_vhdl.py`.\n')
        f.write(f'\n')
        f.write(f'library ieee;\n')
        f.write(f'use ieee.std_logic_1164.all;\n')
        f.write(f'\n')
        f.write(f'entity xormix{n}_tb is\n')
        f.write(f'end xormix{n}_tb;\n')
        f.write(f'\n')
        # --- architecture: configuration constants and reference data ---
        f.write(f'architecture bhv of xormix{n}_tb is\n')
        f.write(f'    \n')
        f.write(f'    -- configuration\n')
        f.write(f'    constant streams : integer := {test_streams};\n')
        f.write(f'    constant results : integer := {test_outputs};\n')
        f.write(f'    constant seed_x : std_logic_vector({n - 1} downto 0) :=\n')
        f.write(f'        x"{seed[0]:0{n // 4}x}";\n')
        f.write(f'    constant seed_y : std_logic_vector({n} * streams - 1 downto 0) :=\n')
        # Streams are concatenated MSB-first, hence the reversed order.
        row = ''.join(f'{s:0{n // 4}x}' for s in reversed(seed[1 :]))
        f.write(f'        x"{row}";\n')
        f.write(f'    \n')
        f.write(f'    -- reference result\n')
        f.write(f'    type result_array_t is array(0 to results - 1) of std_logic_vector({n} * streams - 1 downto 0);\n')
        f.write(f'    signal ref_result : result_array_t := (\n')
        for i in range(test_outputs):
            row = ''.join(f'{s:0{n // 4}x}' for s in reversed(output[i]))
            f.write(f'        x"{row}"' + ('\n' if i == test_outputs - 1 else ',\n'))
        f.write(f'    );\n')
        f.write(f'    \n')
        f.write(f'    -- DUT signals\n')
        f.write(f'    signal clk : std_logic := \'0\';\n')
        f.write(f'    signal rst : std_logic;\n')
        f.write(f'    signal enable : std_logic;\n')
        f.write(f'    signal result : std_logic_vector({n} * streams - 1 downto 0);\n')
        f.write(f'    \n')
        f.write(f'    -- flag to stop simulation\n')
        f.write(f'    signal run : boolean := true;\n')
        f.write(f'    \n')
        f.write(f'begin\n')
        f.write(f'    \n')
        f.write(f'    -- DUT\n')
        f.write(f'    inst_xormix : entity work.xormix{n} generic map(\n')
        f.write(f'        streams => streams\n')
        f.write(f'    ) port map (\n')
        f.write(f'        clk => clk,\n')
        f.write(f'        rst => rst,\n')
        f.write(f'        seed_x => seed_x,\n')
        f.write(f'        seed_y => seed_y,\n')
        f.write(f'        enable => enable,\n')
        f.write(f'        result => result\n')
        f.write(f'    );\n')
        f.write(f'    \n')
        # --- free-running clock until 'run' is cleared ---
        f.write(f'    -- clock process\n')
        f.write(f'    process\n')
        f.write(f'    begin\n')
        f.write(f'        while run loop\n')
        f.write(f'            clk <= \'1\';\n')
        f.write(f'            wait for 5 ns;\n')
        f.write(f'            clk <= \'0\';\n')
        f.write(f'            wait for 5 ns;\n')
        f.write(f'        end loop;\n')
        f.write(f'        wait;\n')
        f.write(f'    end process;\n')
        f.write(f'    \n')
        # --- stimulus/checker: reset, then compare each output word ---
        f.write(f'    -- input/output process\n')
        f.write(f'    process\n')
        f.write(f'        variable errors : natural := 0;\n')
        f.write(f'    begin\n')
        f.write(f'        wait until rising_edge(clk);\n')
        f.write(f'        rst <= \'1\';\n')
        f.write(f'        enable <= \'0\';\n')
        f.write(f'        wait until rising_edge(clk);\n')
        f.write(f'        rst <= \'0\';\n')
        f.write(f'        enable <= \'1\';\n')
        f.write(f'        for i in 0 to results - 1 loop\n')
        f.write(f'            wait until rising_edge(clk);\n')
        f.write(f'            if result /= ref_result(i) then\n')
        f.write(f'                report "Incorrect result for i=" & integer\'image(i) severity warning;\n')
        f.write(f'                errors := errors + 1;\n')
        f.write(f'            end if;\n')
        f.write(f'        end loop;\n')
        f.write(f'        report "Test complete, number of errors: " & integer\'image(errors) severity note;\n')
        f.write(f'        run <= false;\n')
        f.write(f'        wait;\n')
        f.write(f'    end process;\n')
        f.write(f'    \n')
        f.write(f'end bhv;\n')
        f.write(f'\n')
# Regenerate the RTL and the testbench for every supported xormix width.
for n in modules:
    generate_rtl(n, f'../../vhdl/rtl/xormix{n}.vhd')
for n in modules:
    generate_tb(n, f'../../vhdl/tb/xormix{n}_tb.vhd')
| 41.921397 | 263 | 0.53125 |
import math
import random
import xormix16
import xormix24
import xormix32
import xormix48
import xormix64
import xormix96
import xormix128
# Fixed seed so the reference vectors baked into the generated testbenches
# are reproducible across runs of this generator.
random.seed(0x75cf32031077f039)
# Supported state widths, mapped to the Python reference model that supplies
# the update matrix / salts / shuffle tables and the software next_state().
modules = {
    16: xormix16,
    24: xormix24,
    32: xormix32,
    48: xormix48,
    64: xormix64,
    96: xormix96,
    128: xormix128,
}
def generate_rtl(n, filename):
    """Write the synthesizable VHDL implementation of the n-bit xormix RNG.

    n        -- state width in bits; must be a key of ``modules``
    filename -- path of the .vhd file to (over)write

    The update matrix, per-stream salts, shuffle table and shift constants
    come from the Python reference model ``modules[n]``, which keeps the
    generated hardware bit-exact with the software model.
    """
    # Decimal digit count of the largest bit index; used as the field width
    # when formatting indices so the generated VHDL columns line up.
    l = math.ceil(math.log10(n))
    matrix = modules[n].matrix
    salts = modules[n].salts
    shuffle = modules[n].shuffle
    shifts = modules[n].shifts
    with open(filename, 'w') as f:
        f.write(f'-- Copyright (c) 2020-2021 Maarten Baert <info@maartenbaert.be>\n')
        f.write(f'-- Available under the MIT License - see LICENSE.txt for details.\n')
        f.write(f'\n')
        f.write(f'-- This file was generated by `generate_vhdl.py`.\n')
        f.write(f'\n')
        f.write(f'library ieee;\n')
        f.write(f'use ieee.std_logic_1164.all;\n')
        f.write(f'\n')
        f.write(f'entity xormix{n} is\n')
        f.write(f'    generic (\n')
        f.write(f'        streams : integer range 1 to {n} := 1\n')
        f.write(f'    );\n')
        f.write(f'    port (\n')
        f.write(f'        \n')
        f.write(f'        -- clock and synchronous reset\n')
        f.write(f'        clk    : in std_logic;\n')
        f.write(f'        rst    : in std_logic;\n')
        f.write(f'        \n')
        f.write(f'        -- configuration\n')
        f.write(f'        seed_x : in std_logic_vector({n - 1} downto 0);\n')
        f.write(f'        seed_y : in std_logic_vector({n} * streams - 1 downto 0);\n')
        f.write(f'        \n')
        f.write(f'        -- random number generator\n')
        f.write(f'        enable : in std_logic;\n')
        f.write(f'        result : out std_logic_vector({n} * streams - 1 downto 0)\n')
        f.write(f'        \n')
        f.write(f'    );\n')
        f.write(f'end xormix{n};\n')
        f.write(f'\n')
        f.write(f'architecture rtl of xormix{n} is\n')
        f.write(f'    \n')
        f.write(f'    type salts_t is array(0 to {n - 1}) of std_logic_vector({n - 1} downto 0);\n')
        f.write(f'    constant salts : salts_t := (\n')
        # Four salt constants per generated source line.
        for i in range(0, n, 4):
            row = ', '.join(f'x"{x:0{n // 4}x}"' for x in salts[i : i + 4])
            f.write(f'        ' + row + ('\n' if i == n - 4 else ',\n'))
        f.write(f'    );\n')
        f.write(f'    \n')
        f.write(f'    signal r_state_x : std_logic_vector({n - 1} downto 0);\n')
        f.write(f'    signal r_state_y : std_logic_vector({n} * streams - 1 downto 0);\n')
        f.write(f'    \n')
        f.write(f'begin\n')
        f.write(f'    \n')
        f.write(f'    result <= r_state_y;\n')
        f.write(f'    \n')
        f.write(f'    process (clk)\n')
        f.write(f'        \n')
        f.write(f'        variable v_state_y : std_logic_vector({n} * streams - 1 downto 0);\n')
        f.write(f'        \n')
        f.write(f'        variable v_mixin : std_logic_vector({n - 1} downto 0);\n')
        f.write(f'        variable v_mixup : std_logic_vector({n - 1} downto 0);\n')
        f.write(f'        variable v_res   : std_logic_vector({n // 2 - 1} downto 0);\n')
        f.write(f'        \n')
        f.write(f'    begin\n')
        f.write(f'        if rising_edge(clk) then\n')
        f.write(f'            if rst = \'1\' then\n')
        f.write(f'                \n')
        f.write(f'                r_state_x <= seed_x;\n')
        f.write(f'                r_state_y <= seed_y;\n')
        f.write(f'                \n')
        f.write(f'            elsif enable = \'1\' then\n')
        f.write(f'                \n')
        # x-state update: each bit becomes the XOR of the taps listed in
        # matrix[i].
        for i in range(n):
            row = ' xor '.join(f'r_state_x({j:{l}})' for j in matrix[i])
            f.write(f'                r_state_x({i:{l}}) <= {row};\n')
        f.write(f'                \n')
        f.write(f'                for i in 0 to streams - 1 loop\n')
        f.write(f'                    v_mixin := r_state_x xor salts(i);\n')
        f.write(f'                    v_mixup := r_state_y({n} * ((i + 1) mod streams) + {n - 1} downto {n} * ((i + 1) mod streams));\n')
        # First mixing pass: per stream, n/2 freshly mixed bits (v_res) are
        # concatenated on top of the old upper half into v_state_y.
        for k in range(n // 2):
            f.write(f'                    v_res({k:{l}}) := v_mixup({k:{l}}) xor (v_mixup({k + shifts[0]:{l}}) and not v_mixup({k + shifts[1]:{l}})) xor v_mixup({k + shifts[2]:{l}}) xor v_mixup({k + shifts[3]:{l}}) xor v_mixin((i + {shuffle[k]:{l}}) mod {n});\n')
        f.write(f'                    v_state_y({n} * i + {n - 1} downto {n} * i) := v_res & r_state_y({n} * i + {n - 1} downto {n} * i + {n // 2});\n')
        f.write(f'                end loop;\n')
        f.write(f'                \n')
        f.write(f'                for i in 0 to streams - 1 loop\n')
        f.write(f'                    v_mixin := r_state_x xor salts(i);\n')
        f.write(f'                    v_mixup := v_state_y({n} * ((i + 1) mod streams) + {n - 1} downto {n} * ((i + 1) mod streams));\n')
        # Second mixing pass: same structure, but reads the intermediate
        # v_state_y and uses the second half of the shuffle table.
        for k in range(n // 2):
            f.write(f'                    v_res({k:{l}}) := v_mixup({k:{l}}) xor (v_mixup({k + shifts[0]:{l}}) and not v_mixup({k + shifts[1]:{l}})) xor v_mixup({k + shifts[2]:{l}}) xor v_mixup({k + shifts[3]:{l}}) xor v_mixin((i + {shuffle[k + n // 2]:{l}}) mod {n});\n')
        f.write(f'                    r_state_y({n} * i + {n - 1} downto {n} * i) <= v_res & v_state_y({n} * i + {n - 1} downto {n} * i + {n // 2});\n')
        f.write(f'                end loop;\n')
        f.write(f'                \n')
        f.write(f'            end if;\n')
        f.write(f'        end if;\n')
        f.write(f'    end process;\n')
        f.write(f'    \n')
        f.write(f'end rtl;\n')
        f.write(f'\n')
def generate_tb(n, filename):
    """Write a self-checking VHDL testbench for the n-bit xormix RNG.

    n        -- state width in bits; must be a key of ``modules``
    filename -- path of the _tb.vhd file to (over)write

    The expected output words are computed here with the Python reference
    model ``modules[n].next_state`` and baked into the testbench, which then
    compares them against the DUT cycle by cycle.
    """
    test_streams = 4
    test_outputs = 100
    # One random word for seed_x plus one per stream for seed_y.
    seed = [random.getrandbits(n) for i in range(test_streams + 1)]
    state = seed.copy()
    output = []
    for i in range(test_outputs):
        # state[0] is the x-state; the following `test_streams` words are the
        # per-stream outputs recorded as the reference result.
        output.append(state[1 :])
        modules[n].next_state(state)
    # NOTE(review): `l` is unused in this function (only generate_rtl needs
    # the index field width).
    l = math.ceil(math.log10(n))
    with open(filename, 'w') as f:
        f.write(f'-- Copyright (c) 2020-2021 Maarten Baert <info@maartenbaert.be>\n')
        f.write(f'-- Available under the MIT License - see LICENSE.txt for details.\n')
        f.write(f'\n')
        f.write(f'-- This file was generated by `generate_vhdl.py`.\n')
        f.write(f'\n')
        f.write(f'library ieee;\n')
        f.write(f'use ieee.std_logic_1164.all;\n')
        f.write(f'\n')
        f.write(f'entity xormix{n}_tb is\n')
        f.write(f'end xormix{n}_tb;\n')
        f.write(f'\n')
        f.write(f'architecture bhv of xormix{n}_tb is\n')
        f.write(f'    \n')
        f.write(f'    -- configuration\n')
        f.write(f'    constant streams : integer := {test_streams};\n')
        f.write(f'    constant results : integer := {test_outputs};\n')
        f.write(f'    constant seed_x : std_logic_vector({n - 1} downto 0) :=\n')
        f.write(f'        x"{seed[0]:0{n // 4}x}";\n')
        f.write(f'    constant seed_y : std_logic_vector({n} * streams - 1 downto 0) :=\n')
        # Streams are packed most-significant-first, hence the reversal.
        row = ''.join(f'{s:0{n // 4}x}' for s in reversed(seed[1 :]))
        f.write(f'        x"{row}";\n')
        f.write(f'    \n')
        f.write(f'    -- reference result\n')
        f.write(f'    type result_array_t is array(0 to results - 1) of std_logic_vector({n} * streams - 1 downto 0);\n')
        f.write(f'    signal ref_result : result_array_t := (\n')
        for i in range(test_outputs):
            row = ''.join(f'{s:0{n // 4}x}' for s in reversed(output[i]))
            f.write(f'        x"{row}"' + ('\n' if i == test_outputs - 1 else ',\n'))
        f.write(f'    );\n')
        f.write(f'    \n')
        f.write(f'    -- DUT signals\n')
        f.write(f'    signal clk    : std_logic := \'0\';\n')
        f.write(f'    signal rst    : std_logic;\n')
        f.write(f'    signal enable : std_logic;\n')
        f.write(f'    signal result : std_logic_vector({n} * streams - 1 downto 0);\n')
        f.write(f'    \n')
        f.write(f'    -- flag to stop simulation\n')
        f.write(f'    signal run : boolean := true;\n')
        f.write(f'    \n')
        f.write(f'begin\n')
        f.write(f'    \n')
        f.write(f'    -- DUT\n')
        f.write(f'    inst_xormix : entity work.xormix{n} generic map(\n')
        f.write(f'        streams => streams\n')
        f.write(f'    ) port map (\n')
        f.write(f'        clk    => clk,\n')
        f.write(f'        rst    => rst,\n')
        f.write(f'        seed_x => seed_x,\n')
        f.write(f'        seed_y => seed_y,\n')
        f.write(f'        enable => enable,\n')
        f.write(f'        result => result\n')
        f.write(f'    );\n')
        f.write(f'    \n')
        f.write(f'    -- clock process\n')
        f.write(f'    process\n')
        f.write(f'    begin\n')
        f.write(f'        while run loop\n')
        f.write(f'            clk <= \'1\';\n')
        f.write(f'            wait for 5 ns;\n')
        f.write(f'            clk <= \'0\';\n')
        f.write(f'            wait for 5 ns;\n')
        f.write(f'        end loop;\n')
        f.write(f'        wait;\n')
        f.write(f'    end process;\n')
        f.write(f'    \n')
        f.write(f'    -- input/output process\n')
        f.write(f'    process\n')
        f.write(f'        variable errors : natural := 0;\n')
        f.write(f'    begin\n')
        f.write(f'        wait until rising_edge(clk);\n')
        f.write(f'        rst <= \'1\';\n')
        f.write(f'        enable <= \'0\';\n')
        f.write(f'        wait until rising_edge(clk);\n')
        f.write(f'        rst <= \'0\';\n')
        f.write(f'        enable <= \'1\';\n')
        f.write(f'        for i in 0 to results - 1 loop\n')
        f.write(f'            wait until rising_edge(clk);\n')
        f.write(f'            if result /= ref_result(i) then\n')
        f.write(f'                report "Incorrect result for i=" & integer\'image(i) severity warning;\n')
        f.write(f'                errors := errors + 1;\n')
        f.write(f'            end if;\n')
        f.write(f'        end loop;\n')
        f.write(f'        report "Test complete, number of errors: " & integer\'image(errors) severity note;\n')
        f.write(f'        run <= false;\n')
        f.write(f'        wait;\n')
        f.write(f'    end process;\n')
        f.write(f'    \n')
        f.write(f'end bhv;\n')
        f.write(f'\n')
# Emit one RTL implementation and one matching self-checking testbench
# per supported xormix width.
for n in modules:
    generate_rtl(n, f'../../vhdl/rtl/xormix{n}.vhd')
for n in modules:
    generate_tb(n, f'../../vhdl/tb/xormix{n}_tb.vhd')
| true | true |
1c337243f51bf5c05b4d429314fb8f3d6588f946 | 3,421 | py | Python | django/djangoproject/settings.py | RajapandiR/django-reactjs | 15371ee816ef4dfecd4cc0f4243fc358796c375b | [
"MIT"
] | null | null | null | django/djangoproject/settings.py | RajapandiR/django-reactjs | 15371ee816ef4dfecd4cc0f4243fc358796c375b | [
"MIT"
] | null | null | null | django/djangoproject/settings.py | RajapandiR/django-reactjs | 15371ee816ef4dfecd4cc0f4243fc358796c375b | [
"MIT"
] | null | null | null | """
Django settings for djangoproject project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository -- rotate it and load
# it from an environment variable before any non-local deployment.
SECRET_KEY = 'yjb#&^z%!1rvv57rgz!gmrrf!#vsc_9068t5k_*yosh^r!ij6^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# An empty host list is only accepted while DEBUG is True; list the served
# hostnames here for production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'corsheaders',
    'djangoapp'
]

# Each middleware must be listed exactly once; this list previously contained
# 'django.middleware.common.CommonMiddleware' twice.  CorsMiddleware has to be
# placed as high as possible, and in any case before CommonMiddleware
# (django-cors-headers requirement).
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

# CORS_ORIGIN_WHITELIST = (
#     'localhost:3000/'
# )
# NOTE(review): allowing every origin is acceptable for local development
# only; switch to CORS_ORIGIN_WHITELIST before deploying.
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'djangoproject.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'djangoproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# NOTE(review): root user with an empty password and hard-coded credentials
# are development-only; load these from the environment for any shared setup.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'fb',
        'USER': 'root',
        'PASSWORD': '',
        'HOST': 'localhost',
        'PORT': 3306
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| 25.154412 | 91 | 0.688103 |
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
# NOTE(review): secret key is committed to the repo -- rotate it and load it
# from an environment variable before any non-local deployment.
SECRET_KEY = 'yjb#&^z%!1rvv57rgz!gmrrf!#vsc_9068t5k_*yosh^r!ij6^'
# Development-only: DEBUG must be False in production, and ALLOWED_HOSTS must
# then list the served hostnames.
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'corsheaders',
    'djangoapp'
]

# Each middleware must be listed exactly once; this list previously contained
# 'django.middleware.common.CommonMiddleware' twice.  CorsMiddleware has to be
# placed as high as possible, and in any case before CommonMiddleware
# (django-cors-headers requirement).
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

# CORS_ORIGIN_WHITELIST = (
#     'localhost:3000/'
# )
# NOTE(review): allowing every origin is acceptable for local development
# only; switch to CORS_ORIGIN_WHITELIST before deploying.
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'djangoproject.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'djangoproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# NOTE(review): root user with an empty password and hard-coded credentials
# are development-only; load these from the environment for any shared setup.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'fb',
        'USER': 'root',
        'PASSWORD': '',
        'HOST': 'localhost',
        'PORT': 3306
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| true | true |
1c3372470083842622d5ff11c75962a863ecf220 | 650 | py | Python | msgboard/migrations/0001_initial.py | Yoavsc129/beyond-tutorial | 733d2d26df980f2ccb3fa7d1595112bee089d8be | [
"MIT"
] | null | null | null | msgboard/migrations/0001_initial.py | Yoavsc129/beyond-tutorial | 733d2d26df980f2ccb3fa7d1595112bee089d8be | [
"MIT"
] | null | null | null | msgboard/migrations/0001_initial.py | Yoavsc129/beyond-tutorial | 733d2d26df980f2ccb3fa7d1595112bee089d8be | [
"MIT"
] | null | null | null | # Generated by Django 4.0.2 on 2022-02-08 09:07
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema migration: creates the ``Message`` table for the
    message-board app (auto-generated by ``makemigrations``)."""

    # First migration of this app, so there is nothing to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.CharField(max_length=200)),
                ('text', models.TextField()),
                # Defaults to the creation time (timezone-aware).
                ('date', models.DateTimeField(default=django.utils.timezone.now)),
            ],
        ),
    ]
| 26 | 117 | 0.586154 |
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema migration: creates the ``Message`` table for the
    message-board app (auto-generated by ``makemigrations``)."""

    # First migration of this app, so there is nothing to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.CharField(max_length=200)),
                ('text', models.TextField()),
                # Defaults to the creation time (timezone-aware).
                ('date', models.DateTimeField(default=django.utils.timezone.now)),
            ],
        ),
    ]
| true | true |
1c33726a2db273b76310d3a6da8874396a32f59d | 4,270 | py | Python | alipay/aop/api/domain/DishonestyDetailInfo.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/DishonestyDetailInfo.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/DishonestyDetailInfo.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class DishonestyDetailInfo(object):
    """Dishonesty (court-enforcement blacklist) detail record exchanged with
    the Alipay OpenAPI.

    A plain value object: every field defaults to None and is exposed through
    a read/write property.  ``to_alipay_dict`` / ``from_alipay_dict`` convert
    between instances and the wire-format dict.
    """

    # Field names, in the order the wire format serializes them.
    _FIELDS = ('behavior', 'case_code', 'enforce_court', 'id_number',
               'name', 'performance', 'publish_date', 'region')

    def __init__(self):
        # Backing attributes (_behavior, _case_code, ...) all start as None.
        for field in self._FIELDS:
            setattr(self, '_' + field, None)

    # Plain pass-through accessors for each backing attribute.
    behavior = property(lambda self: self._behavior,
                        lambda self, value: setattr(self, '_behavior', value))
    case_code = property(lambda self: self._case_code,
                         lambda self, value: setattr(self, '_case_code', value))
    enforce_court = property(lambda self: self._enforce_court,
                             lambda self, value: setattr(self, '_enforce_court', value))
    id_number = property(lambda self: self._id_number,
                         lambda self, value: setattr(self, '_id_number', value))
    name = property(lambda self: self._name,
                    lambda self, value: setattr(self, '_name', value))
    performance = property(lambda self: self._performance,
                           lambda self, value: setattr(self, '_performance', value))
    publish_date = property(lambda self: self._publish_date,
                            lambda self, value: setattr(self, '_publish_date', value))
    region = property(lambda self: self._region,
                      lambda self, value: setattr(self, '_region', value))

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict, delegating to the
        value's own ``to_alipay_dict`` when it provides one."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                # Falsy fields (None, '', 0, ...) are omitted from the payload.
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a wire-format dict; returns None for a
        falsy input."""
        if not d:
            return None
        o = DishonestyDetailInfo()
        for field in DishonestyDetailInfo._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| 29.246575 | 77 | 0.574239 |
import json
from alipay.aop.api.constant.ParamConstants import *
class DishonestyDetailInfo(object):
    """Dishonesty (court-enforcement blacklist) detail record exchanged with
    the Alipay OpenAPI.

    A plain value object: every field defaults to None and is exposed through
    a read/write property.  ``to_alipay_dict`` / ``from_alipay_dict`` convert
    between instances and the wire-format dict.
    """

    # Field names, in the order the wire format serializes them.
    _FIELDS = ('behavior', 'case_code', 'enforce_court', 'id_number',
               'name', 'performance', 'publish_date', 'region')

    def __init__(self):
        # Backing attributes (_behavior, _case_code, ...) all start as None.
        for field in self._FIELDS:
            setattr(self, '_' + field, None)

    # Plain pass-through accessors for each backing attribute.
    behavior = property(lambda self: self._behavior,
                        lambda self, value: setattr(self, '_behavior', value))
    case_code = property(lambda self: self._case_code,
                         lambda self, value: setattr(self, '_case_code', value))
    enforce_court = property(lambda self: self._enforce_court,
                             lambda self, value: setattr(self, '_enforce_court', value))
    id_number = property(lambda self: self._id_number,
                         lambda self, value: setattr(self, '_id_number', value))
    name = property(lambda self: self._name,
                    lambda self, value: setattr(self, '_name', value))
    performance = property(lambda self: self._performance,
                           lambda self, value: setattr(self, '_performance', value))
    publish_date = property(lambda self: self._publish_date,
                            lambda self, value: setattr(self, '_publish_date', value))
    region = property(lambda self: self._region,
                      lambda self, value: setattr(self, '_region', value))

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict, delegating to the
        value's own ``to_alipay_dict`` when it provides one."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                # Falsy fields (None, '', 0, ...) are omitted from the payload.
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a wire-format dict; returns None for a
        falsy input."""
        if not d:
            return None
        o = DishonestyDetailInfo()
        for field in DishonestyDetailInfo._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| true | true |
1c337336b004d37a33e535c259ebd287915bae27 | 4,939 | py | Python | Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py/trash/_402_cre_decimal.py | hehuanlin123/DeepLearning | 6b7feabbbde9ac9489f76da4c06eeb6703fb165a | [
"MIT"
] | 1 | 2020-02-28T12:03:39.000Z | 2020-02-28T12:03:39.000Z | Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py/trash/_402_cre_decimal.py | hehuanlin123/DeepLearning | 6b7feabbbde9ac9489f76da4c06eeb6703fb165a | [
"MIT"
] | null | null | null | Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py/trash/_402_cre_decimal.py | hehuanlin123/DeepLearning | 6b7feabbbde9ac9489f76da4c06eeb6703fb165a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 3 14:35:15 2018
@author: Kazuki
"""
import os
import pandas as pd
import gc
from multiprocessing import Pool
import multiprocessing
from glob import glob
import utils
utils.start(__file__)
#==============================================================================
# Join key shared by all Home Credit tables.
KEY = 'SK_ID_CURR'
# Prefix applied to every credit-card-balance feature built in this script.
PREF = 'cre'
# One worker per core for the per-customer extraction below.
NTHREAD = multiprocessing.cpu_count()
# =============================================================================
# load
# =============================================================================
# Monthly credit-card balance table; newest month first within each customer
# after this sort.
cre = utils.read_pickles('../data/credit_card_balance')
cre.sort_values(['SK_ID_CURR', 'MONTHS_BALANCE'], inplace=True, ascending=[True, False])
# One row per customer; all features below are joined onto this frame.
base = cre[[KEY]].drop_duplicates().set_index(KEY)
# =============================================================================
# latest
# =============================================================================
# Rows belonging to each customer's most recent month on record.
latest = cre[cre['MONTHS_BALANCE']==cre.groupby('SK_ID_CURR')['MONTHS_BALANCE'].transform(max)]
c1 = 'NAME_CONTRACT_STATUS'
# Contract-status counts, and their row-normalized shares, in the latest month.
df_sum = pd.crosstab(latest[KEY], latest[c1])
df_sum.columns = [f'{PREF}_latest_{c1}_{str(c2).replace(" ", "-")}_sum' for c2 in df_sum.columns]
df_norm = pd.crosstab(latest[KEY], latest[c1], normalize='index')
df_norm.columns = [f'{PREF}_latest_{c1}_{str(c2).replace(" ", "-")}_norm' for c2 in df_norm.columns]
df = pd.concat([df_sum, df_norm], axis=1)
col = df.columns.tolist()
base = pd.concat([base, df], axis=1)
# Gaps after the join are filled with a -1 sentinel.
base[col] = base[col].fillna(-1)
#base[f'{PREF}_latest_CNT_INSTALMENT_min'] = latest.groupby(KEY).CNT_INSTALMENT.min()
#base[f'{PREF}_latest_CNT_INSTALMENT_mean'] = latest.groupby(KEY).CNT_INSTALMENT.mean()
#base[f'{PREF}_latest_CNT_INSTALMENT_max'] = latest.groupby(KEY).CNT_INSTALMENT.max()
#base[f'{PREF}_latest_CNT_INSTALMENT_max-min'] = base[f'{PREF}_latest_CNT_INSTALMENT_max'] - base[f'{PREF}_latest_CNT_INSTALMENT_min']
# =============================================================================
# binary features
# =============================================================================
# Per-month 0/1 flag columns whose histories get packed by to_decimal() below.
col_binary = []
# Days-past-due threshold flags: SK_DPD_over1 .. SK_DPD_over10.
for i in range(1, 11):
    cre[f'SK_DPD_over{i}'] = (cre.SK_DPD>=i)*1
    col_binary.append(f'SK_DPD_over{i}')
# One indicator column per contract status.
for c in ['Active', 'Completed', 'Signed', 'Sent proposal', 'Refused', 'Demand', 'Approved']:
    cre[f'is_{c.replace(" ", "-")}'] = (cre.NAME_CONTRACT_STATUS==c)*1
    col_binary.append(f'is_{c.replace(" ", "-")}')
# These two are created per customer inside multi(), not on `cre` itself.
col_binary.append('AMT_CREDIT_LIMIT_ACTUAL_dec')
col_binary.append('AMT_CREDIT_LIMIT_ACTUAL_inc')
ids = cre.SK_ID_CURR.unique()
# The 96 months of the observation window, used to zero-fill missing months.
all_months = pd.DataFrame(list(range(-96, 0)), columns=['MONTHS_BALANCE'])
def to_decimal(x):
    """Pack a sequence of digit flags into a single float.

    The digits are concatenated, the string is reversed, and the first
    character becomes the integer part with the rest as the fraction, e.g.
    [1, 0, 1] -> '101' -> '101' reversed -> 1.01.
    """
    joined = ''.join(str(digit) for digit in x)
    flipped = joined[::-1]
    return float(f'{flipped[0]}.{flipped[1:]}')
def multi(id_curr):
    """Build decimal-encoded monthly time-series features for one customer.

    For every binary column in ``col_binary`` the 96-month history is packed
    into a single float whose decimal digits are the monthly 0/1 flags (see
    ``to_decimal``).  Months missing from the customer's history are filled
    in as all-zero rows first.

    id_curr -- SK_ID_CURR of the customer to process.

    Returns a pandas Series holding min / max / (max - min) per binary
    column, plus the customer id under the 'SK_ID_CURR' key.
    """
    # .copy() so the column assignments below do not write into a view of the
    # shared `cre` frame (avoids SettingWithCopyWarning / silent no-ops).
    tmp = cre[cre.SK_ID_CURR==id_curr].copy()
    # Month-over-month change of the credit limit; rows are sorted newest
    # first, hence diff(-1) compares each month with the previous one.
    tmp['AMT_CREDIT_LIMIT_ACTUAL'] = tmp['AMT_CREDIT_LIMIT_ACTUAL'].diff(-1)
    tmp['AMT_CREDIT_LIMIT_ACTUAL_dec'] = (tmp['AMT_CREDIT_LIMIT_ACTUAL']<0)*1
    tmp['AMT_CREDIT_LIMIT_ACTUAL_inc'] = (tmp['AMT_CREDIT_LIMIT_ACTUAL']>0)*1
    # Rows for the months this customer is missing; zero-filled below.
    shortage = all_months[~all_months.MONTHS_BALANCE.isin(tmp['MONTHS_BALANCE'])].copy()
    shortage['SK_ID_CURR'] = id_curr
    tmp2 = pd.concat([shortage, tmp]).sort_values(['MONTHS_BALANCE'], ascending=False).fillna(0)
    tmp2[col_binary] = tmp2[col_binary].astype(int)
    gr = tmp2.groupby(['SK_ID_CURR', 'MONTHS_BALANCE'])
    # Per column: aggregate within each month, then pack the 96 monthly
    # digits into one decimal number.
    tmp_min = gr[col_binary].min().apply(to_decimal)
    tmp_max = gr[col_binary].max().apply(to_decimal)
    # BUGFIX: this line was `tmp_diff = tmp_max = tmp_min`, which clobbered
    # tmp_max and made the '_max-ts' and '_max-min-ts' features copies of the
    # minimum.
    tmp_diff = tmp_max - tmp_min
    tmp = pd.concat([
            tmp_min.add_prefix(f'{PREF}_').add_suffix('_min-ts'),
            tmp_max.add_prefix(f'{PREF}_').add_suffix('_max-ts'),
            tmp_diff.add_prefix(f'{PREF}_').add_suffix('_max-min-ts')
            ])
    tmp['SK_ID_CURR'] = id_curr
    return tmp
# =============================================================================
# main
# =============================================================================
# Extract the time-series features customer-by-customer in parallel.
pool = Pool(NTHREAD)
callback = pool.map(multi, ids)
pool.close()
df = pd.concat(callback, axis=1).T.set_index('SK_ID_CURR')
base = pd.concat([base, df], axis=1)
# =============================================================================
# merge
# =============================================================================
base.reset_index(inplace=True)
# Guard against accidentally generating the same feature name twice.
if base.columns.duplicated().sum() != 0:
    raise Exception( base.columns[base.columns.duplicated()] )
train = utils.load_train([KEY])
train = pd.merge(train, base, on=KEY, how='left').drop(KEY, axis=1)
utils.to_pickles(train, '../data/402_train', utils.SPLIT_SIZE)
del train; gc.collect()
test = utils.load_test([KEY])
test = pd.merge(test, base, on=KEY, how='left').drop(KEY, axis=1)
utils.to_pickles(test, '../data/402_test', utils.SPLIT_SIZE)
del test; gc.collect()
# =============================================================================
utils.end(__file__)
| 36.858209 | 134 | 0.554161 |
import os
import pandas as pd
import gc
from multiprocessing import Pool
import multiprocessing
from glob import glob
import utils
utils.start(__file__)
# Join key shared by all Home Credit tables.
KEY = 'SK_ID_CURR'
# Prefix applied to every credit-card-balance feature built in this script.
PREF = 'cre'
# One worker per core for the per-customer extraction below.
NTHREAD = multiprocessing.cpu_count()
# Monthly credit-card balance table; newest month first within each customer
# after this sort.
cre = utils.read_pickles('../data/credit_card_balance')
cre.sort_values(['SK_ID_CURR', 'MONTHS_BALANCE'], inplace=True, ascending=[True, False])
# One row per customer; all features below are joined onto this frame.
base = cre[[KEY]].drop_duplicates().set_index(KEY)
# Rows belonging to each customer's most recent month on record.
latest = cre[cre['MONTHS_BALANCE']==cre.groupby('SK_ID_CURR')['MONTHS_BALANCE'].transform(max)]
c1 = 'NAME_CONTRACT_STATUS'
# Contract-status counts, and their row-normalized shares, in the latest month.
df_sum = pd.crosstab(latest[KEY], latest[c1])
df_sum.columns = [f'{PREF}_latest_{c1}_{str(c2).replace(" ", "-")}_sum' for c2 in df_sum.columns]
df_norm = pd.crosstab(latest[KEY], latest[c1], normalize='index')
df_norm.columns = [f'{PREF}_latest_{c1}_{str(c2).replace(" ", "-")}_norm' for c2 in df_norm.columns]
df = pd.concat([df_sum, df_norm], axis=1)
col = df.columns.tolist()
base = pd.concat([base, df], axis=1)
# Gaps after the join are filled with a -1 sentinel.
base[col] = base[col].fillna(-1)
# Per-month 0/1 flag columns whose histories get packed by to_decimal() below.
col_binary = []
# Days-past-due threshold flags: SK_DPD_over1 .. SK_DPD_over10.
for i in range(1, 11):
    cre[f'SK_DPD_over{i}'] = (cre.SK_DPD>=i)*1
    col_binary.append(f'SK_DPD_over{i}')
# One indicator column per contract status.
for c in ['Active', 'Completed', 'Signed', 'Sent proposal', 'Refused', 'Demand', 'Approved']:
    cre[f'is_{c.replace(" ", "-")}'] = (cre.NAME_CONTRACT_STATUS==c)*1
    col_binary.append(f'is_{c.replace(" ", "-")}')
# These two are created per customer inside multi(), not on `cre` itself.
col_binary.append('AMT_CREDIT_LIMIT_ACTUAL_dec')
col_binary.append('AMT_CREDIT_LIMIT_ACTUAL_inc')
ids = cre.SK_ID_CURR.unique()
# The 96 months of the observation window, used to zero-fill missing months.
all_months = pd.DataFrame(list(range(-96, 0)), columns=['MONTHS_BALANCE'])
def to_decimal(x):
    """Pack a sequence of digit flags into a single float.

    The digits are concatenated, the string is reversed, and the first
    character becomes the integer part with the rest as the fraction, e.g.
    [1, 0, 1] -> '101' -> '101' reversed -> 1.01.
    """
    joined = ''.join(str(digit) for digit in x)
    flipped = joined[::-1]
    return float(f'{flipped[0]}.{flipped[1:]}')
def multi(id_curr):
    """Build decimal-encoded monthly time-series features for one customer.

    For every binary column in ``col_binary`` the 96-month history is packed
    into a single float whose decimal digits are the monthly 0/1 flags (see
    ``to_decimal``).  Months missing from the customer's history are filled
    in as all-zero rows first.

    id_curr -- SK_ID_CURR of the customer to process.

    Returns a pandas Series holding min / max / (max - min) per binary
    column, plus the customer id under the 'SK_ID_CURR' key.
    """
    # .copy() so the column assignments below do not write into a view of the
    # shared `cre` frame (avoids SettingWithCopyWarning / silent no-ops).
    tmp = cre[cre.SK_ID_CURR==id_curr].copy()
    # Month-over-month change of the credit limit; rows are sorted newest
    # first, hence diff(-1) compares each month with the previous one.
    tmp['AMT_CREDIT_LIMIT_ACTUAL'] = tmp['AMT_CREDIT_LIMIT_ACTUAL'].diff(-1)
    tmp['AMT_CREDIT_LIMIT_ACTUAL_dec'] = (tmp['AMT_CREDIT_LIMIT_ACTUAL']<0)*1
    tmp['AMT_CREDIT_LIMIT_ACTUAL_inc'] = (tmp['AMT_CREDIT_LIMIT_ACTUAL']>0)*1
    # Rows for the months this customer is missing; zero-filled below.
    shortage = all_months[~all_months.MONTHS_BALANCE.isin(tmp['MONTHS_BALANCE'])].copy()
    shortage['SK_ID_CURR'] = id_curr
    tmp2 = pd.concat([shortage, tmp]).sort_values(['MONTHS_BALANCE'], ascending=False).fillna(0)
    tmp2[col_binary] = tmp2[col_binary].astype(int)
    gr = tmp2.groupby(['SK_ID_CURR', 'MONTHS_BALANCE'])
    # Per column: aggregate within each month, then pack the 96 monthly
    # digits into one decimal number.
    tmp_min = gr[col_binary].min().apply(to_decimal)
    tmp_max = gr[col_binary].max().apply(to_decimal)
    # BUGFIX: this line was `tmp_diff = tmp_max = tmp_min`, which clobbered
    # tmp_max and made the '_max-ts' and '_max-min-ts' features copies of the
    # minimum.
    tmp_diff = tmp_max - tmp_min
    tmp = pd.concat([
            tmp_min.add_prefix(f'{PREF}_').add_suffix('_min-ts'),
            tmp_max.add_prefix(f'{PREF}_').add_suffix('_max-ts'),
            tmp_diff.add_prefix(f'{PREF}_').add_suffix('_max-min-ts')
            ])
    tmp['SK_ID_CURR'] = id_curr
    return tmp
# Extract the time-series features customer-by-customer in parallel.
pool = Pool(NTHREAD)
callback = pool.map(multi, ids)
pool.close()
df = pd.concat(callback, axis=1).T.set_index('SK_ID_CURR')
base = pd.concat([base, df], axis=1)
base.reset_index(inplace=True)
# Guard against accidentally generating the same feature name twice.
if base.columns.duplicated().sum() != 0:
    raise Exception( base.columns[base.columns.duplicated()] )
train = utils.load_train([KEY])
train = pd.merge(train, base, on=KEY, how='left').drop(KEY, axis=1)
utils.to_pickles(train, '../data/402_train', utils.SPLIT_SIZE)
del train; gc.collect()
test = utils.load_test([KEY])
test = pd.merge(test, base, on=KEY, how='left').drop(KEY, axis=1)
utils.to_pickles(test, '../data/402_test', utils.SPLIT_SIZE)
del test; gc.collect()
utils.end(__file__)
| true | true |
1c3373c42e34c54544e374c1eb57af33cec00da7 | 2,569 | py | Python | python/word-count/word_count_test.py | stevenandrewcarter/exercism | db02c2f16d9cff878120208beccb3d09305a9df0 | [
"MIT"
] | 1 | 2021-02-12T18:54:17.000Z | 2021-02-12T18:54:17.000Z | python/word-count/word_count_test.py | stevenandrewcarter/exercism | db02c2f16d9cff878120208beccb3d09305a9df0 | [
"MIT"
] | null | null | null | python/word-count/word_count_test.py | stevenandrewcarter/exercism | db02c2f16d9cff878120208beccb3d09305a9df0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
from wordcount import word_count
# to be backwards compatible with the old Python 2.X
def decode_if_needed(string):
    """Return ``string`` decoded from UTF-8 when it is a byte string.

    Text objects (which have no usable ``decode``) pass through unchanged;
    this keeps the tests working on both Python 2 and Python 3.
    """
    decoder = getattr(string, 'decode', None)
    if decoder is None:
        return string
    return decoder('utf-8')
class WordCountTests(unittest.TestCase):
    """Behavioural checks for the ``word_count`` exercise solution."""

    def test_count_one_word(self):
        """A single word maps to a count of one."""
        self.assertEqual(word_count('word'), {'word': 1})

    def test_count_one_of_each(self):
        """Distinct words each get their own entry."""
        self.assertEqual(word_count('one of each'),
                         {'one': 1, 'of': 1, 'each': 1})

    def test_count_multiple_occurences(self):
        """Repeated words accumulate their counts."""
        self.assertEqual(
            word_count('one fish two fish red fish blue fish'),
            {'one': 1, 'fish': 4, 'two': 1, 'red': 1, 'blue': 1})

    def test_preserves_punctuation(self):
        """Punctuation is stripped rather than counted as words."""
        self.assertEqual(
            word_count('car : carpet as java : javascript!!&@$%^&'),
            {'car': 1, 'carpet': 1, 'as': 1, 'java': 1, 'javascript': 1})

    def test_include_numbers(self):
        """Digit tokens count as words."""
        self.assertEqual(word_count('testing 1 2 testing'),
                         {'testing': 2, '1': 1, '2': 1})

    def test_mixed_case(self):
        """Counting is case-insensitive: go/Go/GO collapse together."""
        counts = sorted(word_count('go Go GO Stop stop').values())
        self.assertEqual(counts, [2, 3])

    def test_multiple_spaces(self):
        """Runs of whitespace do not produce empty words."""
        self.assertEqual(word_count('wait for it'),
                         {'wait': 1, 'for': 1, 'it': 1})

    def test_newlines(self):
        """Newlines separate words just like spaces."""
        expected = {'rah': 2, 'ah': 3, 'roma': 2, 'ma': 1, 'ga': 2, 'oh': 1,
                    'la': 2, 'want': 1, 'your': 1, 'bad': 1, 'romance': 1}
        text = 'rah rah ah ah ah\nroma roma ma\nga ga oh la la\nwant your bad romance'
        self.assertEqual(word_count(text), expected)

    def test_tabs(self):
        """Tabs separate words just like spaces."""
        expected = {'rah': 2, 'ah': 3, 'roma': 2, 'ma': 1, 'ga': 2, 'oh': 1,
                    'la': 2, 'want': 1, 'your': 1, 'bad': 1, 'romance': 1}
        text = 'rah rah ah ah ah\troma roma ma\tga ga oh la la\twant your bad romance'
        self.assertEqual(word_count(text), expected)

    def test_non_alphanumeric(self):
        """Commas, underscores and dots all act as separators."""
        self.assertEqual(word_count('hey,my_spacebar_is_broken.'),
                         {'hey': 1, 'my': 1, 'spacebar': 1, 'is': 1, 'broken': 1})

    def test_unicode(self):
        """Non-ASCII words are counted; the emoji acts as a separator."""
        self.assertEqual(
            word_count('до🖖свидания!'),
            {decode_if_needed('до'): 1, decode_if_needed('свидания'): 1})
# Allow running this test module directly: ``python word_count_test.py``.
if __name__ == '__main__':
    unittest.main()
| 28.865169 | 78 | 0.513429 |
import unittest
from wordcount import word_count
def decode_if_needed(string):
    """Return ``string`` decoded from UTF-8 when it is a byte string.

    Text objects (which have no usable ``decode``) pass through unchanged;
    this keeps the tests working on both Python 2 and Python 3.
    """
    decoder = getattr(string, 'decode', None)
    if decoder is None:
        return string
    return decoder('utf-8')
class WordCountTests(unittest.TestCase):
    """Behavioural checks for the ``word_count`` exercise solution."""

    def test_count_one_word(self):
        """A single word maps to a count of one."""
        self.assertEqual(word_count('word'), {'word': 1})

    def test_count_one_of_each(self):
        """Distinct words each get their own entry."""
        self.assertEqual(word_count('one of each'),
                         {'one': 1, 'of': 1, 'each': 1})

    def test_count_multiple_occurences(self):
        """Repeated words accumulate their counts."""
        self.assertEqual(
            word_count('one fish two fish red fish blue fish'),
            {'one': 1, 'fish': 4, 'two': 1, 'red': 1, 'blue': 1})

    def test_preserves_punctuation(self):
        """Punctuation is stripped rather than counted as words."""
        self.assertEqual(
            word_count('car : carpet as java : javascript!!&@$%^&'),
            {'car': 1, 'carpet': 1, 'as': 1, 'java': 1, 'javascript': 1})

    def test_include_numbers(self):
        """Digit tokens count as words."""
        self.assertEqual(word_count('testing 1 2 testing'),
                         {'testing': 2, '1': 1, '2': 1})

    def test_mixed_case(self):
        """Counting is case-insensitive: go/Go/GO collapse together."""
        counts = sorted(word_count('go Go GO Stop stop').values())
        self.assertEqual(counts, [2, 3])

    def test_multiple_spaces(self):
        """Runs of whitespace do not produce empty words."""
        self.assertEqual(word_count('wait for it'),
                         {'wait': 1, 'for': 1, 'it': 1})

    def test_newlines(self):
        """Newlines separate words just like spaces."""
        expected = {'rah': 2, 'ah': 3, 'roma': 2, 'ma': 1, 'ga': 2, 'oh': 1,
                    'la': 2, 'want': 1, 'your': 1, 'bad': 1, 'romance': 1}
        text = 'rah rah ah ah ah\nroma roma ma\nga ga oh la la\nwant your bad romance'
        self.assertEqual(word_count(text), expected)

    def test_tabs(self):
        """Tabs separate words just like spaces."""
        expected = {'rah': 2, 'ah': 3, 'roma': 2, 'ma': 1, 'ga': 2, 'oh': 1,
                    'la': 2, 'want': 1, 'your': 1, 'bad': 1, 'romance': 1}
        text = 'rah rah ah ah ah\troma roma ma\tga ga oh la la\twant your bad romance'
        self.assertEqual(word_count(text), expected)

    def test_non_alphanumeric(self):
        """Commas, underscores and dots all act as separators."""
        self.assertEqual(word_count('hey,my_spacebar_is_broken.'),
                         {'hey': 1, 'my': 1, 'spacebar': 1, 'is': 1, 'broken': 1})

    def test_unicode(self):
        """Non-ASCII words are counted; the emoji acts as a separator."""
        self.assertEqual(
            word_count('до🖖свидания!'),
            {decode_if_needed('до'): 1, decode_if_needed('свидания'): 1})
# Allow running this test module directly: ``python word_count_test.py``.
if __name__ == '__main__':
    unittest.main()
| true | true |
1c3375a1a8258dedbd7bb99777fb79d19fde143e | 672 | py | Python | testing_individual_section.py | OpenHRS/openhrs-scraper-app | be8b2dcf6b4612642700314fd7259cd0f5613742 | [
"MIT"
] | 3 | 2017-11-13T10:35:12.000Z | 2017-11-19T09:29:02.000Z | testing_individual_section.py | OpenHRS/openhrs-scraper-app | be8b2dcf6b4612642700314fd7259cd0f5613742 | [
"MIT"
] | 12 | 2017-10-01T12:35:58.000Z | 2018-08-19T07:47:00.000Z | testing_individual_section.py | OpenHRS/openhrs-scraper-app | be8b2dcf6b4612642700314fd7259cd0f5613742 | [
"MIT"
] | 1 | 2020-01-09T04:27:46.000Z | 2020-01-09T04:27:46.000Z | import json
import requests
import re
from bs4 import BeautifulSoup as bs
from create_hrs_tree import get_section_text_data
def main():
    """Scrape the text of one HRS section and dump it to a JSON file.

    Fetches section 2A-219 ('Risk of loss') from the Hawaii capitol site via
    ``get_section_text_data`` and writes the result to
    ``output/testing_individual_section.json``.
    """
    Section = {"name": 'Risk of loss',
               "number": '2A-219'}
    baseURL = 'http://www.capitol.hawaii.gov/hrscurrent/Vol11_Ch0476-0490/HRS0490/HRS_0490-.htm'
    Section['text'] = get_section_text_data(baseURL, "2A-219")
    # Context manager guarantees the file is flushed and closed even if
    # json.dump raises — the original opened the handle and never closed it.
    with open('output/testing_individual_section.json', 'w') as outfile:
        json.dump(Section, outfile, sort_keys=True,
                  indent=4, separators=(',', ': '))
    print("Data scraped into testing_individual_section.json")
# Run the scraper when executed as a script.
if __name__ == '__main__':
    main()
| 26.88 | 96 | 0.686012 | import json
import requests
import re
from bs4 import BeautifulSoup as bs
from create_hrs_tree import get_section_text_data
def main():
    """Scrape the text of one HRS section and dump it to a JSON file.

    Fetches section 2A-219 ('Risk of loss') from the Hawaii capitol site via
    ``get_section_text_data`` and writes the result to
    ``output/testing_individual_section.json``.
    """
    Section = {"name": 'Risk of loss',
               "number": '2A-219'}
    baseURL = 'http://www.capitol.hawaii.gov/hrscurrent/Vol11_Ch0476-0490/HRS0490/HRS_0490-.htm'
    Section['text'] = get_section_text_data(baseURL, "2A-219")
    # Context manager guarantees the file is flushed and closed even if
    # json.dump raises — the original opened the handle and never closed it.
    with open('output/testing_individual_section.json', 'w') as outfile:
        json.dump(Section, outfile, sort_keys=True,
                  indent=4, separators=(',', ': '))
    print("Data scraped into testing_individual_section.json")
# Run the scraper when executed as a script.
if __name__ == '__main__':
    main()
| true | true |
1c3375b4eab451b16c40c709077979721739d1e9 | 2,994 | py | Python | experiments/murtaza/ros/sawyer_sim/sawyer_pos_sac.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/murtaza/ros/sawyer_sim/sawyer_pos_sac.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/murtaza/ros/sawyer_sim/sawyer_pos_sac.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | from rlkit.launchers.launcher_util import run_experiment
from rlkit.torch.networks import ConcatMlp
import rlkit.torch.pytorch_util as ptu
from rlkit.torch.sac.policies import TanhGaussianPolicy
from rlkit.torch.sac.sac import SoftActorCritic
from sawyer_control.sawyer_reaching import SawyerXYZReachingEnv
import numpy as np
import rlkit.misc.hyperparameter as hyp
def experiment(variant):
    """Train Soft Actor-Critic on the simulated Sawyer XYZ reaching task.

    ``variant`` supplies ``env_params`` (environment kwargs), ``net_size``
    (hidden width for all networks) and ``algo_params`` (SAC kwargs).
    """
    env = SawyerXYZReachingEnv(**variant['env_params'])
    obs_dim = int(np.prod(env.observation_space.shape))
    action_dim = int(np.prod(env.action_space.shape))
    hidden = variant['net_size']
    # Q-function scores (state, action) pairs; V-function scores states.
    qf = ConcatMlp(
        hidden_sizes=[hidden, hidden],
        input_size=obs_dim + action_dim,
        output_size=1,
    )
    vf = ConcatMlp(
        hidden_sizes=[hidden, hidden],
        input_size=obs_dim,
        output_size=1,
    )
    policy = TanhGaussianPolicy(
        hidden_sizes=[hidden, hidden],
        obs_dim=obs_dim,
        action_dim=action_dim,
    )
    algorithm = SoftActorCritic(
        env=env,
        policy=policy,
        qf=qf,
        vf=vf,
        **variant['algo_params']
    )
    # Move everything to GPU when one is available, then train.
    if ptu.gpu_enabled():
        algorithm.cuda()
    algorithm.train()
# Entry point: define a default SAC config, then grid-sweep a few
# hyperparameters and launch one local run per combination.
if __name__ == "__main__":
    # Short runs: 50 epochs of 100 env steps each, 20-step episodes.
    num_epochs = 50
    num_steps_per_epoch=100
    num_steps_per_eval=100
    max_path_length=20
    # Defaults; individual keys are overridden per point of the sweep below.
    variant = dict(
        algo_params=dict(
            num_epochs=num_epochs,
            num_steps_per_epoch=num_steps_per_epoch,
            num_steps_per_eval=num_steps_per_eval,
            max_path_length=max_path_length,
            batch_size=64,
            discount=1,
            soft_target_tau=0.01,
            policy_lr=3E-4,
            qf_lr=3E-4,
            vf_lr=3E-4,
        ),
        net_size=300,
        env_params=dict(
            desired=[0.97711039, 0.56662792, 0.27901027],
            action_mode='position',
            reward='norm',
            safety_box=False,
        )
    )
    # Keys use dotted paths into ``variant``; values are the candidates.
    search_space = {
        'algo_params.reward_scale': [
            1,
            10,
            100,
            1000,
        ],
        'algo_params.num_updates_per_env_step': [
            5,
            10,
            15,
            20,
            35,
        ],
        'algo_params.soft_target_tau': [
            .01,
            .001,
        ],
        'env_params.randomize_goal_on_reset':[
            True,
            False,
        ],
        'net_size':[
            200,
            300,
            400,
        ]
    }
    sweeper = hyp.DeterministicHyperparameterSweeper(
        search_space, default_parameters=variant,
    )
    n_seeds = 1
    # Launch every combination locally (no doodad/cloud dispatch).
    for variant in sweeper.iterate_hyperparameters():
        exp_prefix = 'sawyer_simulated_sac_reaching_pos_cntrl'
        mode = 'here_no_doodad'
        for i in range(n_seeds):
            run_experiment(
                experiment,
                mode=mode,
                exp_prefix=exp_prefix,
                variant=variant,
            )
| 26.732143 | 63 | 0.573146 | from rlkit.launchers.launcher_util import run_experiment
from rlkit.torch.networks import ConcatMlp
import rlkit.torch.pytorch_util as ptu
from rlkit.torch.sac.policies import TanhGaussianPolicy
from rlkit.torch.sac.sac import SoftActorCritic
from sawyer_control.sawyer_reaching import SawyerXYZReachingEnv
import numpy as np
import rlkit.misc.hyperparameter as hyp
def experiment(variant):
    """Train Soft Actor-Critic on the simulated Sawyer XYZ reaching task.

    ``variant`` supplies ``env_params`` (environment kwargs), ``net_size``
    (hidden width for all networks) and ``algo_params`` (SAC kwargs).
    """
    env = SawyerXYZReachingEnv(**variant['env_params'])
    obs_dim = int(np.prod(env.observation_space.shape))
    action_dim = int(np.prod(env.action_space.shape))
    hidden = variant['net_size']
    # Q-function scores (state, action) pairs; V-function scores states.
    qf = ConcatMlp(
        hidden_sizes=[hidden, hidden],
        input_size=obs_dim + action_dim,
        output_size=1,
    )
    vf = ConcatMlp(
        hidden_sizes=[hidden, hidden],
        input_size=obs_dim,
        output_size=1,
    )
    policy = TanhGaussianPolicy(
        hidden_sizes=[hidden, hidden],
        obs_dim=obs_dim,
        action_dim=action_dim,
    )
    algorithm = SoftActorCritic(
        env=env,
        policy=policy,
        qf=qf,
        vf=vf,
        **variant['algo_params']
    )
    # Move everything to GPU when one is available, then train.
    if ptu.gpu_enabled():
        algorithm.cuda()
    algorithm.train()
# Entry point: define a default SAC config, then grid-sweep a few
# hyperparameters and launch one local run per combination.
if __name__ == "__main__":
    # Short runs: 50 epochs of 100 env steps each, 20-step episodes.
    num_epochs = 50
    num_steps_per_epoch=100
    num_steps_per_eval=100
    max_path_length=20
    # Defaults; individual keys are overridden per point of the sweep below.
    variant = dict(
        algo_params=dict(
            num_epochs=num_epochs,
            num_steps_per_epoch=num_steps_per_epoch,
            num_steps_per_eval=num_steps_per_eval,
            max_path_length=max_path_length,
            batch_size=64,
            discount=1,
            soft_target_tau=0.01,
            policy_lr=3E-4,
            qf_lr=3E-4,
            vf_lr=3E-4,
        ),
        net_size=300,
        env_params=dict(
            desired=[0.97711039, 0.56662792, 0.27901027],
            action_mode='position',
            reward='norm',
            safety_box=False,
        )
    )
    # Keys use dotted paths into ``variant``; values are the candidates.
    search_space = {
        'algo_params.reward_scale': [
            1,
            10,
            100,
            1000,
        ],
        'algo_params.num_updates_per_env_step': [
            5,
            10,
            15,
            20,
            35,
        ],
        'algo_params.soft_target_tau': [
            .01,
            .001,
        ],
        'env_params.randomize_goal_on_reset':[
            True,
            False,
        ],
        'net_size':[
            200,
            300,
            400,
        ]
    }
    sweeper = hyp.DeterministicHyperparameterSweeper(
        search_space, default_parameters=variant,
    )
    n_seeds = 1
    # Launch every combination locally (no doodad/cloud dispatch).
    for variant in sweeper.iterate_hyperparameters():
        exp_prefix = 'sawyer_simulated_sac_reaching_pos_cntrl'
        mode = 'here_no_doodad'
        for i in range(n_seeds):
            run_experiment(
                experiment,
                mode=mode,
                exp_prefix=exp_prefix,
                variant=variant,
            )
| true | true |
1c337605d45899ca95adf8c7fcedb4e9ce240822 | 4,528 | py | Python | api/src/opentrons/hardware_control/emulation/thermocycler.py | mrakitin/opentrons | d9c7ed23d13cdb62bd1bc397dc2871d4bd5b77e9 | [
"Apache-2.0"
] | null | null | null | api/src/opentrons/hardware_control/emulation/thermocycler.py | mrakitin/opentrons | d9c7ed23d13cdb62bd1bc397dc2871d4bd5b77e9 | [
"Apache-2.0"
] | null | null | null | api/src/opentrons/hardware_control/emulation/thermocycler.py | mrakitin/opentrons | d9c7ed23d13cdb62bd1bc397dc2871d4bd5b77e9 | [
"Apache-2.0"
] | null | null | null | """An emulation of the opentrons thermocycler module.
The purpose is to provide a fake backend that responds to GCODE commands.
"""
import logging
from typing import Optional
from opentrons.drivers.thermocycler.driver import GCODE
from opentrons.drivers.types import ThermocyclerLidStatus
from opentrons.hardware_control.emulation.parser import Parser, Command
from .abstract_emulator import AbstractEmulator
from .simulations import Temperature, TemperatureWithHold
from . import util
logger = logging.getLogger(__name__)
# Identity strings echoed back by the DEVICE_INFO reply in _handle below.
SERIAL = "thermocycler_emulator"
MODEL = "v02"
VERSION = "v1.1.0"
class ThermocyclerEmulator(AbstractEmulator):
    """Thermocycler emulator: stateful fake answering the module's G-code
    protocol (lid open/close, lid/plate temperature, device info)."""
    def __init__(self, parser: Parser) -> None:
        self.reset()
        self._parser = parser
    def handle(self, line: str) -> Optional[str]:
        """Handle a line: parse it into commands, dispatch each, and join
        the non-empty replies with spaces (None when nothing replied)."""
        results = (self._handle(c) for c in self._parser.parse(line))
        joined = ' '.join(r for r in results if r)
        return None if not joined else joined
    def reset(self):
        """Restore power-on state: room temperature, lid open, no targets."""
        # per_tick=2 — presumably the simulated change per GET_*_TEMP poll;
        # confirm against the simulations module.
        self._lid_temperate = Temperature(
            per_tick=2, current=util.TEMPERATURE_ROOM
        )
        self._plate_temperate = TemperatureWithHold(
            per_tick=2, current=util.TEMPERATURE_ROOM
        )
        self.lid_status = ThermocyclerLidStatus.OPEN
        self.plate_volume = util.OptionalValue[float]()
        self.plate_ramp_rate = util.OptionalValue[float]()
    def _handle(self, command: Command) -> Optional[str]:  # noqa: C901
        """
        Handle a command.
        TODO: AL 20210218 create dispatch map and remove 'noqa(C901)'
        """
        logger.info(f"Got command {command}")
        if command.gcode == GCODE.OPEN_LID:
            self.lid_status = ThermocyclerLidStatus.OPEN
        elif command.gcode == GCODE.CLOSE_LID:
            self.lid_status = ThermocyclerLidStatus.CLOSED
        elif command.gcode == GCODE.GET_LID_STATUS:
            return f"Lid:{self.lid_status}"
        elif command.gcode == GCODE.SET_LID_TEMP:
            temperature = command.params['S']
            assert isinstance(temperature, float),\
                f"invalid temperature '{temperature}'"
            self._lid_temperate.set_target(temperature)
        elif command.gcode == GCODE.GET_LID_TEMP:
            # Report target/current, then advance the simulated ramp a step.
            res = f"T:{util.OptionalValue(self._lid_temperate.target)} " \
                  f"C:{self._lid_temperate.current} " \
                  f"H:none Total_H:none"
            self._lid_temperate.tick()
            return res
        elif command.gcode == GCODE.EDIT_PID_PARAMS:
            pass  # accepted but has no effect in the emulator
        elif command.gcode == GCODE.SET_PLATE_TEMP:
            # S=target temperature, V=plate volume, H=hold time.
            for prefix, value in command.params.items():
                assert isinstance(value, float), f"invalid value '{value}'"
                if prefix == 'S':
                    self._plate_temperate.set_target(value)
                elif prefix == 'V':
                    self.plate_volume.val = value
                elif prefix == 'H':
                    self._plate_temperate.set_hold(value)
        elif command.gcode == GCODE.GET_PLATE_TEMP:
            plate_target = util.OptionalValue(self._plate_temperate.target)
            plate_current = self._plate_temperate.current
            plate_time_remaining = util.OptionalValue(
                self._plate_temperate.time_remaining
            )
            plate_total_hold_time = util.OptionalValue(
                self._plate_temperate.total_hold
            )
            # NOTE: the trailing space after Total_H is part of the protocol
            # string as written here — do not strip it.
            res = f"T:{plate_target} " \
                  f"C:{plate_current} " \
                  f"H:{plate_time_remaining} " \
                  f"Total_H:{plate_total_hold_time} "
            self._plate_temperate.tick()
            return res
        elif command.gcode == GCODE.SET_RAMP_RATE:
            self.plate_ramp_rate.val = command.params['S']
        elif command.gcode == GCODE.DEACTIVATE_ALL:
            self._plate_temperate.deactivate(temperature=util.TEMPERATURE_ROOM)
            self._lid_temperate.deactivate(temperature=util.TEMPERATURE_ROOM)
        elif command.gcode == GCODE.DEACTIVATE_LID:
            self._lid_temperate.deactivate(temperature=util.TEMPERATURE_ROOM)
        elif command.gcode == GCODE.DEACTIVATE_BLOCK:
            self._plate_temperate.deactivate(temperature=util.TEMPERATURE_ROOM)
        elif command.gcode == GCODE.DEVICE_INFO:
            return f"serial:{SERIAL} model:{MODEL} version:{VERSION}"
        return None
    @staticmethod
    def get_terminator() -> bytes:
        """Line terminator used on the emulated serial link."""
        return b'\r\n'
| 39.373913 | 79 | 0.634496 |
import logging
from typing import Optional
from opentrons.drivers.thermocycler.driver import GCODE
from opentrons.drivers.types import ThermocyclerLidStatus
from opentrons.hardware_control.emulation.parser import Parser, Command
from .abstract_emulator import AbstractEmulator
from .simulations import Temperature, TemperatureWithHold
from . import util
logger = logging.getLogger(__name__)
# Identity strings echoed back by the DEVICE_INFO reply in _handle below.
SERIAL = "thermocycler_emulator"
MODEL = "v02"
VERSION = "v1.1.0"
class ThermocyclerEmulator(AbstractEmulator):
    """Thermocycler emulator: stateful fake answering the module's G-code
    protocol (lid open/close, lid/plate temperature, device info)."""
    def __init__(self, parser: Parser) -> None:
        self.reset()
        self._parser = parser
    def handle(self, line: str) -> Optional[str]:
        """Parse *line* into commands, dispatch each, and join the non-empty
        replies with spaces (None when nothing replied)."""
        results = (self._handle(c) for c in self._parser.parse(line))
        joined = ' '.join(r for r in results if r)
        return None if not joined else joined
    def reset(self):
        """Restore power-on state: room temperature, lid open, no targets."""
        # per_tick=2 — presumably the simulated change per GET_*_TEMP poll;
        # confirm against the simulations module.
        self._lid_temperate = Temperature(
            per_tick=2, current=util.TEMPERATURE_ROOM
        )
        self._plate_temperate = TemperatureWithHold(
            per_tick=2, current=util.TEMPERATURE_ROOM
        )
        self.lid_status = ThermocyclerLidStatus.OPEN
        self.plate_volume = util.OptionalValue[float]()
        self.plate_ramp_rate = util.OptionalValue[float]()
    def _handle(self, command: Command) -> Optional[str]:
        """Dispatch a single parsed G-code command; return its reply or None."""
        logger.info(f"Got command {command}")
        if command.gcode == GCODE.OPEN_LID:
            self.lid_status = ThermocyclerLidStatus.OPEN
        elif command.gcode == GCODE.CLOSE_LID:
            self.lid_status = ThermocyclerLidStatus.CLOSED
        elif command.gcode == GCODE.GET_LID_STATUS:
            return f"Lid:{self.lid_status}"
        elif command.gcode == GCODE.SET_LID_TEMP:
            temperature = command.params['S']
            assert isinstance(temperature, float),\
                f"invalid temperature '{temperature}'"
            self._lid_temperate.set_target(temperature)
        elif command.gcode == GCODE.GET_LID_TEMP:
            # Report target/current, then advance the simulated ramp a step.
            res = f"T:{util.OptionalValue(self._lid_temperate.target)} " \
                  f"C:{self._lid_temperate.current} " \
                  f"H:none Total_H:none"
            self._lid_temperate.tick()
            return res
        elif command.gcode == GCODE.EDIT_PID_PARAMS:
            pass  # accepted but has no effect in the emulator
        elif command.gcode == GCODE.SET_PLATE_TEMP:
            # S=target temperature, V=plate volume, H=hold time.
            for prefix, value in command.params.items():
                assert isinstance(value, float), f"invalid value '{value}'"
                if prefix == 'S':
                    self._plate_temperate.set_target(value)
                elif prefix == 'V':
                    self.plate_volume.val = value
                elif prefix == 'H':
                    self._plate_temperate.set_hold(value)
        elif command.gcode == GCODE.GET_PLATE_TEMP:
            plate_target = util.OptionalValue(self._plate_temperate.target)
            plate_current = self._plate_temperate.current
            plate_time_remaining = util.OptionalValue(
                self._plate_temperate.time_remaining
            )
            plate_total_hold_time = util.OptionalValue(
                self._plate_temperate.total_hold
            )
            # NOTE: the trailing space after Total_H is part of the protocol
            # string as written here — do not strip it.
            res = f"T:{plate_target} " \
                  f"C:{plate_current} " \
                  f"H:{plate_time_remaining} " \
                  f"Total_H:{plate_total_hold_time} "
            self._plate_temperate.tick()
            return res
        elif command.gcode == GCODE.SET_RAMP_RATE:
            self.plate_ramp_rate.val = command.params['S']
        elif command.gcode == GCODE.DEACTIVATE_ALL:
            self._plate_temperate.deactivate(temperature=util.TEMPERATURE_ROOM)
            self._lid_temperate.deactivate(temperature=util.TEMPERATURE_ROOM)
        elif command.gcode == GCODE.DEACTIVATE_LID:
            self._lid_temperate.deactivate(temperature=util.TEMPERATURE_ROOM)
        elif command.gcode == GCODE.DEACTIVATE_BLOCK:
            self._plate_temperate.deactivate(temperature=util.TEMPERATURE_ROOM)
        elif command.gcode == GCODE.DEVICE_INFO:
            return f"serial:{SERIAL} model:{MODEL} version:{VERSION}"
        return None
    @staticmethod
    def get_terminator() -> bytes:
        """Line terminator used on the emulated serial link."""
        return b'\r\n'
return b'\r\n'
| true | true |
1c33763f08e3455be5c1b27f67dfa283532c92fc | 81 | py | Python | app-engine-by-example/local_constants.py | draescherl/griffith-cloud-plaforms-applications | 97f86f0a6cb40e3c66572023d7d7de7b49497b49 | [
"MIT"
] | null | null | null | app-engine-by-example/local_constants.py | draescherl/griffith-cloud-plaforms-applications | 97f86f0a6cb40e3c66572023d7d7de7b49497b49 | [
"MIT"
] | null | null | null | app-engine-by-example/local_constants.py | draescherl/griffith-cloud-plaforms-applications | 97f86f0a6cb40e3c66572023d7d7de7b49497b49 | [
"MIT"
] | null | null | null | PROJECT_NAME='gae-by-example'
PROJECT_STORAGE_BUCKET='gae-by-example.appspot.com' | 40.5 | 51 | 0.839506 | PROJECT_NAME='gae-by-example'
PROJECT_STORAGE_BUCKET='gae-by-example.appspot.com' | true | true |
1c33772aba5b57d2f7e626c7d7dec345fd5596bd | 156 | py | Python | configs/prod.py | Stupnitskiy/BinaryAPI | e448936ceed96da72e2aa65847030ea56edb224f | [
"MIT"
] | null | null | null | configs/prod.py | Stupnitskiy/BinaryAPI | e448936ceed96da72e2aa65847030ea56edb224f | [
"MIT"
] | null | null | null | configs/prod.py | Stupnitskiy/BinaryAPI | e448936ceed96da72e2aa65847030ea56edb224f | [
"MIT"
] | null | null | null | DEBUG = False
TESTING = False
DROPBOX_ACCESS_TOKEN = 'mTUuFHFyeNAAAAAAAAAAISJUHjSYan7RFlrRfJEdnzT1aUSKv59aLJFJMnzbS0uT'
DROPBO_PROJECT_PATH = '/binaryAPI'
| 26 | 89 | 0.858974 | DEBUG = False
TESTING = False
DROPBOX_ACCESS_TOKEN = 'mTUuFHFyeNAAAAAAAAAAISJUHjSYan7RFlrRfJEdnzT1aUSKv59aLJFJMnzbS0uT'
DROPBO_PROJECT_PATH = '/binaryAPI'
| true | true |
1c33775ff90dde5df7953bff084cd11b14119b45 | 8,034 | py | Python | python/kfserving/kfserving/models/v1alpha2_inference_service.py | titoeb/kfserving | b072a76842b57e904dbdf46a136474a22051500d | [
"Apache-2.0"
] | 6 | 2022-02-15T21:54:19.000Z | 2022-02-16T21:18:54.000Z | python/kfserving/kfserving/models/v1alpha2_inference_service.py | titoeb/kfserving | b072a76842b57e904dbdf46a136474a22051500d | [
"Apache-2.0"
] | 635 | 2021-01-29T07:06:06.000Z | 2022-03-31T09:09:20.000Z | python/kfserving/kfserving/models/v1alpha2_inference_service.py | titoeb/kfserving | b072a76842b57e904dbdf46a136474a22051500d | [
"Apache-2.0"
] | 4 | 2021-02-15T23:02:53.000Z | 2022-01-27T22:54:16.000Z | # Copyright 2020 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KFServing
Python SDK for KFServing # noqa: E501
The version of the OpenAPI document: v0.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kfserving.configuration import Configuration
class V1alpha2InferenceService(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared OpenAPI type of each python attribute.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1alpha2InferenceServiceSpec',
        'status': 'V1alpha2InferenceServiceStatus'
    }
    # Python attribute name -> JSON key in the serialized form.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status'
    }
    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None):  # noqa: E501
        """V1alpha2InferenceService - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self._status = None
        # This model uses no polymorphic (oneOf/anyOf) discriminator.
        self.discriminator = None
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        if spec is not None:
            self.spec = spec
        if status is not None:
            self.status = status
    @property
    def api_version(self):
        """Gets the api_version of this V1alpha2InferenceService.  # noqa: E501
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
        :return: The api_version of this V1alpha2InferenceService.  # noqa: E501
        :rtype: str
        """
        return self._api_version
    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1alpha2InferenceService.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
        :param api_version: The api_version of this V1alpha2InferenceService.  # noqa: E501
        :type: str
        """
        self._api_version = api_version
    @property
    def kind(self):
        """Gets the kind of this V1alpha2InferenceService.  # noqa: E501
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
        :return: The kind of this V1alpha2InferenceService.  # noqa: E501
        :rtype: str
        """
        return self._kind
    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1alpha2InferenceService.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
        :param kind: The kind of this V1alpha2InferenceService.  # noqa: E501
        :type: str
        """
        self._kind = kind
    @property
    def metadata(self):
        """Gets the metadata of this V1alpha2InferenceService.  # noqa: E501
        :return: The metadata of this V1alpha2InferenceService.  # noqa: E501
        :rtype: V1ObjectMeta
        """
        return self._metadata
    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1alpha2InferenceService.
        :param metadata: The metadata of this V1alpha2InferenceService.  # noqa: E501
        :type: V1ObjectMeta
        """
        self._metadata = metadata
    @property
    def spec(self):
        """Gets the spec of this V1alpha2InferenceService.  # noqa: E501
        :return: The spec of this V1alpha2InferenceService.  # noqa: E501
        :rtype: V1alpha2InferenceServiceSpec
        """
        return self._spec
    @spec.setter
    def spec(self, spec):
        """Sets the spec of this V1alpha2InferenceService.
        :param spec: The spec of this V1alpha2InferenceService.  # noqa: E501
        :type: V1alpha2InferenceServiceSpec
        """
        self._spec = spec
    @property
    def status(self):
        """Gets the status of this V1alpha2InferenceService.  # noqa: E501
        :return: The status of this V1alpha2InferenceService.  # noqa: E501
        :rtype: V1alpha2InferenceServiceStatus
        """
        return self._status
    @status.setter
    def status(self, status):
        """Sets the status of this V1alpha2InferenceService.
        :param status: The status of this V1alpha2InferenceService.  # noqa: E501
        :type: V1alpha2InferenceServiceStatus
        """
        self._status = status
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recurse into nested models (anything exposing to_dict), including
        # models held inside lists and dict values.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality compares serialized content, not object identity.
        if not isinstance(other, V1alpha2InferenceService):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1alpha2InferenceService):
            return True
        return self.to_dict() != other.to_dict()
| 33.061728 | 312 | 0.638163 |
import pprint
import re
import six
from kfserving.configuration import Configuration
class V1alpha2InferenceService(object):
    """OpenAPI model for a KFServing v1alpha2 InferenceService resource.

    Auto-generated-style model: each field is stored as a private attribute
    and exposed through a property/setter pair.
    """
    # Declared OpenAPI type of each python attribute.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1alpha2InferenceServiceSpec',
        'status': 'V1alpha2InferenceServiceStatus'
    }
    # Python attribute name -> JSON key in the serialized form.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status'
    }
    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None):
        """Initialize the model; unset fields stay None."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self._status = None
        # This model uses no polymorphic (oneOf/anyOf) discriminator.
        self.discriminator = None
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        if spec is not None:
            self.spec = spec
        if status is not None:
            self.status = status
    @property
    def api_version(self):
        """Kubernetes apiVersion string of this resource."""
        return self._api_version
    @api_version.setter
    def api_version(self, api_version):
        self._api_version = api_version
    @property
    def kind(self):
        """Kubernetes kind string of this resource."""
        return self._kind
    @kind.setter
    def kind(self, kind):
        self._kind = kind
    @property
    def metadata(self):
        """V1ObjectMeta metadata of this resource."""
        return self._metadata
    @metadata.setter
    def metadata(self, metadata):
        self._metadata = metadata
    @property
    def spec(self):
        """V1alpha2InferenceServiceSpec of this resource."""
        return self._spec
    @spec.setter
    def spec(self, spec):
        self._spec = spec
    @property
    def status(self):
        """V1alpha2InferenceServiceStatus of this resource."""
        return self._status
    @status.setter
    def status(self, status):
        self._status = status
    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into
        nested models (anything exposing to_dict) in lists and dict values."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Equality compares serialized content, not object identity."""
        if not isinstance(other, V1alpha2InferenceService):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Inverse of __eq__."""
        if not isinstance(other, V1alpha2InferenceService):
            return True
        return self.to_dict() != other.to_dict()
| true | true |
1c337772a3c0152c7169b07e964047635ab78aae | 3,444 | py | Python | Intro_to_Recommendation_Systems/DeepRecommender/data_utils/movielens_data_convert.py | NunoEdgarGFlowHub/sciblog_support | 5ef38b935ad45edd1cf01b92b5184282b4ee25fa | [
"BSD-3-Clause"
] | 668 | 2016-11-27T13:12:40.000Z | 2022-02-18T09:12:19.000Z | Intro_to_Recommendation_Systems/DeepRecommender/data_utils/movielens_data_convert.py | mohamedndiaye/sciblog_support | 0d5b956bacbd40d1a1fc07582aba728abfb59f54 | [
"BSD-3-Clause"
] | 99 | 2016-11-01T08:18:44.000Z | 2021-02-25T06:16:39.000Z | Intro_to_Recommendation_Systems/DeepRecommender/data_utils/movielens_data_convert.py | mohamedndiaye/sciblog_support | 0d5b956bacbd40d1a1fc07582aba728abfb59f54 | [
"BSD-3-Clause"
] | 185 | 2016-11-29T11:11:33.000Z | 2022-03-31T09:14:41.000Z | # Copyright (c) 2017 NVIDIA Corporation
import sys
import datetime
import random
from math import floor
def print_stats(data):
    """Print a STATS header plus rating and user totals for a
    user -> list-of-ratings mapping."""
    print("STATS")
    rating_total = sum(len(ratings) for ratings in data.values())
    print("Total Ratings: {}".format(rating_total))
    print("Total User count: {}".format(len(data)))
def save_data_to_file(data, filename):
    """Write ratings as TSV lines: userId<TAB>itemId<TAB>rating."""
    with open(filename, 'w') as out:
        rows = (
            "{}\t{}\t{}\n".format(user, record[0], record[1])
            for user, records in data.items()
            for record in records
        )
        out.writelines(rows)
def main(args):
    """Convert a MovieLens ratings.csv into train/valid/test rating files.

    args[1]: path to ratings.csv (header: userId,movieId,rating,timestamp)
    args[2]: output prefix; writes <prefix>.train/.valid/.test as TSV.

    Users/items are re-numbered to dense 0-based ids in order of first
    appearance. Per user, the chronologically earliest 70% of ratings go to
    training; the remainder goes to validation or test with probability 0.5
    each (unseeded random — splits differ between runs).
    """
    inpt = args[1]
    out_prefix = args[2]
    # Fraction of each user's earliest ratings kept for training.
    percent = 0.7
    user2id_map = dict()
    item2id_map = dict()
    userId = 0
    itemId = 0
    data = dict()
    min_ts = 100000000000
    max_ts = 0
    total_rating_count = 0
    with open(inpt, 'r') as inpt_f: #ratings.csv headers: userId,movieId,rating,timestamp
        for line in inpt_f:
            # Skip the CSV header row.
            if 'userId' in line:
                continue
            parts = line.split(',')
            user = int(parts[0])
            item = int(parts[1])
            rating = float(parts[2])
            ts = int(parts[3])
            if min_ts > ts:
                min_ts = ts
            if max_ts < ts:
                max_ts = ts
            # Assign dense ids in order of first appearance.
            if not user in user2id_map:
                user2id_map[user] = userId
                userId += 1
            if not item in item2id_map:
                item2id_map[item] = itemId
                itemId += 1
            total_rating_count += 1
            if user2id_map[user] not in data:
                data[user2id_map[user]] = []
            data[user2id_map[user]].append((item2id_map[item], rating, ts))
    print("STATS")
    print("Total Ratings: {}".format(total_rating_count))
    print("Total User count: {}".format(len(user2id_map)))
    print("Total Item count: {}".format(len(item2id_map)))
    print("Minimum ts: {}, which is {}".format(min_ts, datetime.datetime.fromtimestamp(min_ts).strftime('%Y-%m-%d')))
    print("Maximum ts: {}, which is {}".format(max_ts, datetime.datetime.fromtimestamp(max_ts).strftime('%Y-%m-%d')))
    training_data = dict()
    validation_data = dict()
    test_data = dict()
    train_set_items = set()
    for userId in data.keys():
        # A user needs at least one training and one held-out rating.
        if len(data[userId]) < 2:
            #print("WARNING, userId {} has less than 2 ratings, skipping user...".format(userId))
            continue
        time_sorted_ratings = sorted(data[userId], key=lambda x: x[2]) # sort by timestamp
        last_train_ind = floor(percent * len(time_sorted_ratings))
        training_data[userId] = time_sorted_ratings[:last_train_ind]
        for rating_item in time_sorted_ratings[:last_train_ind]:
            train_set_items.add(rating_item[0]) # keep track of items from training set
        # Coin flip per user: the held-out tail goes to validation OR test.
        p = random.random()
        if p <= 0.5:
            validation_data[userId] = time_sorted_ratings[last_train_ind:]
        else:
            test_data[userId] = time_sorted_ratings[last_train_ind:]
    # remove items not seen in training set
    for userId, userRatings in test_data.items():
        test_data[userId] = [rating for rating in userRatings if rating[0] in train_set_items]
    for userId, userRatings in validation_data.items():
        validation_data[userId] = [rating for rating in userRatings if rating[0] in train_set_items]
    print("Training Data")
    print_stats(training_data)
    save_data_to_file(training_data, out_prefix+".train")
    print("Validation Data")
    print_stats(validation_data)
    save_data_to_file(validation_data, out_prefix + ".valid")
    print("Test Data")
    print_stats(test_data)
    save_data_to_file(test_data, out_prefix + ".test")
if __name__ == "__main__":
main(sys.argv) | 33.115385 | 115 | 0.671312 |
import sys
import datetime
import random
from math import floor
def print_stats(data):
total_ratings = 0
print("STATS")
for user in data:
total_ratings += len(data[user])
print("Total Ratings: {}".format(total_ratings))
print("Total User count: {}".format(len(data.keys())))
def save_data_to_file(data, filename):
with open(filename, 'w') as out:
for userId in data:
for record in data[userId]:
out.write("{}\t{}\t{}\n".format(userId, record[0], record[1]))
def main(args):
inpt = args[1]
out_prefix = args[2]
percent = 0.7
user2id_map = dict()
item2id_map = dict()
userId = 0
itemId = 0
data = dict()
min_ts = 100000000000
max_ts = 0
total_rating_count = 0
with open(inpt, 'r') as inpt_f:
for line in inpt_f:
if 'userId' in line:
continue
parts = line.split(',')
user = int(parts[0])
item = int(parts[1])
rating = float(parts[2])
ts = int(parts[3])
if min_ts > ts:
min_ts = ts
if max_ts < ts:
max_ts = ts
if not user in user2id_map:
user2id_map[user] = userId
userId += 1
if not item in item2id_map:
item2id_map[item] = itemId
itemId += 1
total_rating_count += 1
if user2id_map[user] not in data:
data[user2id_map[user]] = []
data[user2id_map[user]].append((item2id_map[item], rating, ts))
print("STATS")
print("Total Ratings: {}".format(total_rating_count))
print("Total User count: {}".format(len(user2id_map)))
print("Total Item count: {}".format(len(item2id_map)))
print("Minimum ts: {}, which is {}".format(min_ts, datetime.datetime.fromtimestamp(min_ts).strftime('%Y-%m-%d')))
print("Maximum ts: {}, which is {}".format(max_ts, datetime.datetime.fromtimestamp(max_ts).strftime('%Y-%m-%d')))
training_data = dict()
validation_data = dict()
test_data = dict()
train_set_items = set()
for userId in data.keys():
if len(data[userId]) < 2:
continue
time_sorted_ratings = sorted(data[userId], key=lambda x: x[2])
last_train_ind = floor(percent * len(time_sorted_ratings))
training_data[userId] = time_sorted_ratings[:last_train_ind]
for rating_item in time_sorted_ratings[:last_train_ind]:
train_set_items.add(rating_item[0])
p = random.random()
if p <= 0.5:
validation_data[userId] = time_sorted_ratings[last_train_ind:]
else:
test_data[userId] = time_sorted_ratings[last_train_ind:]
for userId, userRatings in test_data.items():
test_data[userId] = [rating for rating in userRatings if rating[0] in train_set_items]
for userId, userRatings in validation_data.items():
validation_data[userId] = [rating for rating in userRatings if rating[0] in train_set_items]
print("Training Data")
print_stats(training_data)
save_data_to_file(training_data, out_prefix+".train")
print("Validation Data")
print_stats(validation_data)
save_data_to_file(validation_data, out_prefix + ".valid")
print("Test Data")
print_stats(test_data)
save_data_to_file(test_data, out_prefix + ".test")
if __name__ == "__main__":
main(sys.argv) | true | true |
1c33778216c6074870c586d573ca2ba9195b1476 | 1,047 | py | Python | editor/attributes/player/player_attribute_fk_style.py | PeterC10/COFPES-OF-Editor-6 | 0a9c9b75fada8264634bdc968c9da209c44b29e2 | [
"MIT"
] | 1 | 2022-03-11T12:25:57.000Z | 2022-03-11T12:25:57.000Z | editor/attributes/player/player_attribute_fk_style.py | PeterC10/COFPES-OF-Editor-6 | 0a9c9b75fada8264634bdc968c9da209c44b29e2 | [
"MIT"
] | null | null | null | editor/attributes/player/player_attribute_fk_style.py | PeterC10/COFPES-OF-Editor-6 | 0a9c9b75fada8264634bdc968c9da209c44b29e2 | [
"MIT"
] | null | null | null | from editor.attributes.player.player_attribute import (
PlayerAttribute,
PlayerAttributeTypes,
)
class PlayerAttributeFkStyle(PlayerAttribute):
@classmethod
def att_class_name(cls):
return "FK Style"
@classmethod
def att_class_type(cls):
return PlayerAttributeTypes.BasicSettings
def get_raw_value(self):
return self.parent.get_value()
def get_value(self):
return self.parent.get_value()
def get_label(self):
"""
Get full label from parent
and return second value (FK Style is set second)
"""
full_label = self.parent.get_label()
return full_label[1]
def set_value(self, value):
return self.parent.set_value(value)
def set_value_from_label(self, label):
stronger_foot_label = self.parent.stronger_foot.get_label()
pk_style_label = self.parent.pk_style.get_label()
full_label = (stronger_foot_label, label, pk_style_label)
return self.parent.set_value_from_label(full_label)
| 26.846154 | 67 | 0.683859 | from editor.attributes.player.player_attribute import (
PlayerAttribute,
PlayerAttributeTypes,
)
class PlayerAttributeFkStyle(PlayerAttribute):
@classmethod
def att_class_name(cls):
return "FK Style"
@classmethod
def att_class_type(cls):
return PlayerAttributeTypes.BasicSettings
def get_raw_value(self):
return self.parent.get_value()
def get_value(self):
return self.parent.get_value()
def get_label(self):
full_label = self.parent.get_label()
return full_label[1]
def set_value(self, value):
return self.parent.set_value(value)
def set_value_from_label(self, label):
stronger_foot_label = self.parent.stronger_foot.get_label()
pk_style_label = self.parent.pk_style.get_label()
full_label = (stronger_foot_label, label, pk_style_label)
return self.parent.set_value_from_label(full_label)
| true | true |
1c33781d337fcd41bdfddab8f6383b37366a2364 | 346 | py | Python | users/models.py | artkapl/django-blog-project | 16494465042dd6846f3a2cd560c0cfe7737cc8e0 | [
"MIT"
] | null | null | null | users/models.py | artkapl/django-blog-project | 16494465042dd6846f3a2cd560c0cfe7737cc8e0 | [
"MIT"
] | null | null | null | users/models.py | artkapl/django-blog-project | 16494465042dd6846f3a2cd560c0cfe7737cc8e0 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
from django.contrib.auth.models import User
class Profile(models.Model):
user = models.OneToOneField(to=User, on_delete=models.CASCADE)
image = models.ImageField(default='default.jpg', upload_to='profile')
def __str__(self):
return f"{self.user.username}'s Profile"
| 26.615385 | 73 | 0.734104 | from django.db import models
from django.contrib.auth.models import User
class Profile(models.Model):
user = models.OneToOneField(to=User, on_delete=models.CASCADE)
image = models.ImageField(default='default.jpg', upload_to='profile')
def __str__(self):
return f"{self.user.username}'s Profile"
| true | true |
1c3378e86defbbc7f3668544036cb7b6f2d8b7c3 | 100 | py | Python | keras_textclassification/data/model/__init__.py | luoyudong593/Keras-TextClassification | b3e6966b5dbc7f425522074e2043fbff0614de84 | [
"MIT"
] | 1,339 | 2019-06-13T15:34:46.000Z | 2022-03-31T11:24:09.000Z | keras_textclassification/data/model/__init__.py | zhangshixing-chn/Keras-TextClassification | 640e3f44f90d9d8046546f7e1a93a29ebe5c8d30 | [
"MIT"
] | 75 | 2019-06-25T06:38:27.000Z | 2022-03-25T06:48:19.000Z | keras_textclassification/data/model/__init__.py | zhangshixing-chn/Keras-TextClassification | 640e3f44f90d9d8046546f7e1a93a29ebe5c8d30 | [
"MIT"
] | 400 | 2019-06-17T03:00:48.000Z | 2022-03-23T07:00:53.000Z | # -*- coding: UTF-8 -*-
# !/usr/bin/python
# @time :2019/8/28 2:35
# @author :Mo
# @function : | 20 | 27 | 0.52 | true | true | |
1c33797ae21b5684e8af23125a3d72721bee019a | 1,554 | py | Python | examples/hello_world.py | keredson/tinyweb | 9b4619cef1bf3c1c25c3ec970c13a117e4b49346 | [
"MIT"
] | 138 | 2018-01-22T20:57:38.000Z | 2022-03-20T20:25:57.000Z | examples/hello_world.py | davidmoshal/tinyweb | 33d73c90f005cfd3423affe358cad0f13c37728f | [
"MIT"
] | 43 | 2018-01-04T19:40:50.000Z | 2022-03-11T04:13:56.000Z | examples/hello_world.py | davidmoshal/tinyweb | 33d73c90f005cfd3423affe358cad0f13c37728f | [
"MIT"
] | 31 | 2018-01-04T19:44:43.000Z | 2022-02-25T18:22:59.000Z | #!/usr/bin/env micropython
"""
MIT license
(C) Konstantin Belyalov 2017-2018
"""
import tinyweb
# Create web server application
app = tinyweb.webserver()
# Index page
@app.route('/')
async def index(request, response):
# Start HTTP response with content-type text/html
await response.start_html()
# Send actual HTML page
await response.send('<html><body><h1>Hello, world! (<a href="/table">table</a>)</h1></html>\n')
# HTTP redirection
@app.route('/redirect')
async def redirect(request, response):
# Start HTTP response with content-type text/html
await response.redirect('/')
# Another one, more complicated page
@app.route('/table')
async def table(request, response):
# Start HTTP response with content-type text/html
await response.start_html()
await response.send('<html><body><h1>Simple table</h1>'
'<table border=1 width=400>'
'<tr><td>Name</td><td>Some Value</td></tr>')
for i in range(10):
await response.send('<tr><td>Name{}</td><td>Value{}</td></tr>'.format(i, i))
await response.send('</table>'
'</html>')
def run():
app.run(host='0.0.0.0', port=8081)
if __name__ == '__main__':
run()
# To test your server:
# - Terminal:
# $ curl http://localhost:8081
# or
# $ curl http://localhost:8081/table
#
# - Browser:
# http://localhost:8081
# http://localhost:8081/table
#
# - To test HTTP redirection:
# curl http://localhost:8081/redirect -v
| 25.47541 | 99 | 0.608752 |
import tinyweb
app = tinyweb.webserver()
@app.route('/')
async def index(request, response):
await response.start_html()
await response.send('<html><body><h1>Hello, world! (<a href="/table">table</a>)</h1></html>\n')
@app.route('/redirect')
async def redirect(request, response):
await response.redirect('/')
@app.route('/table')
async def table(request, response):
await response.start_html()
await response.send('<html><body><h1>Simple table</h1>'
'<table border=1 width=400>'
'<tr><td>Name</td><td>Some Value</td></tr>')
for i in range(10):
await response.send('<tr><td>Name{}</td><td>Value{}</td></tr>'.format(i, i))
await response.send('</table>'
'</html>')
def run():
app.run(host='0.0.0.0', port=8081)
if __name__ == '__main__':
run()
| true | true |
1c337acc17a2be10f8496a8319d99b7d4033b230 | 1,148 | py | Python | FMR3.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | [
"MIT"
] | 1 | 2021-06-07T07:55:28.000Z | 2021-06-07T07:55:28.000Z | FMR3.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | [
"MIT"
] | null | null | null | FMR3.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | [
"MIT"
] | null | null | null | '''
Description : Creating Filter, Map, Reduce Using Lambda In CheckEven & Increment Function
Function Date : 21 Feb 2021
Function Author : Prasad Dangare
Input : Int
Output : Int
'''
from functools import reduce
CheckEven = lambda no : (no % 2 == 0) # (no % 2) it display odd number
Increment = lambda no : no + 2
Add = lambda no1,no2: no1 + no2
def Add(no1, no2):
return no1 + no2
def main():
arr = []
print("Enter Number Of Elements : ")
size = int(input())
for i in range(size):
print("Enter Elements Number : ", i + 1)
no = int(input())
arr.append(no)
print("Your Entered Data Is : ", arr)
newdata = list(filter(CheckEven, arr)) # newdata = MarvellousFilter(arr)
print("After Filtering Data Is : ", newdata)
newdata1 = list(map(Increment, newdata))#newdata1 = MarvellousMap(newdata)
print("After Map Is : ", newdata1)
output = reduce(Add, newdata1)#output = MarvellousReduce(newdata1)
print("After Reduce Result Is : ", output)
if __name__ == "__main__":
main() | 25.511111 | 96 | 0.591463 |
from functools import reduce
CheckEven = lambda no : (no % 2 == 0)
Increment = lambda no : no + 2
Add = lambda no1,no2: no1 + no2
def Add(no1, no2):
return no1 + no2
def main():
arr = []
print("Enter Number Of Elements : ")
size = int(input())
for i in range(size):
print("Enter Elements Number : ", i + 1)
no = int(input())
arr.append(no)
print("Your Entered Data Is : ", arr)
newdata = list(filter(CheckEven, arr))
print("After Filtering Data Is : ", newdata)
newdata1 = list(map(Increment, newdata))
print("After Map Is : ", newdata1)
output = reduce(Add, newdata1)
print("After Reduce Result Is : ", output)
if __name__ == "__main__":
main() | true | true |
1c337ba90e3206e8f089b8a4898c1d48a9a249cf | 695 | py | Python | HTN 2021 Backend/cockroach/manage.py | joonsauce/HTN-2021 | 573722e3a9a74e928562c93a96649dc1e6e5843f | [
"MIT"
] | null | null | null | HTN 2021 Backend/cockroach/manage.py | joonsauce/HTN-2021 | 573722e3a9a74e928562c93a96649dc1e6e5843f | [
"MIT"
] | null | null | null | HTN 2021 Backend/cockroach/manage.py | joonsauce/HTN-2021 | 573722e3a9a74e928562c93a96649dc1e6e5843f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cockroach_example.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 30.217391 | 82 | 0.661871 |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cockroach_example.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
1c337bee7cd0693eab260160e467f2c2904382dd | 1,547 | py | Python | django_postgres_extensions/models/sql/subqueries.py | primal100/django_nosql | bb1edc2cbf194fe571a605595a898b2528918301 | [
"BSD-3-Clause"
] | 56 | 2016-08-19T10:47:24.000Z | 2022-01-04T16:19:40.000Z | django_postgres_extensions/models/sql/subqueries.py | primal100/django_nosql | bb1edc2cbf194fe571a605595a898b2528918301 | [
"BSD-3-Clause"
] | 8 | 2016-11-18T17:02:55.000Z | 2020-02-05T02:45:05.000Z | django_postgres_extensions/models/sql/subqueries.py | primal100/django_nosql | bb1edc2cbf194fe571a605595a898b2528918301 | [
"BSD-3-Clause"
] | 30 | 2017-07-17T19:06:15.000Z | 2022-03-26T12:03:01.000Z | from django.db.models.sql.subqueries import UpdateQuery as BaseUpdateQuery
from django.utils import six
from django.core.exceptions import FieldError
class UpdateQuery(BaseUpdateQuery):
def add_update_values(self, values):
"""
Convert a dictionary of field name to value mappings into an update
query. This is the entry point for the public update() method on
querysets.
"""
values_seq = []
for name, val in six.iteritems(values):
if '__' in name:
indexes = name.split('__')
field_name = indexes.pop(0)
field = self.get_meta().get_field(field_name)
val = field.get_update_type(indexes, val)
model = field.model
else:
field = self.get_meta().get_field(name)
direct = not (field.auto_created and not field.concrete) or not field.concrete
model = field.model._meta.concrete_model
if not direct or (field.is_relation and field.many_to_many):
raise FieldError(
'Cannot update model field %r (only non-relations and '
'foreign keys permitted).' % field
)
else:
if model is not self.get_meta().model:
self.add_related_update(model, field, val)
continue
values_seq.append((field, model, val))
return self.add_update_fields(values_seq) | 45.5 | 94 | 0.570136 | from django.db.models.sql.subqueries import UpdateQuery as BaseUpdateQuery
from django.utils import six
from django.core.exceptions import FieldError
class UpdateQuery(BaseUpdateQuery):
def add_update_values(self, values):
values_seq = []
for name, val in six.iteritems(values):
if '__' in name:
indexes = name.split('__')
field_name = indexes.pop(0)
field = self.get_meta().get_field(field_name)
val = field.get_update_type(indexes, val)
model = field.model
else:
field = self.get_meta().get_field(name)
direct = not (field.auto_created and not field.concrete) or not field.concrete
model = field.model._meta.concrete_model
if not direct or (field.is_relation and field.many_to_many):
raise FieldError(
'Cannot update model field %r (only non-relations and '
'foreign keys permitted).' % field
)
else:
if model is not self.get_meta().model:
self.add_related_update(model, field, val)
continue
values_seq.append((field, model, val))
return self.add_update_fields(values_seq) | true | true |
1c337d56b55c427783a9423271a2671bd9e50c9a | 21,999 | py | Python | baselines/her/ddpg.py | swkokr/FetchPickAndPlace_HER_DDPG | 8378b53dac922cffeff8e2bdabca69cf6fd8bd54 | [
"MIT"
] | null | null | null | baselines/her/ddpg.py | swkokr/FetchPickAndPlace_HER_DDPG | 8378b53dac922cffeff8e2bdabca69cf6fd8bd54 | [
"MIT"
] | null | null | null | baselines/her/ddpg.py | swkokr/FetchPickAndPlace_HER_DDPG | 8378b53dac922cffeff8e2bdabca69cf6fd8bd54 | [
"MIT"
] | null | null | null | from collections import OrderedDict
import numpy as np
import tensorflow as tf
from tensorflow.contrib.staging import StagingArea
from baselines import logger
from baselines.her.util import (
import_function, store_args, flatten_grads, transitions_in_episode_batch, convert_episode_to_batch_major)
from baselines.her.normalizer import Normalizer
from baselines.her.replay_buffer import ReplayBuffer
from baselines.common.mpi_adam import MpiAdam
from baselines.common import tf_util
def dims_to_shapes(input_dims):
return {key: tuple([val]) if val > 0 else tuple() for key, val in input_dims.items()}
global DEMO_BUFFER #buffer for demonstrations
class DDPG(object):
@store_args
def __init__(self, input_dims, buffer_size, hidden, layers, network_class, polyak, batch_size,
Q_lr, pi_lr, norm_eps, norm_clip, max_u, action_l2, clip_obs, scope, T,
rollout_batch_size, subtract_goals, relative_goals, clip_pos_returns, clip_return,
bc_loss, q_filter, num_demo, demo_batch_size, prm_loss_weight, aux_loss_weight,
sample_transitions, gamma, reuse=False, **kwargs):
"""Implementation of DDPG that is used in combination with Hindsight Experience Replay (HER).
Added functionality to use demonstrations for training to Overcome exploration problem.
Args:
input_dims (dict of ints): dimensions for the observation (o), the goal (g), and the
actions (u)
buffer_size (int): number of transitions that are stored in the replay buffer
hidden (int): number of units in the hidden layers
layers (int): number of hidden layers
network_class (str): the network class that should be used (e.g. 'baselines.her.ActorCritic')
polyak (float): coefficient for Polyak-averaging of the target network
batch_size (int): batch size for training
Q_lr (float): learning rate for the Q (critic) network
pi_lr (float): learning rate for the pi (actor) network
norm_eps (float): a small value used in the normalizer to avoid numerical instabilities
norm_clip (float): normalized inputs are clipped to be in [-norm_clip, norm_clip]
max_u (float): maximum action magnitude, i.e. actions are in [-max_u, max_u]
action_l2 (float): coefficient for L2 penalty on the actions
clip_obs (float): clip observations before normalization to be in [-clip_obs, clip_obs]
scope (str): the scope used for the TensorFlow graph
T (int): the time horizon for rollouts
rollout_batch_size (int): number of parallel rollouts per DDPG agent
subtract_goals (function): function that subtracts goals from each other
relative_goals (boolean): whether or not relative goals should be fed into the network
clip_pos_returns (boolean): whether or not positive returns should be clipped
clip_return (float): clip returns to be in [-clip_return, clip_return]
sample_transitions (function) function that samples from the replay buffer
gamma (float): gamma used for Q learning updates
reuse (boolean): whether or not the networks should be reused
bc_loss: whether or not the behavior cloning loss should be used as an auxilliary loss
q_filter: whether or not a filter on the q value update should be used when training with demonstartions
num_demo: Number of episodes in to be used in the demonstration buffer
demo_batch_size: number of samples to be used from the demonstrations buffer, per mpi thread
prm_loss_weight: Weight corresponding to the primary loss
aux_loss_weight: Weight corresponding to the auxilliary loss also called the cloning loss
"""
if self.clip_return is None:
self.clip_return = np.inf
self.create_actor_critic = import_function(self.network_class)
input_shapes = dims_to_shapes(self.input_dims)
self.dimo = self.input_dims['o']
self.dimg = self.input_dims['g']
self.dimu = self.input_dims['u']
# Prepare staging area for feeding data to the model.
stage_shapes = OrderedDict()
for key in sorted(self.input_dims.keys()):
if key.startswith('info_'):
continue
stage_shapes[key] = (None, *input_shapes[key])
for key in ['o', 'g']:
stage_shapes[key + '_2'] = stage_shapes[key]
stage_shapes['r'] = (None,)
self.stage_shapes = stage_shapes
# Create network.
with tf.variable_scope(self.scope):
self.staging_tf = StagingArea(
dtypes=[tf.float32 for _ in self.stage_shapes.keys()],
shapes=list(self.stage_shapes.values()))
self.buffer_ph_tf = [
tf.placeholder(tf.float32, shape=shape) for shape in self.stage_shapes.values()]
self.stage_op = self.staging_tf.put(self.buffer_ph_tf)
self._create_network(reuse=reuse)
# Configure the replay buffer.
buffer_shapes = {key: (self.T-1 if key != 'o' else self.T, *input_shapes[key])
for key, val in input_shapes.items()}
buffer_shapes['g'] = (buffer_shapes['g'][0], self.dimg)
buffer_shapes['ag'] = (self.T, self.dimg)
buffer_size = (self.buffer_size // self.rollout_batch_size) * self.rollout_batch_size
self.buffer = ReplayBuffer(buffer_shapes, buffer_size, self.T, self.sample_transitions)
global DEMO_BUFFER
DEMO_BUFFER = ReplayBuffer(buffer_shapes, buffer_size, self.T, self.sample_transitions) #initialize the demo buffer; in the same way as the primary data buffer
def _random_action(self, n):
return np.random.uniform(low=-self.max_u, high=self.max_u, size=(n, self.dimu))
def _preprocess_og(self, o, ag, g):
if self.relative_goals:
g_shape = g.shape
g = g.reshape(-1, self.dimg)
ag = ag.reshape(-1, self.dimg)
g = self.subtract_goals(g, ag)
g = g.reshape(*g_shape)
o = np.clip(o, -self.clip_obs, self.clip_obs)
g = np.clip(g, -self.clip_obs, self.clip_obs)
return o, g
def step(self, obs):
actions = self.get_actions(obs['observation'], obs['achieved_goal'], obs['desired_goal'])
return actions, None, None, None
def get_actions(self, o, ag, g, noise_eps=0., random_eps=0., use_target_net=False,
compute_Q=False):
o, g = self._preprocess_og(o, ag, g)
policy = self.target if use_target_net else self.main
# values to compute
vals = [policy.pi_tf]
if compute_Q:
vals += [policy.Q_pi_tf]
# feed
feed = {
policy.o_tf: o.reshape(-1, self.dimo),
policy.g_tf: g.reshape(-1, self.dimg),
policy.u_tf: np.zeros((o.size // self.dimo, self.dimu), dtype=np.float32)
}
ret = self.sess.run(vals, feed_dict=feed)
# action postprocessing
u = ret[0]
noise = noise_eps * self.max_u * np.random.randn(*u.shape) # gaussian noise
u += noise
u = np.clip(u, -self.max_u, self.max_u)
u += np.random.binomial(1, random_eps, u.shape[0]).reshape(-1, 1) * (self._random_action(u.shape[0]) - u) # eps-greedy
if u.shape[0] == 1:
u = u[0]
u = u.copy()
ret[0] = u
if len(ret) == 1:
return ret[0]
else:
return ret
def init_demo_buffer(self, demoDataFile, update_stats=True): #function that initializes the demo buffer
demoData = np.load(demoDataFile, allow_pickle=True) #load the demonstration data from data file
info_keys = [key.replace('info_', '') for key in self.input_dims.keys() if key.startswith('info_')]
info_values = [np.empty((self.T - 1, 1, self.input_dims['info_' + key]), np.float32) for key in info_keys]
demo_data_obs = demoData['obs']
demo_data_acs = demoData['acs']
demo_data_info = demoData['info']
for epsd in range(self.num_demo): # we initialize the whole demo buffer at the start of the training
obs, acts, goals, achieved_goals = [], [] ,[] ,[]
i = 0
for transition in range(self.T - 1):
obs.append([demo_data_obs[epsd][transition].get('observation')])
acts.append([demo_data_acs[epsd][transition]])
goals.append([demo_data_obs[epsd][transition].get('desired_goal')])
achieved_goals.append([demo_data_obs[epsd][transition].get('achieved_goal')])
for idx, key in enumerate(info_keys):
info_values[idx][transition, i] = demo_data_info[epsd][transition][key]
obs.append([demo_data_obs[epsd][self.T - 1].get('observation')])
achieved_goals.append([demo_data_obs[epsd][self.T - 1].get('achieved_goal')])
episode = dict(o=obs,
u=acts,
g=goals,
ag=achieved_goals)
for key, value in zip(info_keys, info_values):
episode['info_{}'.format(key)] = value
episode = convert_episode_to_batch_major(episode)
global DEMO_BUFFER
DEMO_BUFFER.store_episode(episode) # create the observation dict and append them into the demonstration buffer
logger.debug("Demo buffer size currently ", DEMO_BUFFER.get_current_size()) #print out the demonstration buffer size
if update_stats:
# add transitions to normalizer to normalize the demo data as well
episode['o_2'] = episode['o'][:, 1:, :]
episode['ag_2'] = episode['ag'][:, 1:, :]
num_normalizing_transitions = transitions_in_episode_batch(episode)
transitions = self.sample_transitions(episode, num_normalizing_transitions)
o, g, ag = transitions['o'], transitions['g'], transitions['ag']
transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
# No need to preprocess the o_2 and g_2 since this is only used for stats
self.o_stats.update(transitions['o'])
self.g_stats.update(transitions['g'])
self.o_stats.recompute_stats()
self.g_stats.recompute_stats()
episode.clear()
logger.info("Demo buffer size: ", DEMO_BUFFER.get_current_size()) #print out the demonstration buffer size
def store_episode(self, episode_batch, update_stats=True):
"""
episode_batch: array of batch_size x (T or T+1) x dim_key
'o' is of size T+1, others are of size T
"""
self.buffer.store_episode(episode_batch)
if update_stats:
# add transitions to normalizer
episode_batch['o_2'] = episode_batch['o'][:, 1:, :]
episode_batch['ag_2'] = episode_batch['ag'][:, 1:, :]
num_normalizing_transitions = transitions_in_episode_batch(episode_batch)
transitions = self.sample_transitions(episode_batch, num_normalizing_transitions)
o, g, ag = transitions['o'], transitions['g'], transitions['ag']
transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
# No need to preprocess the o_2 and g_2 since this is only used for stats
self.o_stats.update(transitions['o'])
self.g_stats.update(transitions['g'])
self.o_stats.recompute_stats()
self.g_stats.recompute_stats()
def get_current_buffer_size(self):
return self.buffer.get_current_size()
def _sync_optimizers(self):
self.Q_adam.sync()
self.pi_adam.sync()
def _grads(self):
# Avoid feed_dict here for performance!
critic_loss, actor_loss, Q_grad, pi_grad = self.sess.run([
self.Q_loss_tf,
self.main.Q_pi_tf,
self.Q_grad_tf,
self.pi_grad_tf
])
return critic_loss, actor_loss, Q_grad, pi_grad
def _update(self, Q_grad, pi_grad):
self.Q_adam.update(Q_grad, self.Q_lr)
self.pi_adam.update(pi_grad, self.pi_lr)
def sample_batch(self):
if self.bc_loss: #use demonstration buffer to sample as well if bc_loss flag is set TRUE
transitions = self.buffer.sample(self.batch_size - self.demo_batch_size)
global DEMO_BUFFER
transitions_demo = DEMO_BUFFER.sample(self.demo_batch_size) #sample from the demo buffer
for k, values in transitions_demo.items():
rolloutV = transitions[k].tolist()
for v in values:
rolloutV.append(v.tolist())
transitions[k] = np.array(rolloutV)
else:
transitions = self.buffer.sample(self.batch_size) #otherwise only sample from primary buffer
o, o_2, g = transitions['o'], transitions['o_2'], transitions['g']
ag, ag_2 = transitions['ag'], transitions['ag_2']
transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
transitions['o_2'], transitions['g_2'] = self._preprocess_og(o_2, ag_2, g)
transitions_batch = [transitions[key] for key in self.stage_shapes.keys()]
return transitions_batch
def stage_batch(self, batch=None):
if batch is None:
batch = self.sample_batch()
assert len(self.buffer_ph_tf) == len(batch)
self.sess.run(self.stage_op, feed_dict=dict(zip(self.buffer_ph_tf, batch)))
def train(self, stage=True):
if stage:
self.stage_batch()
critic_loss, actor_loss, Q_grad, pi_grad = self._grads()
self._update(Q_grad, pi_grad)
return critic_loss, actor_loss
def _init_target_net(self):
self.sess.run(self.init_target_net_op)
def update_target_net(self):
self.sess.run(self.update_target_net_op)
def clear_buffer(self):
self.buffer.clear_buffer()
def _vars(self, scope):
res = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.scope + '/' + scope)
assert len(res) > 0
return res
def _global_vars(self, scope):
res = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope + '/' + scope)
return res
def _create_network(self, reuse=False):
logger.info("Creating a DDPG agent with action space %d x %s..." % (self.dimu, self.max_u))
self.sess = tf_util.get_session()
# running averages
with tf.variable_scope('o_stats') as vs:
if reuse:
vs.reuse_variables()
self.o_stats = Normalizer(self.dimo, self.norm_eps, self.norm_clip, sess=self.sess)
with tf.variable_scope('g_stats') as vs:
if reuse:
vs.reuse_variables()
self.g_stats = Normalizer(self.dimg, self.norm_eps, self.norm_clip, sess=self.sess)
# mini-batch sampling.
batch = self.staging_tf.get()
batch_tf = OrderedDict([(key, batch[i])
for i, key in enumerate(self.stage_shapes.keys())])
batch_tf['r'] = tf.reshape(batch_tf['r'], [-1, 1])
#choose only the demo buffer samples
mask = np.concatenate((np.zeros(self.batch_size - self.demo_batch_size), np.ones(self.demo_batch_size)), axis = 0)
# networks
with tf.variable_scope('main') as vs:
if reuse:
vs.reuse_variables()
self.main = self.create_actor_critic(batch_tf, net_type='main', **self.__dict__)
vs.reuse_variables()
with tf.variable_scope('target') as vs:
if reuse:
vs.reuse_variables()
target_batch_tf = batch_tf.copy()
target_batch_tf['o'] = batch_tf['o_2']
target_batch_tf['g'] = batch_tf['g_2']
self.target = self.create_actor_critic(
target_batch_tf, net_type='target', **self.__dict__)
vs.reuse_variables()
assert len(self._vars("main")) == len(self._vars("target"))
# loss functions
target_Q_pi_tf = self.target.Q_pi_tf
clip_range = (-self.clip_return, 0. if self.clip_pos_returns else np.inf)
target_tf = tf.clip_by_value(batch_tf['r'] + self.gamma * target_Q_pi_tf, *clip_range)
self.Q_loss_tf = tf.reduce_mean(tf.square(tf.stop_gradient(target_tf) - self.main.Q_tf))
if self.bc_loss ==1 and self.q_filter == 1 : # train with demonstrations and use bc_loss and q_filter both
maskMain = tf.reshape(tf.boolean_mask(self.main.Q_tf > self.main.Q_pi_tf, mask), [-1]) #where is the demonstrator action better than actor action according to the critic? choose those samples only
#define the cloning loss on the actor's actions only on the samples which adhere to the above masks
self.cloning_loss_tf = tf.reduce_sum(tf.square(tf.boolean_mask(tf.boolean_mask((self.main.pi_tf), mask), maskMain, axis=0) - tf.boolean_mask(tf.boolean_mask((batch_tf['u']), mask), maskMain, axis=0)))
self.pi_loss_tf = -self.prm_loss_weight * tf.reduce_mean(self.main.Q_pi_tf) #primary loss scaled by it's respective weight prm_loss_weight
self.pi_loss_tf += self.prm_loss_weight * self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u)) #L2 loss on action values scaled by the same weight prm_loss_weight
self.pi_loss_tf += self.aux_loss_weight * self.cloning_loss_tf #adding the cloning loss to the actor loss as an auxilliary loss scaled by its weight aux_loss_weight
elif self.bc_loss == 1 and self.q_filter == 0: # train with demonstrations without q_filter
self.cloning_loss_tf = tf.reduce_sum(tf.square(tf.boolean_mask((self.main.pi_tf), mask) - tf.boolean_mask((batch_tf['u']), mask)))
self.pi_loss_tf = -self.prm_loss_weight * tf.reduce_mean(self.main.Q_pi_tf)
self.pi_loss_tf += self.prm_loss_weight * self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))
self.pi_loss_tf += self.aux_loss_weight * self.cloning_loss_tf
else: #If not training with demonstrations
self.pi_loss_tf = -tf.reduce_mean(self.main.Q_pi_tf)
self.pi_loss_tf += self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))
Q_grads_tf = tf.gradients(self.Q_loss_tf, self._vars('main/Q'))
pi_grads_tf = tf.gradients(self.pi_loss_tf, self._vars('main/pi'))
assert len(self._vars('main/Q')) == len(Q_grads_tf)
assert len(self._vars('main/pi')) == len(pi_grads_tf)
self.Q_grads_vars_tf = zip(Q_grads_tf, self._vars('main/Q'))
self.pi_grads_vars_tf = zip(pi_grads_tf, self._vars('main/pi'))
self.Q_grad_tf = flatten_grads(grads=Q_grads_tf, var_list=self._vars('main/Q'))
self.pi_grad_tf = flatten_grads(grads=pi_grads_tf, var_list=self._vars('main/pi'))
# optimizers
self.Q_adam = MpiAdam(self._vars('main/Q'), scale_grad_by_procs=False)
self.pi_adam = MpiAdam(self._vars('main/pi'), scale_grad_by_procs=False)
# polyak averaging
self.main_vars = self._vars('main/Q') + self._vars('main/pi')
self.target_vars = self._vars('target/Q') + self._vars('target/pi')
self.stats_vars = self._global_vars('o_stats') + self._global_vars('g_stats')
self.init_target_net_op = list(
map(lambda v: v[0].assign(v[1]), zip(self.target_vars, self.main_vars)))
self.update_target_net_op = list(
map(lambda v: v[0].assign(self.polyak * v[0] + (1. - self.polyak) * v[1]), zip(self.target_vars, self.main_vars)))
# initialize all variables
tf.variables_initializer(self._global_vars('')).run()
self._sync_optimizers()
self._init_target_net()
def logs(self, prefix=''):
logs = []
logs += [('stats_o/mean', np.mean(self.sess.run([self.o_stats.mean])))]
logs += [('stats_o/std', np.mean(self.sess.run([self.o_stats.std])))]
logs += [('stats_g/mean', np.mean(self.sess.run([self.g_stats.mean])))]
logs += [('stats_g/std', np.mean(self.sess.run([self.g_stats.std])))]
if prefix != '' and not prefix.endswith('/'):
return [(prefix + '/' + key, val) for key, val in logs]
else:
return logs
def __getstate__(self):
"""Our policies can be loaded from pkl, but after unpickling you cannot continue training.
"""
excluded_subnames = ['_tf', '_op', '_vars', '_adam', 'buffer', 'sess', '_stats',
'main', 'target', 'lock', 'env', 'sample_transitions',
'stage_shapes', 'create_actor_critic']
state = {k: v for k, v in self.__dict__.items() if all([not subname in k for subname in excluded_subnames])}
state['buffer_size'] = self.buffer_size
state['tf'] = self.sess.run([x for x in self._global_vars('') if 'buffer' not in x.name])
return state
def __setstate__(self, state):
if 'sample_transitions' not in state:
# We don't need this for playing the policy.
state['sample_transitions'] = None
self.__init__(**state)
# set up stats (they are overwritten in __init__)
for k, v in state.items():
if k[-6:] == '_stats':
self.__dict__[k] = v
# load TF variables
vars = [x for x in self._global_vars('') if 'buffer' not in x.name]
assert(len(vars) == len(state["tf"]))
node = [tf.assign(var, val) for var, val in zip(vars, state["tf"])]
self.sess.run(node)
def save(self, save_path):
tf_util.save_variables(save_path)
| 48.995546 | 212 | 0.630347 | from collections import OrderedDict
import numpy as np
import tensorflow as tf
from tensorflow.contrib.staging import StagingArea
from baselines import logger
from baselines.her.util import (
import_function, store_args, flatten_grads, transitions_in_episode_batch, convert_episode_to_batch_major)
from baselines.her.normalizer import Normalizer
from baselines.her.replay_buffer import ReplayBuffer
from baselines.common.mpi_adam import MpiAdam
from baselines.common import tf_util
def dims_to_shapes(input_dims):
return {key: tuple([val]) if val > 0 else tuple() for key, val in input_dims.items()}
global DEMO_BUFFER
class DDPG(object):
@store_args
def __init__(self, input_dims, buffer_size, hidden, layers, network_class, polyak, batch_size,
Q_lr, pi_lr, norm_eps, norm_clip, max_u, action_l2, clip_obs, scope, T,
rollout_batch_size, subtract_goals, relative_goals, clip_pos_returns, clip_return,
bc_loss, q_filter, num_demo, demo_batch_size, prm_loss_weight, aux_loss_weight,
sample_transitions, gamma, reuse=False, **kwargs):
if self.clip_return is None:
self.clip_return = np.inf
self.create_actor_critic = import_function(self.network_class)
input_shapes = dims_to_shapes(self.input_dims)
self.dimo = self.input_dims['o']
self.dimg = self.input_dims['g']
self.dimu = self.input_dims['u']
stage_shapes = OrderedDict()
for key in sorted(self.input_dims.keys()):
if key.startswith('info_'):
continue
stage_shapes[key] = (None, *input_shapes[key])
for key in ['o', 'g']:
stage_shapes[key + '_2'] = stage_shapes[key]
stage_shapes['r'] = (None,)
self.stage_shapes = stage_shapes
with tf.variable_scope(self.scope):
self.staging_tf = StagingArea(
dtypes=[tf.float32 for _ in self.stage_shapes.keys()],
shapes=list(self.stage_shapes.values()))
self.buffer_ph_tf = [
tf.placeholder(tf.float32, shape=shape) for shape in self.stage_shapes.values()]
self.stage_op = self.staging_tf.put(self.buffer_ph_tf)
self._create_network(reuse=reuse)
buffer_shapes = {key: (self.T-1 if key != 'o' else self.T, *input_shapes[key])
for key, val in input_shapes.items()}
buffer_shapes['g'] = (buffer_shapes['g'][0], self.dimg)
buffer_shapes['ag'] = (self.T, self.dimg)
buffer_size = (self.buffer_size // self.rollout_batch_size) * self.rollout_batch_size
self.buffer = ReplayBuffer(buffer_shapes, buffer_size, self.T, self.sample_transitions)
global DEMO_BUFFER
DEMO_BUFFER = ReplayBuffer(buffer_shapes, buffer_size, self.T, self.sample_transitions)
def _random_action(self, n):
return np.random.uniform(low=-self.max_u, high=self.max_u, size=(n, self.dimu))
def _preprocess_og(self, o, ag, g):
if self.relative_goals:
g_shape = g.shape
g = g.reshape(-1, self.dimg)
ag = ag.reshape(-1, self.dimg)
g = self.subtract_goals(g, ag)
g = g.reshape(*g_shape)
o = np.clip(o, -self.clip_obs, self.clip_obs)
g = np.clip(g, -self.clip_obs, self.clip_obs)
return o, g
def step(self, obs):
actions = self.get_actions(obs['observation'], obs['achieved_goal'], obs['desired_goal'])
return actions, None, None, None
def get_actions(self, o, ag, g, noise_eps=0., random_eps=0., use_target_net=False,
compute_Q=False):
o, g = self._preprocess_og(o, ag, g)
policy = self.target if use_target_net else self.main
vals = [policy.pi_tf]
if compute_Q:
vals += [policy.Q_pi_tf]
feed = {
policy.o_tf: o.reshape(-1, self.dimo),
policy.g_tf: g.reshape(-1, self.dimg),
policy.u_tf: np.zeros((o.size // self.dimo, self.dimu), dtype=np.float32)
}
ret = self.sess.run(vals, feed_dict=feed)
u = ret[0]
noise = noise_eps * self.max_u * np.random.randn(*u.shape)
u += noise
u = np.clip(u, -self.max_u, self.max_u)
u += np.random.binomial(1, random_eps, u.shape[0]).reshape(-1, 1) * (self._random_action(u.shape[0]) - u)
if u.shape[0] == 1:
u = u[0]
u = u.copy()
ret[0] = u
if len(ret) == 1:
return ret[0]
else:
return ret
def init_demo_buffer(self, demoDataFile, update_stats=True):
demoData = np.load(demoDataFile, allow_pickle=True)
info_keys = [key.replace('info_', '') for key in self.input_dims.keys() if key.startswith('info_')]
info_values = [np.empty((self.T - 1, 1, self.input_dims['info_' + key]), np.float32) for key in info_keys]
demo_data_obs = demoData['obs']
demo_data_acs = demoData['acs']
demo_data_info = demoData['info']
for epsd in range(self.num_demo):
obs, acts, goals, achieved_goals = [], [] ,[] ,[]
i = 0
for transition in range(self.T - 1):
obs.append([demo_data_obs[epsd][transition].get('observation')])
acts.append([demo_data_acs[epsd][transition]])
goals.append([demo_data_obs[epsd][transition].get('desired_goal')])
achieved_goals.append([demo_data_obs[epsd][transition].get('achieved_goal')])
for idx, key in enumerate(info_keys):
info_values[idx][transition, i] = demo_data_info[epsd][transition][key]
obs.append([demo_data_obs[epsd][self.T - 1].get('observation')])
achieved_goals.append([demo_data_obs[epsd][self.T - 1].get('achieved_goal')])
episode = dict(o=obs,
u=acts,
g=goals,
ag=achieved_goals)
for key, value in zip(info_keys, info_values):
episode['info_{}'.format(key)] = value
episode = convert_episode_to_batch_major(episode)
global DEMO_BUFFER
DEMO_BUFFER.store_episode(episode)
logger.debug("Demo buffer size currently ", DEMO_BUFFER.get_current_size())
if update_stats:
episode['o_2'] = episode['o'][:, 1:, :]
episode['ag_2'] = episode['ag'][:, 1:, :]
num_normalizing_transitions = transitions_in_episode_batch(episode)
transitions = self.sample_transitions(episode, num_normalizing_transitions)
o, g, ag = transitions['o'], transitions['g'], transitions['ag']
transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
self.o_stats.update(transitions['o'])
self.g_stats.update(transitions['g'])
self.o_stats.recompute_stats()
self.g_stats.recompute_stats()
episode.clear()
logger.info("Demo buffer size: ", DEMO_BUFFER.get_current_size())
def store_episode(self, episode_batch, update_stats=True):
self.buffer.store_episode(episode_batch)
if update_stats:
episode_batch['o_2'] = episode_batch['o'][:, 1:, :]
episode_batch['ag_2'] = episode_batch['ag'][:, 1:, :]
num_normalizing_transitions = transitions_in_episode_batch(episode_batch)
transitions = self.sample_transitions(episode_batch, num_normalizing_transitions)
o, g, ag = transitions['o'], transitions['g'], transitions['ag']
transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
self.o_stats.update(transitions['o'])
self.g_stats.update(transitions['g'])
self.o_stats.recompute_stats()
self.g_stats.recompute_stats()
def get_current_buffer_size(self):
return self.buffer.get_current_size()
def _sync_optimizers(self):
self.Q_adam.sync()
self.pi_adam.sync()
def _grads(self):
critic_loss, actor_loss, Q_grad, pi_grad = self.sess.run([
self.Q_loss_tf,
self.main.Q_pi_tf,
self.Q_grad_tf,
self.pi_grad_tf
])
return critic_loss, actor_loss, Q_grad, pi_grad
def _update(self, Q_grad, pi_grad):
self.Q_adam.update(Q_grad, self.Q_lr)
self.pi_adam.update(pi_grad, self.pi_lr)
def sample_batch(self):
if self.bc_loss:
transitions = self.buffer.sample(self.batch_size - self.demo_batch_size)
global DEMO_BUFFER
transitions_demo = DEMO_BUFFER.sample(self.demo_batch_size)
for k, values in transitions_demo.items():
rolloutV = transitions[k].tolist()
for v in values:
rolloutV.append(v.tolist())
transitions[k] = np.array(rolloutV)
else:
transitions = self.buffer.sample(self.batch_size)
o, o_2, g = transitions['o'], transitions['o_2'], transitions['g']
ag, ag_2 = transitions['ag'], transitions['ag_2']
transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
transitions['o_2'], transitions['g_2'] = self._preprocess_og(o_2, ag_2, g)
transitions_batch = [transitions[key] for key in self.stage_shapes.keys()]
return transitions_batch
def stage_batch(self, batch=None):
if batch is None:
batch = self.sample_batch()
assert len(self.buffer_ph_tf) == len(batch)
self.sess.run(self.stage_op, feed_dict=dict(zip(self.buffer_ph_tf, batch)))
def train(self, stage=True):
if stage:
self.stage_batch()
critic_loss, actor_loss, Q_grad, pi_grad = self._grads()
self._update(Q_grad, pi_grad)
return critic_loss, actor_loss
def _init_target_net(self):
self.sess.run(self.init_target_net_op)
def update_target_net(self):
self.sess.run(self.update_target_net_op)
def clear_buffer(self):
self.buffer.clear_buffer()
def _vars(self, scope):
res = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.scope + '/' + scope)
assert len(res) > 0
return res
def _global_vars(self, scope):
res = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope + '/' + scope)
return res
def _create_network(self, reuse=False):
logger.info("Creating a DDPG agent with action space %d x %s..." % (self.dimu, self.max_u))
self.sess = tf_util.get_session()
with tf.variable_scope('o_stats') as vs:
if reuse:
vs.reuse_variables()
self.o_stats = Normalizer(self.dimo, self.norm_eps, self.norm_clip, sess=self.sess)
with tf.variable_scope('g_stats') as vs:
if reuse:
vs.reuse_variables()
self.g_stats = Normalizer(self.dimg, self.norm_eps, self.norm_clip, sess=self.sess)
batch = self.staging_tf.get()
batch_tf = OrderedDict([(key, batch[i])
for i, key in enumerate(self.stage_shapes.keys())])
batch_tf['r'] = tf.reshape(batch_tf['r'], [-1, 1])
mask = np.concatenate((np.zeros(self.batch_size - self.demo_batch_size), np.ones(self.demo_batch_size)), axis = 0)
with tf.variable_scope('main') as vs:
if reuse:
vs.reuse_variables()
self.main = self.create_actor_critic(batch_tf, net_type='main', **self.__dict__)
vs.reuse_variables()
with tf.variable_scope('target') as vs:
if reuse:
vs.reuse_variables()
target_batch_tf = batch_tf.copy()
target_batch_tf['o'] = batch_tf['o_2']
target_batch_tf['g'] = batch_tf['g_2']
self.target = self.create_actor_critic(
target_batch_tf, net_type='target', **self.__dict__)
vs.reuse_variables()
assert len(self._vars("main")) == len(self._vars("target"))
target_Q_pi_tf = self.target.Q_pi_tf
clip_range = (-self.clip_return, 0. if self.clip_pos_returns else np.inf)
target_tf = tf.clip_by_value(batch_tf['r'] + self.gamma * target_Q_pi_tf, *clip_range)
self.Q_loss_tf = tf.reduce_mean(tf.square(tf.stop_gradient(target_tf) - self.main.Q_tf))
if self.bc_loss ==1 and self.q_filter == 1 :
maskMain = tf.reshape(tf.boolean_mask(self.main.Q_tf > self.main.Q_pi_tf, mask), [-1])
self.cloning_loss_tf = tf.reduce_sum(tf.square(tf.boolean_mask(tf.boolean_mask((self.main.pi_tf), mask), maskMain, axis=0) - tf.boolean_mask(tf.boolean_mask((batch_tf['u']), mask), maskMain, axis=0)))
self.pi_loss_tf = -self.prm_loss_weight * tf.reduce_mean(self.main.Q_pi_tf) #primary loss scaled by it's respective weight prm_loss_weight
self.pi_loss_tf += self.prm_loss_weight * self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))
self.pi_loss_tf += self.aux_loss_weight * self.cloning_loss_tf
elif self.bc_loss == 1 and self.q_filter == 0:
self.cloning_loss_tf = tf.reduce_sum(tf.square(tf.boolean_mask((self.main.pi_tf), mask) - tf.boolean_mask((batch_tf['u']), mask)))
self.pi_loss_tf = -self.prm_loss_weight * tf.reduce_mean(self.main.Q_pi_tf)
self.pi_loss_tf += self.prm_loss_weight * self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))
self.pi_loss_tf += self.aux_loss_weight * self.cloning_loss_tf
else:
self.pi_loss_tf = -tf.reduce_mean(self.main.Q_pi_tf)
self.pi_loss_tf += self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))
Q_grads_tf = tf.gradients(self.Q_loss_tf, self._vars('main/Q'))
pi_grads_tf = tf.gradients(self.pi_loss_tf, self._vars('main/pi'))
assert len(self._vars('main/Q')) == len(Q_grads_tf)
assert len(self._vars('main/pi')) == len(pi_grads_tf)
self.Q_grads_vars_tf = zip(Q_grads_tf, self._vars('main/Q'))
self.pi_grads_vars_tf = zip(pi_grads_tf, self._vars('main/pi'))
self.Q_grad_tf = flatten_grads(grads=Q_grads_tf, var_list=self._vars('main/Q'))
self.pi_grad_tf = flatten_grads(grads=pi_grads_tf, var_list=self._vars('main/pi'))
self.Q_adam = MpiAdam(self._vars('main/Q'), scale_grad_by_procs=False)
self.pi_adam = MpiAdam(self._vars('main/pi'), scale_grad_by_procs=False)
self.main_vars = self._vars('main/Q') + self._vars('main/pi')
self.target_vars = self._vars('target/Q') + self._vars('target/pi')
self.stats_vars = self._global_vars('o_stats') + self._global_vars('g_stats')
self.init_target_net_op = list(
map(lambda v: v[0].assign(v[1]), zip(self.target_vars, self.main_vars)))
self.update_target_net_op = list(
map(lambda v: v[0].assign(self.polyak * v[0] + (1. - self.polyak) * v[1]), zip(self.target_vars, self.main_vars)))
tf.variables_initializer(self._global_vars('')).run()
self._sync_optimizers()
self._init_target_net()
def logs(self, prefix=''):
logs = []
logs += [('stats_o/mean', np.mean(self.sess.run([self.o_stats.mean])))]
logs += [('stats_o/std', np.mean(self.sess.run([self.o_stats.std])))]
logs += [('stats_g/mean', np.mean(self.sess.run([self.g_stats.mean])))]
logs += [('stats_g/std', np.mean(self.sess.run([self.g_stats.std])))]
if prefix != '' and not prefix.endswith('/'):
return [(prefix + '/' + key, val) for key, val in logs]
else:
return logs
def __getstate__(self):
excluded_subnames = ['_tf', '_op', '_vars', '_adam', 'buffer', 'sess', '_stats',
'main', 'target', 'lock', 'env', 'sample_transitions',
'stage_shapes', 'create_actor_critic']
state = {k: v for k, v in self.__dict__.items() if all([not subname in k for subname in excluded_subnames])}
state['buffer_size'] = self.buffer_size
state['tf'] = self.sess.run([x for x in self._global_vars('') if 'buffer' not in x.name])
return state
def __setstate__(self, state):
if 'sample_transitions' not in state:
state['sample_transitions'] = None
self.__init__(**state)
# set up stats (they are overwritten in __init__)
for k, v in state.items():
if k[-6:] == '_stats':
self.__dict__[k] = v
# load TF variables
vars = [x for x in self._global_vars('') if 'buffer' not in x.name]
assert(len(vars) == len(state["tf"]))
node = [tf.assign(var, val) for var, val in zip(vars, state["tf"])]
self.sess.run(node)
def save(self, save_path):
tf_util.save_variables(save_path)
| true | true |
1c337e6758b16a9bafaa9f9554d376509b23dd65 | 49 | py | Python | day.py | dbk9eb/cs3240-labdemo | a03b7487ff2fafd1cb45227db47084152b062a70 | [
"MIT"
] | null | null | null | day.py | dbk9eb/cs3240-labdemo | a03b7487ff2fafd1cb45227db47084152b062a70 | [
"MIT"
] | null | null | null | day.py | dbk9eb/cs3240-labdemo | a03b7487ff2fafd1cb45227db47084152b062a70 | [
"MIT"
] | null | null | null | import helper
helper.print_message("Good day!")
| 12.25 | 33 | 0.77551 | import helper
helper.print_message("Good day!")
| true | true |
1c337e7eeef15dde12d44510cd913c3e9a628fd0 | 7,825 | py | Python | test/test_niaapi_api.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 21 | 2018-03-29T14:20:35.000Z | 2021-10-13T05:11:41.000Z | test/test_niaapi_api.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 14 | 2018-01-30T15:45:46.000Z | 2022-02-23T14:23:21.000Z | test/test_niaapi_api.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 18 | 2018-01-03T15:09:56.000Z | 2021-07-16T02:21:54.000Z | # coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import intersight
from intersight.api.niaapi_api import NiaapiApi # noqa: E501
from intersight.rest import ApiException
class TestNiaapiApi(unittest.TestCase):
"""NiaapiApi unit test stubs"""
def setUp(self):
self.api = intersight.api.niaapi_api.NiaapiApi() # noqa: E501
def tearDown(self):
pass
def test_get_niaapi_apic_cco_post_by_moid(self):
"""Test case for get_niaapi_apic_cco_post_by_moid
Read a 'niaapi.ApicCcoPost' resource. # noqa: E501
"""
pass
def test_get_niaapi_apic_cco_post_list(self):
"""Test case for get_niaapi_apic_cco_post_list
Read a 'niaapi.ApicCcoPost' resource. # noqa: E501
"""
pass
def test_get_niaapi_apic_field_notice_by_moid(self):
"""Test case for get_niaapi_apic_field_notice_by_moid
Read a 'niaapi.ApicFieldNotice' resource. # noqa: E501
"""
pass
def test_get_niaapi_apic_field_notice_list(self):
"""Test case for get_niaapi_apic_field_notice_list
Read a 'niaapi.ApicFieldNotice' resource. # noqa: E501
"""
pass
def test_get_niaapi_apic_hweol_by_moid(self):
"""Test case for get_niaapi_apic_hweol_by_moid
Read a 'niaapi.ApicHweol' resource. # noqa: E501
"""
pass
def test_get_niaapi_apic_hweol_list(self):
"""Test case for get_niaapi_apic_hweol_list
Read a 'niaapi.ApicHweol' resource. # noqa: E501
"""
pass
def test_get_niaapi_apic_latest_maintained_release_by_moid(self):
"""Test case for get_niaapi_apic_latest_maintained_release_by_moid
Read a 'niaapi.ApicLatestMaintainedRelease' resource. # noqa: E501
"""
pass
def test_get_niaapi_apic_latest_maintained_release_list(self):
"""Test case for get_niaapi_apic_latest_maintained_release_list
Read a 'niaapi.ApicLatestMaintainedRelease' resource. # noqa: E501
"""
pass
def test_get_niaapi_apic_release_recommend_by_moid(self):
"""Test case for get_niaapi_apic_release_recommend_by_moid
Read a 'niaapi.ApicReleaseRecommend' resource. # noqa: E501
"""
pass
def test_get_niaapi_apic_release_recommend_list(self):
"""Test case for get_niaapi_apic_release_recommend_list
Read a 'niaapi.ApicReleaseRecommend' resource. # noqa: E501
"""
pass
def test_get_niaapi_apic_sweol_by_moid(self):
"""Test case for get_niaapi_apic_sweol_by_moid
Read a 'niaapi.ApicSweol' resource. # noqa: E501
"""
pass
def test_get_niaapi_apic_sweol_list(self):
"""Test case for get_niaapi_apic_sweol_list
Read a 'niaapi.ApicSweol' resource. # noqa: E501
"""
pass
def test_get_niaapi_dcnm_cco_post_by_moid(self):
"""Test case for get_niaapi_dcnm_cco_post_by_moid
Read a 'niaapi.DcnmCcoPost' resource. # noqa: E501
"""
pass
def test_get_niaapi_dcnm_cco_post_list(self):
"""Test case for get_niaapi_dcnm_cco_post_list
Read a 'niaapi.DcnmCcoPost' resource. # noqa: E501
"""
pass
def test_get_niaapi_dcnm_field_notice_by_moid(self):
"""Test case for get_niaapi_dcnm_field_notice_by_moid
Read a 'niaapi.DcnmFieldNotice' resource. # noqa: E501
"""
pass
def test_get_niaapi_dcnm_field_notice_list(self):
"""Test case for get_niaapi_dcnm_field_notice_list
Read a 'niaapi.DcnmFieldNotice' resource. # noqa: E501
"""
pass
def test_get_niaapi_dcnm_hweol_by_moid(self):
"""Test case for get_niaapi_dcnm_hweol_by_moid
Read a 'niaapi.DcnmHweol' resource. # noqa: E501
"""
pass
def test_get_niaapi_dcnm_hweol_list(self):
"""Test case for get_niaapi_dcnm_hweol_list
Read a 'niaapi.DcnmHweol' resource. # noqa: E501
"""
pass
def test_get_niaapi_dcnm_latest_maintained_release_by_moid(self):
"""Test case for get_niaapi_dcnm_latest_maintained_release_by_moid
Read a 'niaapi.DcnmLatestMaintainedRelease' resource. # noqa: E501
"""
pass
def test_get_niaapi_dcnm_latest_maintained_release_list(self):
"""Test case for get_niaapi_dcnm_latest_maintained_release_list
Read a 'niaapi.DcnmLatestMaintainedRelease' resource. # noqa: E501
"""
pass
def test_get_niaapi_dcnm_release_recommend_by_moid(self):
"""Test case for get_niaapi_dcnm_release_recommend_by_moid
Read a 'niaapi.DcnmReleaseRecommend' resource. # noqa: E501
"""
pass
def test_get_niaapi_dcnm_release_recommend_list(self):
"""Test case for get_niaapi_dcnm_release_recommend_list
Read a 'niaapi.DcnmReleaseRecommend' resource. # noqa: E501
"""
pass
def test_get_niaapi_dcnm_sweol_by_moid(self):
"""Test case for get_niaapi_dcnm_sweol_by_moid
Read a 'niaapi.DcnmSweol' resource. # noqa: E501
"""
pass
def test_get_niaapi_dcnm_sweol_list(self):
"""Test case for get_niaapi_dcnm_sweol_list
Read a 'niaapi.DcnmSweol' resource. # noqa: E501
"""
pass
def test_get_niaapi_file_downloader_by_moid(self):
"""Test case for get_niaapi_file_downloader_by_moid
Read a 'niaapi.FileDownloader' resource. # noqa: E501
"""
pass
def test_get_niaapi_file_downloader_list(self):
"""Test case for get_niaapi_file_downloader_list
Read a 'niaapi.FileDownloader' resource. # noqa: E501
"""
pass
def test_get_niaapi_nia_metadata_by_moid(self):
"""Test case for get_niaapi_nia_metadata_by_moid
Read a 'niaapi.NiaMetadata' resource. # noqa: E501
"""
pass
def test_get_niaapi_nia_metadata_list(self):
"""Test case for get_niaapi_nia_metadata_list
Read a 'niaapi.NiaMetadata' resource. # noqa: E501
"""
pass
def test_get_niaapi_version_regex_by_moid(self):
"""Test case for get_niaapi_version_regex_by_moid
Read a 'niaapi.VersionRegex' resource. # noqa: E501
"""
pass
def test_get_niaapi_version_regex_list(self):
"""Test case for get_niaapi_version_regex_list
Read a 'niaapi.VersionRegex' resource. # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 32.334711 | 1,052 | 0.689968 |
from __future__ import absolute_import
import unittest
import intersight
from intersight.api.niaapi_api import NiaapiApi
from intersight.rest import ApiException
class TestNiaapiApi(unittest.TestCase):
def setUp(self):
self.api = intersight.api.niaapi_api.NiaapiApi()
def tearDown(self):
pass
def test_get_niaapi_apic_cco_post_by_moid(self):
pass
def test_get_niaapi_apic_cco_post_list(self):
pass
def test_get_niaapi_apic_field_notice_by_moid(self):
pass
def test_get_niaapi_apic_field_notice_list(self):
pass
def test_get_niaapi_apic_hweol_by_moid(self):
pass
def test_get_niaapi_apic_hweol_list(self):
pass
def test_get_niaapi_apic_latest_maintained_release_by_moid(self):
pass
def test_get_niaapi_apic_latest_maintained_release_list(self):
pass
def test_get_niaapi_apic_release_recommend_by_moid(self):
pass
def test_get_niaapi_apic_release_recommend_list(self):
pass
def test_get_niaapi_apic_sweol_by_moid(self):
pass
def test_get_niaapi_apic_sweol_list(self):
pass
def test_get_niaapi_dcnm_cco_post_by_moid(self):
pass
def test_get_niaapi_dcnm_cco_post_list(self):
pass
def test_get_niaapi_dcnm_field_notice_by_moid(self):
pass
def test_get_niaapi_dcnm_field_notice_list(self):
pass
def test_get_niaapi_dcnm_hweol_by_moid(self):
pass
def test_get_niaapi_dcnm_hweol_list(self):
pass
def test_get_niaapi_dcnm_latest_maintained_release_by_moid(self):
pass
def test_get_niaapi_dcnm_latest_maintained_release_list(self):
pass
def test_get_niaapi_dcnm_release_recommend_by_moid(self):
pass
def test_get_niaapi_dcnm_release_recommend_list(self):
pass
def test_get_niaapi_dcnm_sweol_by_moid(self):
pass
def test_get_niaapi_dcnm_sweol_list(self):
pass
def test_get_niaapi_file_downloader_by_moid(self):
pass
def test_get_niaapi_file_downloader_list(self):
pass
def test_get_niaapi_nia_metadata_by_moid(self):
pass
def test_get_niaapi_nia_metadata_list(self):
pass
def test_get_niaapi_version_regex_by_moid(self):
pass
def test_get_niaapi_version_regex_list(self):
pass
if __name__ == '__main__':
unittest.main()
| true | true |
1c338012d5f71015b69976f28bb231597f53471c | 4,188 | py | Python | configs/distillers/fgd/fgd_cascade_mask_rcnn_rx101_32x4d_distill_faster_rcnn_r50_fpn_2x_coco.py | jie311/FGD | 031ef509f2f19c281132447d82a1c077943eb64e | [
"Apache-2.0"
] | 103 | 2021-11-23T07:12:41.000Z | 2022-03-31T13:28:10.000Z | configs/distillers/fgd/fgd_cascade_mask_rcnn_rx101_32x4d_distill_faster_rcnn_r50_fpn_2x_coco.py | jie311/FGD | 031ef509f2f19c281132447d82a1c077943eb64e | [
"Apache-2.0"
] | 18 | 2021-11-29T14:59:21.000Z | 2022-03-31T09:43:39.000Z | configs/distillers/fgd/fgd_cascade_mask_rcnn_rx101_32x4d_distill_faster_rcnn_r50_fpn_2x_coco.py | Senwang98/Lightweight-Detection-and-KD | 7d6a4c02d922d4ed0920c9108f1f06dd63c5e90b | [
"Apache-2.0"
] | 12 | 2021-11-28T10:26:38.000Z | 2022-03-28T06:10:44.000Z | _base_ = [
'../../_base_/datasets/coco_detection.py',
'../../_base_/schedules/schedule_2x.py', '../../_base_/default_runtime.py'
]
# model settings
find_unused_parameters=True
temp=0.5
alpha_fgd=0.00005
beta_fgd=0.000025
gamma_fgd=0.00005
lambda_fgd=0.0000005
distiller = dict(
type='DetectionDistiller',
teacher_pretrained = 'https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco-e75f90c8.pth',
distill_cfg = [ dict(student_module = 'neck.fpn_convs.3.conv',
teacher_module = 'neck.fpn_convs.3.conv',
output_hook = True,
methods=[dict(type='FeatureLoss',
name='loss_fgd_fpn_3',
student_channels = 256,
teacher_channels = 256,
temp = temp,
alpha_fgd=alpha_fgd,
beta_fgd=beta_fgd,
gamma_fgd=gamma_fgd,
lambda_fgd=lambda_fgd,
)
]
),
dict(student_module = 'neck.fpn_convs.2.conv',
teacher_module = 'neck.fpn_convs.2.conv',
output_hook = True,
methods=[dict(type='FeatureLoss',
name='loss_fgd_fpn_2',
student_channels = 256,
teacher_channels = 256,
temp = temp,
alpha_fgd=alpha_fgd,
beta_fgd=beta_fgd,
gamma_fgd=gamma_fgd,
lambda_fgd=lambda_fgd,
)
]
),
dict(student_module = 'neck.fpn_convs.1.conv',
teacher_module = 'neck.fpn_convs.1.conv',
output_hook = True,
methods=[dict(type='FeatureLoss',
name='loss_fgd_fpn_1',
student_channels = 256,
teacher_channels = 256,
temp = temp,
alpha_fgd=alpha_fgd,
beta_fgd=beta_fgd,
gamma_fgd=gamma_fgd,
lambda_fgd=lambda_fgd,
)
]
),
dict(student_module = 'neck.fpn_convs.0.conv',
teacher_module = 'neck.fpn_convs.0.conv',
output_hook = True,
methods=[dict(type='FeatureLoss',
name='loss_fgd_fpn_0',
student_channels = 256,
teacher_channels = 256,
temp = temp,
alpha_fgd=alpha_fgd,
beta_fgd=beta_fgd,
gamma_fgd=gamma_fgd,
lambda_fgd=lambda_fgd,
)
]
),
]
)
student_cfg = 'configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py'
teacher_cfg = 'configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py'
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,) | 49.857143 | 198 | 0.401862 | _base_ = [
'../../_base_/datasets/coco_detection.py',
'../../_base_/schedules/schedule_2x.py', '../../_base_/default_runtime.py'
]
find_unused_parameters=True
temp=0.5
alpha_fgd=0.00005
beta_fgd=0.000025
gamma_fgd=0.00005
lambda_fgd=0.0000005
distiller = dict(
type='DetectionDistiller',
teacher_pretrained = 'https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco-e75f90c8.pth',
distill_cfg = [ dict(student_module = 'neck.fpn_convs.3.conv',
teacher_module = 'neck.fpn_convs.3.conv',
output_hook = True,
methods=[dict(type='FeatureLoss',
name='loss_fgd_fpn_3',
student_channels = 256,
teacher_channels = 256,
temp = temp,
alpha_fgd=alpha_fgd,
beta_fgd=beta_fgd,
gamma_fgd=gamma_fgd,
lambda_fgd=lambda_fgd,
)
]
),
dict(student_module = 'neck.fpn_convs.2.conv',
teacher_module = 'neck.fpn_convs.2.conv',
output_hook = True,
methods=[dict(type='FeatureLoss',
name='loss_fgd_fpn_2',
student_channels = 256,
teacher_channels = 256,
temp = temp,
alpha_fgd=alpha_fgd,
beta_fgd=beta_fgd,
gamma_fgd=gamma_fgd,
lambda_fgd=lambda_fgd,
)
]
),
dict(student_module = 'neck.fpn_convs.1.conv',
teacher_module = 'neck.fpn_convs.1.conv',
output_hook = True,
methods=[dict(type='FeatureLoss',
name='loss_fgd_fpn_1',
student_channels = 256,
teacher_channels = 256,
temp = temp,
alpha_fgd=alpha_fgd,
beta_fgd=beta_fgd,
gamma_fgd=gamma_fgd,
lambda_fgd=lambda_fgd,
)
]
),
dict(student_module = 'neck.fpn_convs.0.conv',
teacher_module = 'neck.fpn_convs.0.conv',
output_hook = True,
methods=[dict(type='FeatureLoss',
name='loss_fgd_fpn_0',
student_channels = 256,
teacher_channels = 256,
temp = temp,
alpha_fgd=alpha_fgd,
beta_fgd=beta_fgd,
gamma_fgd=gamma_fgd,
lambda_fgd=lambda_fgd,
)
]
),
]
)
student_cfg = 'configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py'
teacher_cfg = 'configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py'
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,) | true | true |
1c3380428247352863fee058af182659e36101fa | 10,798 | py | Python | MLCtr/machineLearning/decision_tree.py | devillove084/CollageDesign | e2a85a8d15f82d1f72b754de04af78126eae9a1c | [
"MIT"
] | 3 | 2018-12-28T14:12:53.000Z | 2019-06-08T16:30:25.000Z | MLCtr/machineLearning/decision_tree.py | devillove084/CollageDesign | e2a85a8d15f82d1f72b754de04af78126eae9a1c | [
"MIT"
] | null | null | null | MLCtr/machineLearning/decision_tree.py | devillove084/CollageDesign | e2a85a8d15f82d1f72b754de04af78126eae9a1c | [
"MIT"
] | null | null | null | from __future__ import division, print_function
import numpy as np
import cupy
from graduateutil import divide_on_feature, train_test_split, standardize, mean_squared_error
from graduateutil import calculate_entropy, accuracy_score, calculate_variance
class DecisionNode():
"""Class that represents a decision node or leaf in the decision tree
Parameters:
-----------
feature_i: int
Feature index which we want to use as the threshold measure.
threshold: float
The value that we will compare feature values at feature_i against to
determine the prediction.
value: float
The class prediction if classification tree, or float value if regression tree.
true_branch: DecisionNode
Next decision node for samples where features value met the threshold.
false_branch: DecisionNode
Next decision node for samples where features value did not meet the threshold.
"""
def __init__(self, feature_i=None, threshold=None,
value=None, true_branch=None, false_branch=None):
self.feature_i = feature_i # Index for the feature that is tested
self.threshold = threshold # Threshold value for feature
self.value = value # Value if the node is a leaf in the tree
self.true_branch = true_branch # 'Left' subtree
self.false_branch = false_branch # 'Right' subtree
class DecisionTree(object):
"""Super class of RegressionTree and ClassificationTree.
Parameters:
-----------
min_samples_split: int
The minimum number of samples needed to make a split when building a tree.
min_impurity: float
The minimum impurity required to split the tree further.
max_depth: int
The maximum depth of a tree.
loss: function
Loss function that is used for Gradient Boosting models to calculate impurity.
"""
def __init__(self, min_samples_split=2, min_impurity=1e-7,
max_depth=float("inf"), loss=None):
self.root = None # Root node in dec. tree
# Minimum n of samples to justify split
self.min_samples_split = min_samples_split
# The minimum impurity to justify split
self.min_impurity = min_impurity
# The maximum depth to grow the tree to
self.max_depth = max_depth
# Function to calculate impurity (classif.=>info gain, regr=>variance reduct.)
self._impurity_calculation = None
# Function to determine prediction of y at leaf
self._leaf_value_calculation = None
# If y is one-hot encoded (multi-dim) or not (one-dim)
self.one_dim = None
# If Gradient Boost
self.loss = loss
def fit(self, X, y, loss=None):
""" Build decicion tree """
self.one_dim = len(y.shape) == 1
self.root = self._build_tree(X,y)
self.loss = None
def _build_tree(self, X ,y, current_depth=0):
""" Recursive method which builds out the decision tree and splits X and respective y
on the feature of X which (based on impurity) best separates the data"""
largest_impurity = 0
best_criteria = None # Feature index and threshold
best_sets = None # Subsets of the data
if len(y.shape) == 1:
y = np.expand_dims(y, axis=1)
Xy = np.concatenate((X,y), axis=1)
n_samples, n_features = X.shape
if n_samples >= self.min_samples_split and current_depth <= self.max_depth:
# Calculate the impurity for each feature
for feature_i in range(n_features):
# All values of feature_i
feature_values = np.expand_dims(X[:, feature_i],axis = 1)
unique_values = np.unique(feature_values)
# Iterate through all unique values of feature column i and
# calculate the impurity
for threshold in unique_values:
# Divide X and y depending on if the feature value of X at index feature_i
# meets the threshold
Xy1, Xy2 = divide_on_feature(Xy, feature_i, threshold)
if len(Xy1) > 0 and len(Xy2) > 0:
# Select the y-values of the two sets
y1 = Xy1[:, n_features:]
y2 = Xy2[:, n_features:]
# Calculate impurity
impurity = self._impurity_calculation(y, y1, y2)
if impurity > largest_impurity:
largest_impurity = impurity
best_criteria = {"feature_i": feature_i, "threshold": threshold}
best_sets = {
"leftX": Xy1[:, :n_features], # X of left subtree
"lefty": Xy1[:, n_features:], # y of left subtree
"rightX": Xy2[:, :n_features], # X of right subtree
"righty": Xy2[:, n_features:] # y of right subtree
}
if largest_impurity > self.min_impurity:
# Build subtrees for the right and left branches
true_branch = self._build_tree(best_sets["leftX"], best_sets["lefty"], current_depth + 1)
false_branch = self._build_tree(best_sets["rightX"], best_sets["righty"], current_depth + 1)
return DecisionNode(feature_i=best_criteria["feature_i"], threshold=best_criteria[
"threshold"], true_branch=true_branch, false_branch=false_branch)
# We're at leaf => determine value
leaf_value = self._leaf_value_calculation(y)
return DecisionNode(value=leaf_value)
def predict_value(self, x, tree=None):
""" Do a recursive search down the tree and make a prediction of the data sample by the
value of the leaf that we end up at """
if tree is None:
tree = self.root
# If we have a value (i.e we're at a leaf) => return value as the prediction
if tree.value is not None:
return tree.value
# Choose the feature that we will test
feature_value = x[tree.feature_i]
# Determine if we will follow left or right branch
branch = tree.false_branch
if isinstance(feature_value, int) or isinstance(feature_value, float):
if feature_value >= tree.threshold:
branch = tree.true_branch
elif feature_value == tree.threshold:
branch = tree.true_branch
# Test subtree
return self.predict_value(x, branch)
def predict(self, X):
""" Classify samples one by one and return the set of labels """
y_pred = [self.predict_value(sample) for sample in X]
return y_pred
def print_tree(self, tree=None, indent=" "):
""" Recursively print the decision tree """
if not tree:
tree = self.root
# If we're at leaf => print the label
if tree.value is not None:
print (tree.value)
# Go deeper down the tree
else:
# Print test
print ("%s:%s? " % (tree.feature_i, tree.threshold))
# Print the true scenario
print ("%sT->" % (indent), end="")
self.print_tree(tree.true_branch, indent + indent)
# Print the false scenario
print ("%sF->" % (indent), end="")
self.print_tree(tree.false_branch, indent + indent)
class XGBoostRegressionTree(DecisionTree):
"""
Regression tree for XGBoost
- Reference -
http://xgboost.readthedocs.io/en/latest/model.html
"""
def _split(self, y):
""" y contains y_true in left half of the middle column and
y_pred in the right half. Split and return the two matrices """
col = int(np.shape(y)[1]/2)
y, y_pred = y[:, :col], y[:, col:]
return y, y_pred
def _gain(self, y, y_pred):
nominator = np.power((y * self.loss.gradient(y, y_pred)).sum(), 2)
denominator = self.loss.hess(y, y_pred).sum()
return 0.5 * (nominator / denominator)
def _gain_by_taylor(self, y, y1, y2):
# Split
y, y_pred = self._split(y)
y1, y1_pred = self._split(y1)
y2, y2_pred = self._split(y2)
true_gain = self._gain(y1, y1_pred)
false_gain = self._gain(y2, y2_pred)
gain = self._gain(y, y_pred)
return true_gain + false_gain - gain
def _approximate_update(self, y):
# y split into y, y_pred
y, y_pred = self._split(y)
# Newton's Method
gradient = np.sum(y * self.loss.gradient(y, y_pred), axis=0)
hessian = np.sum(self.loss.hess(y, y_pred), axis=0)
update_approximation = gradient / hessian
return update_approximation
def fit(self, X, y):
self._impurity_calculation = self._gain_by_taylor
self._leaf_value_calculation = self._approximate_update
super(XGBoostRegressionTree, self).fit(X, y)
class RegressionTree(DecisionTree):
def _calculate_variance_reduction(self, y, y1, y2):
var_tot = calculate_variance(y)
var_1 = calculate_variance(y1)
var_2 = calculate_variance(y2)
frac_1 = len(y1) / len(y)
frac_2 = len(y2) / len(y)
# Calculate the variance reduction
variance_reduction = var_tot - (frac_1 * var_1 + frac_2 * var_2)
return sum(variance_reduction)
def _mean_of_y(self, y):
value = np.mean(y, axis=0)
return value if len(value) > 1 else value[0]
def fit(self, X, y):
self._impurity_calculation = self._calculate_variance_reduction
self._leaf_value_calculation = self._mean_of_y
super(RegressionTree, self).fit(X, y)
class ClassificationTree(DecisionTree):
def _calculate_information_gain(self, y, y1, y2):
# Calculate information gain
p = len(y1) / len(y)
entropy = calculate_entropy(y)
info_gain = entropy - p * \
calculate_entropy(y1) - (1 - p) * \
calculate_entropy(y2)
return info_gain
def _majority_vote(self, y):
most_common = None
max_count = 0
for label in np.unique(y):
# Count number of occurences of samples with label
count = len(y[y == label])
if count > max_count:
most_common = label
max_count = count
return most_common
def fit(self, X, y):
self._impurity_calculation = self._calculate_information_gain
self._leaf_value_calculation = self._majority_vote
super(ClassificationTree, self).fit(X, y) | 39.265455 | 104 | 0.602889 | from __future__ import division, print_function
import numpy as np
import cupy
from graduateutil import divide_on_feature, train_test_split, standardize, mean_squared_error
from graduateutil import calculate_entropy, accuracy_score, calculate_variance
class DecisionNode():
def __init__(self, feature_i=None, threshold=None,
value=None, true_branch=None, false_branch=None):
self.feature_i = feature_i
self.threshold = threshold
self.value = value
self.true_branch = true_branch
self.false_branch = false_branch
class DecisionTree(object):
def __init__(self, min_samples_split=2, min_impurity=1e-7,
max_depth=float("inf"), loss=None):
self.root = None
self.min_samples_split = min_samples_split
self.min_impurity = min_impurity
self.max_depth = max_depth
self._impurity_calculation = None
self._leaf_value_calculation = None
self.one_dim = None
self.loss = loss
def fit(self, X, y, loss=None):
self.one_dim = len(y.shape) == 1
self.root = self._build_tree(X,y)
self.loss = None
def _build_tree(self, X ,y, current_depth=0):
largest_impurity = 0
best_criteria = None
best_sets = None
if len(y.shape) == 1:
y = np.expand_dims(y, axis=1)
Xy = np.concatenate((X,y), axis=1)
n_samples, n_features = X.shape
if n_samples >= self.min_samples_split and current_depth <= self.max_depth:
for feature_i in range(n_features):
feature_values = np.expand_dims(X[:, feature_i],axis = 1)
unique_values = np.unique(feature_values)
for threshold in unique_values:
Xy1, Xy2 = divide_on_feature(Xy, feature_i, threshold)
if len(Xy1) > 0 and len(Xy2) > 0:
y1 = Xy1[:, n_features:]
y2 = Xy2[:, n_features:]
impurity = self._impurity_calculation(y, y1, y2)
if impurity > largest_impurity:
largest_impurity = impurity
best_criteria = {"feature_i": feature_i, "threshold": threshold}
best_sets = {
"leftX": Xy1[:, :n_features],
"lefty": Xy1[:, n_features:],
"rightX": Xy2[:, :n_features],
"righty": Xy2[:, n_features:]
}
if largest_impurity > self.min_impurity:
true_branch = self._build_tree(best_sets["leftX"], best_sets["lefty"], current_depth + 1)
false_branch = self._build_tree(best_sets["rightX"], best_sets["righty"], current_depth + 1)
return DecisionNode(feature_i=best_criteria["feature_i"], threshold=best_criteria[
"threshold"], true_branch=true_branch, false_branch=false_branch)
leaf_value = self._leaf_value_calculation(y)
return DecisionNode(value=leaf_value)
def predict_value(self, x, tree=None):
if tree is None:
tree = self.root
# If we have a value (i.e we're at a leaf) => return value as the prediction
if tree.value is not None:
return tree.value
feature_value = x[tree.feature_i]
branch = tree.false_branch
if isinstance(feature_value, int) or isinstance(feature_value, float):
if feature_value >= tree.threshold:
branch = tree.true_branch
elif feature_value == tree.threshold:
branch = tree.true_branch
return self.predict_value(x, branch)
def predict(self, X):
y_pred = [self.predict_value(sample) for sample in X]
return y_pred
def print_tree(self, tree=None, indent=" "):
if not tree:
tree = self.root
if tree.value is not None:
print (tree.value)
# Go deeper down the tree
else:
# Print test
print ("%s:%s? " % (tree.feature_i, tree.threshold))
# Print the true scenario
print ("%sT->" % (indent), end="")
self.print_tree(tree.true_branch, indent + indent)
# Print the false scenario
print ("%sF->" % (indent), end="")
self.print_tree(tree.false_branch, indent + indent)
class XGBoostRegressionTree(DecisionTree):
def _split(self, y):
col = int(np.shape(y)[1]/2)
y, y_pred = y[:, :col], y[:, col:]
return y, y_pred
def _gain(self, y, y_pred):
nominator = np.power((y * self.loss.gradient(y, y_pred)).sum(), 2)
denominator = self.loss.hess(y, y_pred).sum()
return 0.5 * (nominator / denominator)
def _gain_by_taylor(self, y, y1, y2):
# Split
y, y_pred = self._split(y)
y1, y1_pred = self._split(y1)
y2, y2_pred = self._split(y2)
true_gain = self._gain(y1, y1_pred)
false_gain = self._gain(y2, y2_pred)
gain = self._gain(y, y_pred)
return true_gain + false_gain - gain
def _approximate_update(self, y):
# y split into y, y_pred
y, y_pred = self._split(y)
# Newton's Method
gradient = np.sum(y * self.loss.gradient(y, y_pred), axis=0)
hessian = np.sum(self.loss.hess(y, y_pred), axis=0)
update_approximation = gradient / hessian
return update_approximation
def fit(self, X, y):
self._impurity_calculation = self._gain_by_taylor
self._leaf_value_calculation = self._approximate_update
super(XGBoostRegressionTree, self).fit(X, y)
class RegressionTree(DecisionTree):
def _calculate_variance_reduction(self, y, y1, y2):
var_tot = calculate_variance(y)
var_1 = calculate_variance(y1)
var_2 = calculate_variance(y2)
frac_1 = len(y1) / len(y)
frac_2 = len(y2) / len(y)
variance_reduction = var_tot - (frac_1 * var_1 + frac_2 * var_2)
return sum(variance_reduction)
def _mean_of_y(self, y):
value = np.mean(y, axis=0)
return value if len(value) > 1 else value[0]
def fit(self, X, y):
self._impurity_calculation = self._calculate_variance_reduction
self._leaf_value_calculation = self._mean_of_y
super(RegressionTree, self).fit(X, y)
class ClassificationTree(DecisionTree):
def _calculate_information_gain(self, y, y1, y2):
p = len(y1) / len(y)
entropy = calculate_entropy(y)
info_gain = entropy - p * \
calculate_entropy(y1) - (1 - p) * \
calculate_entropy(y2)
return info_gain
def _majority_vote(self, y):
most_common = None
max_count = 0
for label in np.unique(y):
count = len(y[y == label])
if count > max_count:
most_common = label
max_count = count
return most_common
def fit(self, X, y):
self._impurity_calculation = self._calculate_information_gain
self._leaf_value_calculation = self._majority_vote
super(ClassificationTree, self).fit(X, y) | true | true |
1c338084d15ebb05209be561572bd9f31821a301 | 289 | py | Python | define.py | nikv96/Retail-Work-Allocation-Program | 23da2478c287846ebba59b30c94dd4e28aba2a62 | [
"MIT"
] | 1 | 2019-01-16T05:00:29.000Z | 2019-01-16T05:00:29.000Z | define.py | nikv96/Retail-Work-Allocation-Program | 23da2478c287846ebba59b30c94dd4e28aba2a62 | [
"MIT"
] | null | null | null | define.py | nikv96/Retail-Work-Allocation-Program | 23da2478c287846ebba59b30c94dd4e28aba2a62 | [
"MIT"
] | null | null | null | '''This file has all the definitions'''
weekends = ["Sunday", "Saturday"]
weekdays = ["Monday","Tuesday","Wednesday","Thursday","Friday"]
days = ["Sunday", "Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]
unusable_words = ["Name", "Off", "Days", "========================"]
| 41.285714 | 80 | 0.598616 |
weekends = ["Sunday", "Saturday"]
weekdays = ["Monday","Tuesday","Wednesday","Thursday","Friday"]
days = ["Sunday", "Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]
unusable_words = ["Name", "Off", "Days", "========================"]
| true | true |
1c3380ecd30ca56195bc242c33cef8c3f44c596d | 4,329 | py | Python | photo_gallery/models.py | Fritzip/splitted-nz | 405089440350c4808144d5ed711606513efcdefb | [
"Apache-2.0"
] | null | null | null | photo_gallery/models.py | Fritzip/splitted-nz | 405089440350c4808144d5ed711606513efcdefb | [
"Apache-2.0"
] | 3 | 2019-11-16T08:10:48.000Z | 2020-03-14T23:14:41.000Z | photo_gallery/models.py | Fritzip/splitted-nz | 405089440350c4808144d5ed711606513efcdefb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import uuid
from django.db import models
from django.db.models.signals import post_delete
from django.dispatch import receiver
from django.urls import reverse
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToFit
from datetime import datetime
def event_date(start, end):
if end is None or start == end:
return start.strftime("%d %b %Y")
elif start.strftime("%b") == end.strftime("%b"):
return start.strftime("%d") + " - " + end.strftime("%d %b %Y")
else:
return start.strftime("%d %b") + " - " + end.strftime("%d %b %Y")
class Article(models.Model):
title = models.CharField(max_length=70)
description = models.TextField(max_length=8192, null=True, blank=True)
image = models.ImageField()
thumb = ImageSpecField(source='image',
processors=[ResizeToFit(100)],
format='JPEG',
options={'quality': 5})
is_visible = models.BooleanField(default=True)
start_date = models.DateField(default=datetime.today)
end_date = models.DateField(null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now_add=True)
slug = models.SlugField(max_length=50, unique=True)
@property
def get_event_date(self):
return event_date(self.start_date, self.end_date)
@property
def letters(self):
ltitle = self.title.split()
if not ltitle:
return ""
if len(ltitle) == 1:
return self.title[0:2].upper()
for word in ('et','à'):
ltitle = list(filter((word).__ne__, ltitle))
return ltitle[0][0].upper()+ltitle[1][0].upper()
def __str__(self):
return self.title
class ArticleImage(models.Model):
# image = ProcessedImageField(upload_to='albums', processors=[ResizeToFit(1280)], format='JPEG', options={'quality': 70})
image = models.ImageField()
thumb = ImageSpecField(source='image',
processors=[ResizeToFit(100)],
format='JPEG',
options={'quality': 5}) # CACHE folder in media/
album = models.ForeignKey(Article, on_delete=models.CASCADE)
alt = models.CharField(max_length=255, default=uuid.uuid4)
caption = models.TextField(max_length=2048, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
width = models.IntegerField(default=0)
height = models.IntegerField(default=0)
slug = models.SlugField(max_length=70, default=uuid.uuid4, editable=False)
@receiver(post_delete, sender=ArticleImage)
def submission_delete(sender, instance, **kwargs):
instance.image.delete(False)
@receiver(post_delete, sender=Article)
def submission_delete(sender, instance, **kwargs):
instance.thumb.delete(False)
# from djgeojson.fields import PointField
# class SleepSpot(models.Model):
# album = models.ForeignKey(Article, on_delete=models.SET_NULL, blank=True, null=True)
# title = models.CharField(max_length=256)
# start_date = models.DateField(null=True, blank=True)
# end_date = models.DateField(null=True, blank=True)
# geom = PointField(null=True, blank=True, default={})
# @property
# def popupContent(self):
# popup = '<b>{}</b><br>{}'.format(self.title, event_date(self.start_date, self.end_date))
# if self.album:
# article_link = 'Article : <a href="{}">{}</a>'.format(reverse('photo_gallery:article', args=(self.album.slug,)), self.album.title)
# else:
# article_link = 'Pas d\'article en rapport'
# popup += '<br><span class=article-link-popup>{}</span>'.format(article_link)
# popup += '<span class="btn-floating btn-small waves-effect waves-light zoom-in-popup"><i class="fas fa-compress-arrows-alt"></i></span>'
# return popup
# def __str__(self):
# return self.title
# def save(self, *args, **kwargs):
# if self.album and not self.start_date:
# self.start_date = self.album.start_date
# self.end_date = self.album.end_date
# super(SleepSpot, self).save(*args, **kwargs)
| 37.973684 | 146 | 0.635482 |
import uuid
from django.db import models
from django.db.models.signals import post_delete
from django.dispatch import receiver
from django.urls import reverse
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToFit
from datetime import datetime
def event_date(start, end):
if end is None or start == end:
return start.strftime("%d %b %Y")
elif start.strftime("%b") == end.strftime("%b"):
return start.strftime("%d") + " - " + end.strftime("%d %b %Y")
else:
return start.strftime("%d %b") + " - " + end.strftime("%d %b %Y")
class Article(models.Model):
title = models.CharField(max_length=70)
description = models.TextField(max_length=8192, null=True, blank=True)
image = models.ImageField()
thumb = ImageSpecField(source='image',
processors=[ResizeToFit(100)],
format='JPEG',
options={'quality': 5})
is_visible = models.BooleanField(default=True)
start_date = models.DateField(default=datetime.today)
end_date = models.DateField(null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now_add=True)
slug = models.SlugField(max_length=50, unique=True)
@property
def get_event_date(self):
return event_date(self.start_date, self.end_date)
@property
def letters(self):
ltitle = self.title.split()
if not ltitle:
return ""
if len(ltitle) == 1:
return self.title[0:2].upper()
for word in ('et','à'):
ltitle = list(filter((word).__ne__, ltitle))
return ltitle[0][0].upper()+ltitle[1][0].upper()
def __str__(self):
return self.title
class ArticleImage(models.Model):
image = models.ImageField()
thumb = ImageSpecField(source='image',
processors=[ResizeToFit(100)],
format='JPEG',
options={'quality': 5})
album = models.ForeignKey(Article, on_delete=models.CASCADE)
alt = models.CharField(max_length=255, default=uuid.uuid4)
caption = models.TextField(max_length=2048, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
width = models.IntegerField(default=0)
height = models.IntegerField(default=0)
slug = models.SlugField(max_length=70, default=uuid.uuid4, editable=False)
@receiver(post_delete, sender=ArticleImage)
def submission_delete(sender, instance, **kwargs):
instance.image.delete(False)
@receiver(post_delete, sender=Article)
def submission_delete(sender, instance, **kwargs):
instance.thumb.delete(False)
# popup += '<br><span class=article-link-popup>{}</span>'.format(article_link)
# popup += '<span class="btn-floating btn-small waves-effect waves-light zoom-in-popup"><i class="fas fa-compress-arrows-alt"></i></span>'
# return popup
# def __str__(self):
# return self.title
# def save(self, *args, **kwargs):
# if self.album and not self.start_date:
# self.start_date = self.album.start_date
# self.end_date = self.album.end_date
# super(SleepSpot, self).save(*args, **kwargs)
| true | true |
1c33815f990bfe1e8339c56f82402e636f730e30 | 938 | py | Python | clients/client/python/test/test_identity_schema_location.py | ory/sdk | 9849c6115f44f4b7612ad246124d80b4401fd730 | [
"Apache-2.0"
] | 77 | 2020-02-14T17:27:36.000Z | 2022-03-25T08:44:52.000Z | clients/client/python/test/test_identity_schema_location.py | vinckr/sdk | 5b93557835af7ad3662ef620b3ef10729149d484 | [
"Apache-2.0"
] | 125 | 2020-02-07T21:45:52.000Z | 2022-03-31T12:54:24.000Z | clients/client/python/test/test_identity_schema_location.py | vinckr/sdk | 5b93557835af7ad3662ef620b3ef10729149d484 | [
"Apache-2.0"
] | 44 | 2020-01-31T22:05:47.000Z | 2022-03-09T14:41:22.000Z | """
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v0.0.1-alpha.30
Contact: support@ory.sh
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_client
from ory_client.model.identity_schema_location import IdentitySchemaLocation
class TestIdentitySchemaLocation(unittest.TestCase):
"""IdentitySchemaLocation unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testIdentitySchemaLocation(self):
"""Test IdentitySchemaLocation"""
# FIXME: construct object with mandatory attributes with example values
# model = IdentitySchemaLocation() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 25.351351 | 194 | 0.716418 |
import sys
import unittest
import ory_client
from ory_client.model.identity_schema_location import IdentitySchemaLocation
class TestIdentitySchemaLocation(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testIdentitySchemaLocation(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
1c33821447ee3a2205f9ad20463239156a6209b2 | 1,375 | py | Python | AtC_Reg_Con_041-050/ARC050/B.py | yosho-18/AtCoder | 50f6d5c92a01792552c31ac912ce1cd557b06fb0 | [
"MIT"
] | null | null | null | AtC_Reg_Con_041-050/ARC050/B.py | yosho-18/AtCoder | 50f6d5c92a01792552c31ac912ce1cd557b06fb0 | [
"MIT"
] | null | null | null | AtC_Reg_Con_041-050/ARC050/B.py | yosho-18/AtCoder | 50f6d5c92a01792552c31ac912ce1cd557b06fb0 | [
"MIT"
] | null | null | null | import math, string, itertools, fractions, heapq, collections, re, array, bisect, sys, copy, functools, random
from collections import deque, defaultdict, Counter; from heapq import heappush, heappop
from itertools import permutations, combinations, product, accumulate, groupby
from bisect import bisect_left, bisect_right, insort_left, insort_right; sys.setrecursionlimit(10 ** 7)
inf = 10 ** 20; INF = float("INF"); ans = 0; tmp = 0; ansli = []; tmpli = []; candili = []
eps = 1.0 / 10 ** 10; mod = 10 ** 9 + 7
dd = [(-1, 0), (0, 1), (1, 0), (0, -1)]; ddn = dd + [(-1, 1), (1, 1), (1, -1), (-1, -1)]; ddn9 = ddn + [(0, 0)]
"""for dx, dy in dd:
nx = j + dx; ny = i + dy
if 0 <= nx < w and 0 <= ny < h:"""
def wi(): return list(map(int, sys.stdin.readline().split()))
def wip(): return [int(x) - 1 for x in sys.stdin.readline().split()]#WideIntPoint
def ws(): return sys.stdin.readline().split()
def i(): return int(sys.stdin.readline())
def s(): return input()
def hi(n): return [i() for _ in range(n)]
def hs(n): return [s() for _ in range(n)]#HeightString
def mi(n): return [wi() for _ in range(n)]#MatrixInt
def mip(n): return [wip() for _ in range(n)]
def ms(n): return [ws() for _ in range(n)]
R, B = wi()
x, y = wi()
if x == 1 and y == 1:
print(min(R, B))
else:
print(max(0, R / x, B, R, B / y, (R * (y - 1) + B * (x - 1)) / (x * y - 1))) | 50.925926 | 111 | 0.593455 | import math, string, itertools, fractions, heapq, collections, re, array, bisect, sys, copy, functools, random
from collections import deque, defaultdict, Counter; from heapq import heappush, heappop
from itertools import permutations, combinations, product, accumulate, groupby
from bisect import bisect_left, bisect_right, insort_left, insort_right; sys.setrecursionlimit(10 ** 7)
inf = 10 ** 20; INF = float("INF"); ans = 0; tmp = 0; ansli = []; tmpli = []; candili = []
eps = 1.0 / 10 ** 10; mod = 10 ** 9 + 7
dd = [(-1, 0), (0, 1), (1, 0), (0, -1)]; ddn = dd + [(-1, 1), (1, 1), (1, -1), (-1, -1)]; ddn9 = ddn + [(0, 0)]
def wi(): return list(map(int, sys.stdin.readline().split()))
def wip(): return [int(x) - 1 for x in sys.stdin.readline().split()]
def ws(): return sys.stdin.readline().split()
def i(): return int(sys.stdin.readline())
def s(): return input()
def hi(n): return [i() for _ in range(n)]
def hs(n): return [s() for _ in range(n)]
def mi(n): return [wi() for _ in range(n)]
def mip(n): return [wip() for _ in range(n)]
def ms(n): return [ws() for _ in range(n)]
R, B = wi()
x, y = wi()
if x == 1 and y == 1:
print(min(R, B))
else:
print(max(0, R / x, B, R, B / y, (R * (y - 1) + B * (x - 1)) / (x * y - 1))) | true | true |
1c33828faa0aa4daff20627b4fe5cc532a8ee1a8 | 321 | py | Python | src/pvt_model/pvt_system/__init__.py | BenWinchester/PVTModel | 6bf3976b06f406f632e0a9e525cd8b05359da239 | [
"MIT"
] | 1 | 2021-05-11T14:15:11.000Z | 2021-05-11T14:15:11.000Z | src/pvt_model/pvt_system/__init__.py | BenWinchester/PVTModel | 6bf3976b06f406f632e0a9e525cd8b05359da239 | [
"MIT"
] | 14 | 2021-02-23T11:53:08.000Z | 2021-11-16T10:45:31.000Z | src/pvt_model/pvt_system/__init__.py | BenWinchester/PVTModel | 6bf3976b06f406f632e0a9e525cd8b05359da239 | [
"MIT"
] | null | null | null | #!/usr/bin/python3.7
########################################################################################
# __init__.py - The init module for the PVT model component.
#
# Author: Ben Winchester
# Copyright: Ben Winchester, 2020
########################################################################################
| 40.125 | 88 | 0.327103 | true | true | |
1c3382fc20e6b4435b9f77ff3cc16a4a96a24d0b | 34,879 | py | Python | tests/ManualTableau/TestIplManualTableau.py | oIi123/TableauxProver | cb527f91f5c2d0393fbfcb3fb501b4480e0c9031 | [
"MIT"
] | null | null | null | tests/ManualTableau/TestIplManualTableau.py | oIi123/TableauxProver | cb527f91f5c2d0393fbfcb3fb501b4480e0c9031 | [
"MIT"
] | null | null | null | tests/ManualTableau/TestIplManualTableau.py | oIi123/TableauxProver | cb527f91f5c2d0393fbfcb3fb501b4480e0c9031 | [
"MIT"
] | null | null | null | import unittest
from src.builder_factory import LogicType
from src.Parser.PropParser import PropParser
from src.TableauxBuilder.BaseManualTableau import BaseManualTableau, BaseTableauxBuilder
from src.TableauxBuilder.IpcTableauxBuilder import IpcTableauxBuilder
def parse(expr: str):
return PropParser.parse(expr).expr
class TestPlManualTableau(unittest.TestCase):
def test_incorrect_1(self):
expr = parse('(a->b)&(b->c)->(a->c)')
l_expr = [
parse('(a->b)&(b->c)'),
parse('(a->c)'),
]
tableau = IpcTableauxBuilder(false_exprs=[expr])
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, expr, None), [l_expr], [[]], [[]], [])
self.assertFalse(success)
def test_incorrect_2(self):
expr = parse('(a->b)&(b->c)->(a->c)')
l_expr = [
parse('(a->b)&(b->c)'),
parse('(a->c)'),
]
r_expr = [
parse('(a->c)'),
]
tableau = IpcTableauxBuilder(false_exprs=[expr])
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, expr, None), [l_expr], [r_expr], [[]], [])
self.assertFalse(success)
def test_incorrect_3(self):
expr = parse('(a->b)&(b->c)->(a->c)')
l_expr = [
parse('(a->b)&(b->c)'),
]
r_expr = [
parse('(b->c)'),
]
tableau = IpcTableauxBuilder(false_exprs=[expr])
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, expr, None), [l_expr], [r_expr], [[]], [])
self.assertFalse(success)
def test_incorrect_4(self):
expr_t = parse('(a->b)&(b->c)->(a->c)')
expr_f = parse('(a->b)&(b->c)->(a->c)')
l_expr = [
parse('(a->b)&(b->c)'),
]
r_expr = [
parse('(a->c)'),
]
tableau = IpcTableauxBuilder(true_exprs=[expr_t], false_exprs=[expr_f])
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((expr_t, None, None), [l_expr], [r_expr], [[]], [])
self.assertFalse(success)
def test_incorrect_5(self):
expr_t = parse('!!a')
expr_f = parse('a|b')
l_expr = []
r_expr = [
parse('a'),
]
tableau = IpcTableauxBuilder(true_exprs=[expr_t], false_exprs=[expr_f])
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, expr_f, None), [l_expr], [r_expr], [[]], [])
self.assertFalse(success)
def test_incorrect_6(self):
expr_t = parse('!!a')
expr_f = parse('a|b')
l_expr = [
parse('a'),
]
r_expr = []
tableau = IpcTableauxBuilder(true_exprs=[expr_t], false_exprs=[expr_f])
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((expr_t, None, None), [l_expr], [r_expr], [[]], [])
self.assertFalse(success)
def test_correct_1(self):
expr_t = parse('!!a')
expr_f = parse('a|b')
l_expr = []
r_expr = []
cf_expr = [
parse('!a')
]
tableau = IpcTableauxBuilder(true_exprs=[expr_t], false_exprs=[expr_f])
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((expr_t, None, None), [l_expr], [r_expr], [cf_expr], [])
self.assertTrue(success)
def test_correct_2(self):
expr_t = parse('!!a')
expr_f = parse('a|b')
l_expr = []
r_expr = [
parse('a'),
parse('b')
]
tableau = IpcTableauxBuilder(true_exprs=[expr_t], false_exprs=[expr_f])
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, expr_f, None), [l_expr], [r_expr], [[]], [])
self.assertTrue(success)
def test_correct_3(self):
expr_t = parse('!!a')
expr_f = parse('(a->b)&(b->c)->(a->c)')
l_expr = [
parse('(a->b)&(b->c)')
]
r_expr = [
parse('(a->c)')
]
tableau = IpcTableauxBuilder(true_exprs=[expr_t], false_exprs=[expr_f])
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, expr_f, None), [l_expr], [r_expr], [[]], [])
self.assertTrue(success)
def test_correct_4(self):
expr_t = [
parse('p->q'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
l_expr = [
parse('!s')
]
r_expr = [
parse('!p')
]
tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, expr_f[0], None), [l_expr], [r_expr], [[]], [])
self.assertTrue(success)
def test_correct_5(self):
expr_t = [
parse('p->q'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
l_expr = [
[parse('q')], []
]
r_expr = [
[], [parse('p')]
]
tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((expr_t[0], None, None), l_expr, r_expr, [[],[]], [])
self.assertTrue(success)
def test_correct_6(self):
expr_t = [
parse('p->q'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
l_expr = [
[], [parse('q')]
]
r_expr = [
[parse('p')], []
]
tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((expr_t[0], None, None), l_expr, r_expr, [[],[]], [])
self.assertTrue(success)
def test_correct_7(self):
expr_t = [
parse('a|b'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
l_expr = [
[parse('b')], [parse('a')]
]
r_expr = [
[], []
]
tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((expr_t[0], None, None), l_expr, r_expr, [[],[]], [])
self.assertTrue(success)
def test_correct_8(self):
expr_t = [
parse('a|b'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
l_expr = [
[parse('a')], [parse('b')]
]
r_expr = [
[], []
]
tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((expr_t[0], None, None), l_expr, r_expr, [[],[]], [])
self.assertTrue(success)
def test_correct_9(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
l_expr = [
parse('a'),
parse('b&c')
]
r_expr = []
tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((expr_t[0], None, None), [l_expr], [r_expr], [[]], [])
self.assertTrue(success)
def test_correct_10(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
l_expr = [
parse('a&b'),
parse('c')
]
r_expr = []
tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((expr_t[0], None, None), [l_expr], [r_expr], [[]], [])
self.assertTrue(success)
def test_correct_11(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
expr_cf = [
parse('!a')
]
l_expr = [
parse('a')
]
r_expr = []
tableau = IpcTableauxBuilder(true_exprs=expr_t,
false_exprs=expr_f,
cf_exprs=expr_cf)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, None, expr_cf[0]), [l_expr], [r_expr], [[]], [])
self.assertTrue(success)
def test_correct_12(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
expr_cf = [
parse('a&b')
]
l_expr = [[],[]]
r_expr = [[],[]]
cf_expr = [
[parse('a')],
[parse('b')]
]
tableau = IpcTableauxBuilder(true_exprs=expr_t,
false_exprs=expr_f,
cf_exprs=expr_cf)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, None, expr_cf[0]), l_expr, r_expr, cf_expr, [])
self.assertTrue(success)
def test_correct_13(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
expr_cf = [
parse('a|b')
]
l_expr = [[]]
r_expr = [[]]
cf_expr = [[
parse('a'),
parse('b')
]]
tableau = IpcTableauxBuilder(true_exprs=expr_t,
false_exprs=expr_f,
cf_exprs=expr_cf)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, None, expr_cf[0]), l_expr, r_expr, cf_expr, [])
self.assertTrue(success)
def test_correct_14(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
expr_cf = [
parse('a->b')
]
l_expr = [[
parse('a')
]]
r_expr = [[
parse('b')
]]
cf_expr = [[]]
tableau = IpcTableauxBuilder(true_exprs=expr_t,
false_exprs=expr_f,
cf_exprs=expr_cf)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, None, expr_cf[0]), l_expr, r_expr, cf_expr, [])
self.assertTrue(success)
def test_correct_15(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
expr_cf = [
parse('a<->b')
]
l_expr = [[]]
r_expr = [[]]
cf_expr = [[
parse('(a->b)&(b->a)')
]]
tableau = IpcTableauxBuilder(true_exprs=expr_t,
false_exprs=expr_f,
cf_exprs=expr_cf)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, None, expr_cf[0]), l_expr, r_expr, cf_expr, [])
self.assertTrue(success)
def test_merge_true_and_perm_1(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
l_expr = [
parse('a&b'),
parse('c')
]
r_expr = []
tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
manual_tableau.merge((expr_t[0], None, None), [l_expr], [r_expr], [[]], [])
sequent = tableau.sequent
self.assertEqual(3, len(sequent[BaseTableauxBuilder.true_exprs]))
self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('a&b'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('c'), sequent[BaseTableauxBuilder.true_atoms])
self.assertEqual(1, len(sequent[BaseTableauxBuilder.false_exprs]))
self.assertIn(parse('!s->!p'), sequent[BaseTableauxBuilder.false_exprs])
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_atoms]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
self.assertEqual(1, len(sequent[BaseTableauxBuilder.true_processed]))
self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_processed])
self.assertEqual(0, len(tableau.children))
def test_merge_true_and_perm_2(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
l_expr = [
parse('b&c'),
parse('a')
]
r_expr = []
tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
manual_tableau.merge((expr_t[0], None, None), [l_expr], [r_expr], [[]], [])
sequent = tableau.sequent
self.assertEqual(3, len(sequent[BaseTableauxBuilder.true_exprs]))
self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('b&c'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('a'), sequent[BaseTableauxBuilder.true_atoms])
self.assertEqual(1, len(sequent[BaseTableauxBuilder.false_exprs]))
self.assertIn(parse('!s->!p'), sequent[BaseTableauxBuilder.false_exprs])
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_atoms]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
self.assertEqual(1, len(sequent[BaseTableauxBuilder.true_processed]))
self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_processed])
self.assertEqual(0, len(tableau.children))
def test_merge_false_impl(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p'),
parse('t&f')
]
l_expr = [
parse('!s')
]
r_expr = [
parse('!p')
]
tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
manual_tableau.merge((None, expr_f[0], None), [l_expr], [r_expr], [[]], [])
self.assertEqual(1, len(tableau.children))
sequent = tableau.children[0].sequent
self.assertEqual(4, len(sequent[BaseTableauxBuilder.true_exprs]))
self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('!s'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_exprs])
self.assertEqual(1, len(sequent[BaseTableauxBuilder.false_exprs]))
self.assertIn(parse('!p'), sequent[BaseTableauxBuilder.false_exprs])
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_atoms]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_atoms]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_processed]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
def test_merge_true_impl(self):
expr_t = [
parse('q->r'),
parse('a&b&c'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
l_expr = [
[], [parse('r')],
]
r_expr = [
[parse('q')], [],
]
tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
manual_tableau.merge((expr_t[0], None, None), l_expr, r_expr, [[],[]], [])
sequent = tableau.sequent
self.assertEqual(2, len(tableau.children))
self.assertEqual(2, len(sequent[BaseTableauxBuilder.true_exprs]))
self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_exprs])
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_atoms]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_atoms]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
self.assertEqual(1, len(sequent[BaseTableauxBuilder.true_processed]))
self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_processed])
c_s_1 = tableau.children[0].sequent
c_s_2 = tableau.children[1].sequent
self.assertEqual(2, len(c_s_1[BaseTableauxBuilder.true_exprs]))
self.assertIn(parse('r->s'), c_s_1[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('a&b&c'), c_s_1[BaseTableauxBuilder.true_exprs])
self.assertEqual(1, len(c_s_1[BaseTableauxBuilder.false_atoms]))
self.assertIn(parse('q'), c_s_1[BaseTableauxBuilder.false_atoms])
self.assertEqual(0, len(c_s_1[BaseTableauxBuilder.true_atoms]))
self.assertEqual(0, len(c_s_1[BaseTableauxBuilder.false_processed]))
self.assertEqual(1, len(c_s_1[BaseTableauxBuilder.true_processed]))
self.assertIn(parse('q->r'), c_s_1[BaseTableauxBuilder.true_processed])
self.assertEqual(2, len(c_s_2[BaseTableauxBuilder.true_exprs]))
self.assertIn(parse('r->s'), c_s_2[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('a&b&c'), c_s_2[BaseTableauxBuilder.true_exprs])
self.assertEqual(1, len(c_s_2[BaseTableauxBuilder.true_atoms]))
self.assertIn(parse('r'), c_s_2[BaseTableauxBuilder.true_atoms])
self.assertEqual(0, len(c_s_2[BaseTableauxBuilder.false_atoms]))
self.assertEqual(0, len(c_s_2[BaseTableauxBuilder.false_processed]))
self.assertEqual(1, len(c_s_2[BaseTableauxBuilder.true_processed]))
self.assertIn(parse('q->r'), c_s_2[BaseTableauxBuilder.true_processed])
def test_merge_cf_or(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p'),
parse('t&f')
]
expr_cf = [
parse('s|q')
]
l_expr = []
r_expr = []
cf_expr = [
parse('s'),
parse('q')
]
tableau = IpcTableauxBuilder(true_exprs=expr_t,
false_exprs=expr_f,
cf_exprs = expr_cf)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
manual_tableau.merge((None, None, expr_cf[0]), [l_expr], [r_expr], [cf_expr], [])
self.assertEqual(0, len(tableau.children))
sequent = tableau.sequent
self.assertEqual(3, len(sequent[BaseTableauxBuilder.true_exprs]))
self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_exprs])
self.assertEqual(2, len(sequent[BaseTableauxBuilder.false_exprs]))
self.assertIn(parse('!s->!p'), sequent[BaseTableauxBuilder.false_exprs])
self.assertIn(parse('t&f'), sequent[BaseTableauxBuilder.false_exprs])
self.assertEqual(0, len(sequent[BaseTableauxBuilder.certain_falsehood_exprs]))
self.assertEqual(2, len(sequent[BaseTableauxBuilder.certain_falsehood_atoms]))
self.assertIn(parse('s'), sequent[BaseTableauxBuilder.certain_falsehood_atoms])
self.assertIn(parse('q'), sequent[BaseTableauxBuilder.certain_falsehood_atoms])
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_atoms]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_atoms]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_processed]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
def test_merge_cf_and(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p'),
parse('t&f')
]
expr_cf = [
parse('s&q')
]
l_expr = [[],[]]
r_expr = [[],[]]
cf_expr = [
[parse('s')],
[parse('q')]
]
tableau = IpcTableauxBuilder(true_exprs=expr_t,
false_exprs=expr_f,
cf_exprs = expr_cf)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
manual_tableau.merge((None, None, expr_cf[0]), l_expr, r_expr, cf_expr, [])
self.assertEqual(2, len(tableau.children))
sequent_1 = tableau.children[0].sequent
sequent_2 = tableau.children[1].sequent
self.assertEqual(3, len(sequent_1[BaseTableauxBuilder.true_exprs]))
self.assertIn(parse('q->r'), sequent_1[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('r->s'), sequent_1[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('a&b&c'), sequent_1[BaseTableauxBuilder.true_exprs])
self.assertEqual(0, len(sequent_1[BaseTableauxBuilder.false_exprs]))
self.assertEqual(0, len(sequent_1[BaseTableauxBuilder.certain_falsehood_exprs]))
self.assertEqual(1, len(sequent_1[BaseTableauxBuilder.certain_falsehood_atoms]))
self.assertIn(parse('s'), sequent_1[BaseTableauxBuilder.certain_falsehood_atoms])
self.assertEqual(0, len(sequent_1[BaseTableauxBuilder.false_atoms]))
self.assertEqual(0, len(sequent_1[BaseTableauxBuilder.true_atoms]))
self.assertEqual(0, len(sequent_1[BaseTableauxBuilder.true_processed]))
self.assertEqual(0, len(sequent_1[BaseTableauxBuilder.false_processed]))
self.assertEqual(3, len(sequent_2[BaseTableauxBuilder.true_exprs]))
self.assertIn(parse('q->r'), sequent_2[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('r->s'), sequent_2[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('a&b&c'), sequent_2[BaseTableauxBuilder.true_exprs])
self.assertEqual(0, len(sequent_2[BaseTableauxBuilder.false_exprs]))
self.assertEqual(0, len(sequent_2[BaseTableauxBuilder.certain_falsehood_exprs]))
self.assertEqual(1, len(sequent_2[BaseTableauxBuilder.certain_falsehood_atoms]))
self.assertIn(parse('q'), sequent_2[BaseTableauxBuilder.certain_falsehood_atoms])
self.assertEqual(0, len(sequent_2[BaseTableauxBuilder.false_atoms]))
self.assertEqual(0, len(sequent_2[BaseTableauxBuilder.true_atoms]))
self.assertEqual(0, len(sequent_2[BaseTableauxBuilder.true_processed]))
self.assertEqual(0, len(sequent_2[BaseTableauxBuilder.false_processed]))
def test_merge_cf_impl(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p'),
parse('t&f')
]
expr_cf = [
parse('s->q')
]
l_expr = [[
parse('s')
]]
r_expr = [[
parse('q')
]]
cf_expr = [[]]
tableau = IpcTableauxBuilder(true_exprs=expr_t,
false_exprs=expr_f,
cf_exprs = expr_cf)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
manual_tableau.merge((None, None, expr_cf[0]), l_expr, r_expr, cf_expr, [])
self.assertEqual(1, len(tableau.children))
sequent = tableau.children[0].sequent
self.assertEqual(3, len(sequent[BaseTableauxBuilder.true_exprs]))
self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_exprs])
self.assertEqual(1, len(sequent[BaseTableauxBuilder.true_atoms]))
self.assertIn(parse('s'), sequent[BaseTableauxBuilder.true_atoms])
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_exprs]))
self.assertEqual(1, len(sequent[BaseTableauxBuilder.false_atoms]))
self.assertIn(parse('q'), sequent[BaseTableauxBuilder.false_atoms])
self.assertEqual(0, len(sequent[BaseTableauxBuilder.certain_falsehood_exprs]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.certain_falsehood_atoms]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_processed]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
def test_merge_cf_eq(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p'),
parse('t&f')
]
expr_cf = [
parse('s<->q')
]
l_expr = [[]]
r_expr = [[]]
cf_expr = [[
parse('(s->q)&(q->s)')
]]
tableau = IpcTableauxBuilder(true_exprs=expr_t,
false_exprs=expr_f,
cf_exprs = expr_cf)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
manual_tableau.merge((None, None, expr_cf[0]), l_expr, r_expr, cf_expr, [])
self.assertEqual(0, len(tableau.children))
sequent = tableau.sequent
self.assertEqual(3, len(sequent[BaseTableauxBuilder.true_exprs]))
self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_exprs])
self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_atoms]))
self.assertEqual(2, len(sequent[BaseTableauxBuilder.false_exprs]))
self.assertIn(parse('!s->!p'), sequent[BaseTableauxBuilder.false_exprs])
self.assertIn(parse('t&f'), sequent[BaseTableauxBuilder.false_exprs])
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_atoms]))
self.assertEqual(1, len(sequent[BaseTableauxBuilder.certain_falsehood_exprs]))
self.assertIn(parse('(s->q)&(q->s)'), sequent[BaseTableauxBuilder.certain_falsehood_exprs])
self.assertEqual(0, len(sequent[BaseTableauxBuilder.certain_falsehood_atoms]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_processed]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
self.assertEqual(1, len(sequent[BaseTableauxBuilder.certain_falsehood_processed]))
self.assertIn(parse('s<->q'), sequent[BaseTableauxBuilder.certain_falsehood_processed])
def test_merge_cf_not(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p'),
parse('t&f')
]
expr_cf = [
parse('!s')
]
l_expr = [[
parse('s')
]]
r_expr = [[]]
cf_expr = [[]]
tableau = IpcTableauxBuilder(true_exprs=expr_t,
false_exprs=expr_f,
cf_exprs = expr_cf)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
manual_tableau.merge((None, None, expr_cf[0]), l_expr, r_expr, cf_expr, [])
self.assertEqual(1, len(tableau.children))
sequent = tableau.children[0].sequent
self.assertEqual(3, len(sequent[BaseTableauxBuilder.true_exprs]))
self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_exprs])
self.assertEqual(1, len(sequent[BaseTableauxBuilder.true_atoms]))
self.assertIn(parse('s'), sequent[BaseTableauxBuilder.true_atoms])
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_exprs]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_atoms]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.certain_falsehood_exprs]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.certain_falsehood_atoms]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_processed]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
self.assertEqual(1, len(sequent[BaseTableauxBuilder.certain_falsehood_processed]))
self.assertIn(parse('!s'), sequent[BaseTableauxBuilder.certain_falsehood_processed])
def test_merge_true_not(self):
expr_t = [
parse('!s'),
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p'),
parse('t&f')
]
expr_cf = [
parse('!s')
]
l_expr = [[]]
r_expr = [[]]
cf_expr = [[
parse('s')
]]
tableau = IpcTableauxBuilder(true_exprs=expr_t,
false_exprs=expr_f,
cf_exprs = expr_cf)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
manual_tableau.merge((expr_t[0], None, None), l_expr, r_expr, cf_expr, [])
self.assertEqual(0, len(tableau.children))
sequent = tableau.sequent
self.assertEqual(3, len(sequent[BaseTableauxBuilder.true_exprs]))
self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_exprs])
self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_atoms]))
self.assertEqual(2, len(sequent[BaseTableauxBuilder.false_exprs]))
self.assertIn(parse('!s->!p'), sequent[BaseTableauxBuilder.false_exprs])
self.assertIn(parse('t&f'), sequent[BaseTableauxBuilder.false_exprs])
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_atoms]))
self.assertEqual(1, len(sequent[BaseTableauxBuilder.certain_falsehood_exprs]))
self.assertIn(parse('!s'), sequent[BaseTableauxBuilder.certain_falsehood_exprs])
self.assertEqual(1, len(sequent[BaseTableauxBuilder.certain_falsehood_atoms]))
self.assertIn(parse('s'), sequent[BaseTableauxBuilder.certain_falsehood_atoms])
self.assertEqual(1, len(sequent[BaseTableauxBuilder.true_processed]))
self.assertIn(parse('!s'), sequent[BaseTableauxBuilder.true_processed])
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.certain_falsehood_processed]))
def test_merge_false_not(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s'),
parse('!s->!p'),
parse('t&f'),
]
expr_cf = [
parse('!s')
]
l_expr = [[
parse('s')
]]
r_expr = [[]]
cf_expr = [[]]
tableau = IpcTableauxBuilder(true_exprs=expr_t,
false_exprs=expr_f,
cf_exprs = expr_cf)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
manual_tableau.merge((None, expr_f[0], None), l_expr, r_expr, cf_expr, [])
self.assertEqual(1, len(tableau.children))
sequent = tableau.children[0].sequent
self.assertEqual(3, len(sequent[BaseTableauxBuilder.true_exprs]))
self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_exprs])
self.assertEqual(1, len(sequent[BaseTableauxBuilder.true_atoms]))
self.assertIn(parse('s'), sequent[BaseTableauxBuilder.true_atoms])
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_exprs]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_atoms]))
self.assertEqual(1, len(sequent[BaseTableauxBuilder.certain_falsehood_exprs]))
self.assertIn(parse('!s'), sequent[BaseTableauxBuilder.certain_falsehood_exprs])
self.assertEqual(0, len(sequent[BaseTableauxBuilder.certain_falsehood_atoms]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_processed]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
self.assertEqual(0, len(sequent[BaseTableauxBuilder.certain_falsehood_processed]))
| 33.764763 | 99 | 0.577998 | import unittest
from src.builder_factory import LogicType
from src.Parser.PropParser import PropParser
from src.TableauxBuilder.BaseManualTableau import BaseManualTableau, BaseTableauxBuilder
from src.TableauxBuilder.IpcTableauxBuilder import IpcTableauxBuilder
def parse(expr: str):
    """Parse a propositional formula string and return its expression node."""
    parsed = PropParser.parse(expr)
    return parsed.expr
class TestPlManualTableau(unittest.TestCase):
def test_incorrect_1(self):
expr = parse('(a->b)&(b->c)->(a->c)')
l_expr = [
parse('(a->b)&(b->c)'),
parse('(a->c)'),
]
tableau = IpcTableauxBuilder(false_exprs=[expr])
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, expr, None), [l_expr], [[]], [[]], [])
self.assertFalse(success)
def test_incorrect_2(self):
expr = parse('(a->b)&(b->c)->(a->c)')
l_expr = [
parse('(a->b)&(b->c)'),
parse('(a->c)'),
]
r_expr = [
parse('(a->c)'),
]
tableau = IpcTableauxBuilder(false_exprs=[expr])
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, expr, None), [l_expr], [r_expr], [[]], [])
self.assertFalse(success)
def test_incorrect_3(self):
expr = parse('(a->b)&(b->c)->(a->c)')
l_expr = [
parse('(a->b)&(b->c)'),
]
r_expr = [
parse('(b->c)'),
]
tableau = IpcTableauxBuilder(false_exprs=[expr])
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, expr, None), [l_expr], [r_expr], [[]], [])
self.assertFalse(success)
def test_incorrect_4(self):
expr_t = parse('(a->b)&(b->c)->(a->c)')
expr_f = parse('(a->b)&(b->c)->(a->c)')
l_expr = [
parse('(a->b)&(b->c)'),
]
r_expr = [
parse('(a->c)'),
]
tableau = IpcTableauxBuilder(true_exprs=[expr_t], false_exprs=[expr_f])
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((expr_t, None, None), [l_expr], [r_expr], [[]], [])
self.assertFalse(success)
def test_incorrect_5(self):
expr_t = parse('!!a')
expr_f = parse('a|b')
l_expr = []
r_expr = [
parse('a'),
]
tableau = IpcTableauxBuilder(true_exprs=[expr_t], false_exprs=[expr_f])
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, expr_f, None), [l_expr], [r_expr], [[]], [])
self.assertFalse(success)
def test_incorrect_6(self):
expr_t = parse('!!a')
expr_f = parse('a|b')
l_expr = [
parse('a'),
]
r_expr = []
tableau = IpcTableauxBuilder(true_exprs=[expr_t], false_exprs=[expr_f])
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((expr_t, None, None), [l_expr], [r_expr], [[]], [])
self.assertFalse(success)
def test_correct_1(self):
expr_t = parse('!!a')
expr_f = parse('a|b')
l_expr = []
r_expr = []
cf_expr = [
parse('!a')
]
tableau = IpcTableauxBuilder(true_exprs=[expr_t], false_exprs=[expr_f])
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((expr_t, None, None), [l_expr], [r_expr], [cf_expr], [])
self.assertTrue(success)
def test_correct_2(self):
expr_t = parse('!!a')
expr_f = parse('a|b')
l_expr = []
r_expr = [
parse('a'),
parse('b')
]
tableau = IpcTableauxBuilder(true_exprs=[expr_t], false_exprs=[expr_f])
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, expr_f, None), [l_expr], [r_expr], [[]], [])
self.assertTrue(success)
def test_correct_3(self):
expr_t = parse('!!a')
expr_f = parse('(a->b)&(b->c)->(a->c)')
l_expr = [
parse('(a->b)&(b->c)')
]
r_expr = [
parse('(a->c)')
]
tableau = IpcTableauxBuilder(true_exprs=[expr_t], false_exprs=[expr_f])
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, expr_f, None), [l_expr], [r_expr], [[]], [])
self.assertTrue(success)
def test_correct_4(self):
expr_t = [
parse('p->q'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
l_expr = [
parse('!s')
]
r_expr = [
parse('!p')
]
tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, expr_f[0], None), [l_expr], [r_expr], [[]], [])
self.assertTrue(success)
def test_correct_5(self):
expr_t = [
parse('p->q'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
l_expr = [
[parse('q')], []
]
r_expr = [
[], [parse('p')]
]
tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((expr_t[0], None, None), l_expr, r_expr, [[],[]], [])
self.assertTrue(success)
def test_correct_6(self):
expr_t = [
parse('p->q'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
l_expr = [
[], [parse('q')]
]
r_expr = [
[parse('p')], []
]
tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((expr_t[0], None, None), l_expr, r_expr, [[],[]], [])
self.assertTrue(success)
def test_correct_7(self):
expr_t = [
parse('a|b'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
l_expr = [
[parse('b')], [parse('a')]
]
r_expr = [
[], []
]
tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((expr_t[0], None, None), l_expr, r_expr, [[],[]], [])
self.assertTrue(success)
def test_correct_8(self):
expr_t = [
parse('a|b'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
l_expr = [
[parse('a')], [parse('b')]
]
r_expr = [
[], []
]
tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((expr_t[0], None, None), l_expr, r_expr, [[],[]], [])
self.assertTrue(success)
def test_correct_9(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
l_expr = [
parse('a'),
parse('b&c')
]
r_expr = []
tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((expr_t[0], None, None), [l_expr], [r_expr], [[]], [])
self.assertTrue(success)
def test_correct_10(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
l_expr = [
parse('a&b'),
parse('c')
]
r_expr = []
tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((expr_t[0], None, None), [l_expr], [r_expr], [[]], [])
self.assertTrue(success)
def test_correct_11(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
expr_cf = [
parse('!a')
]
l_expr = [
parse('a')
]
r_expr = []
tableau = IpcTableauxBuilder(true_exprs=expr_t,
false_exprs=expr_f,
cf_exprs=expr_cf)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, None, expr_cf[0]), [l_expr], [r_expr], [[]], [])
self.assertTrue(success)
def test_correct_12(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
expr_cf = [
parse('a&b')
]
l_expr = [[],[]]
r_expr = [[],[]]
cf_expr = [
[parse('a')],
[parse('b')]
]
tableau = IpcTableauxBuilder(true_exprs=expr_t,
false_exprs=expr_f,
cf_exprs=expr_cf)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, None, expr_cf[0]), l_expr, r_expr, cf_expr, [])
self.assertTrue(success)
def test_correct_13(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
expr_cf = [
parse('a|b')
]
l_expr = [[]]
r_expr = [[]]
cf_expr = [[
parse('a'),
parse('b')
]]
tableau = IpcTableauxBuilder(true_exprs=expr_t,
false_exprs=expr_f,
cf_exprs=expr_cf)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, None, expr_cf[0]), l_expr, r_expr, cf_expr, [])
self.assertTrue(success)
def test_correct_14(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
expr_cf = [
parse('a->b')
]
l_expr = [[
parse('a')
]]
r_expr = [[
parse('b')
]]
cf_expr = [[]]
tableau = IpcTableauxBuilder(true_exprs=expr_t,
false_exprs=expr_f,
cf_exprs=expr_cf)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, None, expr_cf[0]), l_expr, r_expr, cf_expr, [])
self.assertTrue(success)
def test_correct_15(self):
expr_t = [
parse('a&b&c'),
parse('q->r'),
parse('r->s'),
]
expr_f = [
parse('!s->!p')
]
expr_cf = [
parse('a<->b')
]
l_expr = [[]]
r_expr = [[]]
cf_expr = [[
parse('(a->b)&(b->a)')
]]
tableau = IpcTableauxBuilder(true_exprs=expr_t,
false_exprs=expr_f,
cf_exprs=expr_cf)
manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
success = manual_tableau.merge((None, None, expr_cf[0]), l_expr, r_expr, cf_expr, [])
self.assertTrue(success)
    def test_merge_true_and_perm_1(self):
        """Merging T:'a&b&c' as {a&b, c} (permuted split) files the composite
        into true_exprs, the atom into true_atoms, marks the source formula
        processed, and spawns no child branches."""
        expr_t = [
            parse('a&b&c'),
            parse('q->r'),
            parse('r->s'),
        ]
        expr_f = [
            parse('!s->!p')
        ]
        l_expr = [
            parse('a&b'),
            parse('c')
        ]
        r_expr = []
        tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
        manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
        manual_tableau.merge((expr_t[0], None, None), [l_expr], [r_expr], [[]], [])
        sequent = tableau.sequent
        # Two untouched implications plus the new composite 'a&b'.
        self.assertEqual(3, len(sequent[BaseTableauxBuilder.true_exprs]))
        self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('a&b'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('c'), sequent[BaseTableauxBuilder.true_atoms])
        self.assertEqual(1, len(sequent[BaseTableauxBuilder.false_exprs]))
        self.assertIn(parse('!s->!p'), sequent[BaseTableauxBuilder.false_exprs])
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_atoms]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
        self.assertEqual(1, len(sequent[BaseTableauxBuilder.true_processed]))
        self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_processed])
        self.assertEqual(0, len(tableau.children))
    def test_merge_true_and_perm_2(self):
        """Same as test_merge_true_and_perm_1 but with the other associativity
        split {b&c, a}: composite to true_exprs, atom to true_atoms."""
        expr_t = [
            parse('a&b&c'),
            parse('q->r'),
            parse('r->s'),
        ]
        expr_f = [
            parse('!s->!p')
        ]
        l_expr = [
            parse('b&c'),
            parse('a')
        ]
        r_expr = []
        tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
        manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
        manual_tableau.merge((expr_t[0], None, None), [l_expr], [r_expr], [[]], [])
        sequent = tableau.sequent
        self.assertEqual(3, len(sequent[BaseTableauxBuilder.true_exprs]))
        self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('b&c'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('a'), sequent[BaseTableauxBuilder.true_atoms])
        self.assertEqual(1, len(sequent[BaseTableauxBuilder.false_exprs]))
        self.assertIn(parse('!s->!p'), sequent[BaseTableauxBuilder.false_exprs])
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_atoms]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
        self.assertEqual(1, len(sequent[BaseTableauxBuilder.true_processed]))
        self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_processed])
        self.assertEqual(0, len(tableau.children))
    def test_merge_false_impl(self):
        """Merging F:'!s->!p' spawns exactly one child branch in which the
        antecedent '!s' is assumed true and only the consequent '!p' remains
        false (the other false expression is dropped in the IPC child)."""
        expr_t = [
            parse('a&b&c'),
            parse('q->r'),
            parse('r->s'),
        ]
        expr_f = [
            parse('!s->!p'),
            parse('t&f')
        ]
        l_expr = [
            parse('!s')
        ]
        r_expr = [
            parse('!p')
        ]
        tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
        manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
        manual_tableau.merge((None, expr_f[0], None), [l_expr], [r_expr], [[]], [])
        self.assertEqual(1, len(tableau.children))
        sequent = tableau.children[0].sequent
        self.assertEqual(4, len(sequent[BaseTableauxBuilder.true_exprs]))
        self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('!s'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertEqual(1, len(sequent[BaseTableauxBuilder.false_exprs]))
        self.assertIn(parse('!p'), sequent[BaseTableauxBuilder.false_exprs])
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_atoms]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_atoms]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_processed]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
    def test_merge_true_impl(self):
        """Merging T:'q->r' branches into two children: one with 'q' false
        and one with 'r' true; the parent keeps the remaining formulas and
        records 'q->r' as processed."""
        expr_t = [
            parse('q->r'),
            parse('a&b&c'),
            parse('r->s'),
        ]
        expr_f = [
            parse('!s->!p')
        ]
        # One sub-list per branch: branch 1 gets nothing true / 'q' false,
        # branch 2 gets 'r' true / nothing false.
        l_expr = [
            [], [parse('r')],
        ]
        r_expr = [
            [parse('q')], [],
        ]
        tableau = IpcTableauxBuilder(true_exprs=expr_t, false_exprs=expr_f)
        manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
        manual_tableau.merge((expr_t[0], None, None), l_expr, r_expr, [[],[]], [])
        sequent = tableau.sequent
        self.assertEqual(2, len(tableau.children))
        self.assertEqual(2, len(sequent[BaseTableauxBuilder.true_exprs]))
        self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_atoms]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_atoms]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
        self.assertEqual(1, len(sequent[BaseTableauxBuilder.true_processed]))
        self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_processed])
        c_s_1 = tableau.children[0].sequent
        c_s_2 = tableau.children[1].sequent
        self.assertEqual(2, len(c_s_1[BaseTableauxBuilder.true_exprs]))
        self.assertIn(parse('r->s'), c_s_1[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('a&b&c'), c_s_1[BaseTableauxBuilder.true_exprs])
        self.assertEqual(1, len(c_s_1[BaseTableauxBuilder.false_atoms]))
        self.assertIn(parse('q'), c_s_1[BaseTableauxBuilder.false_atoms])
        self.assertEqual(0, len(c_s_1[BaseTableauxBuilder.true_atoms]))
        self.assertEqual(0, len(c_s_1[BaseTableauxBuilder.false_processed]))
        self.assertEqual(1, len(c_s_1[BaseTableauxBuilder.true_processed]))
        self.assertIn(parse('q->r'), c_s_1[BaseTableauxBuilder.true_processed])
        self.assertEqual(2, len(c_s_2[BaseTableauxBuilder.true_exprs]))
        self.assertIn(parse('r->s'), c_s_2[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('a&b&c'), c_s_2[BaseTableauxBuilder.true_exprs])
        self.assertEqual(1, len(c_s_2[BaseTableauxBuilder.true_atoms]))
        self.assertIn(parse('r'), c_s_2[BaseTableauxBuilder.true_atoms])
        self.assertEqual(0, len(c_s_2[BaseTableauxBuilder.false_atoms]))
        self.assertEqual(0, len(c_s_2[BaseTableauxBuilder.false_processed]))
        self.assertEqual(1, len(c_s_2[BaseTableauxBuilder.true_processed]))
        self.assertIn(parse('q->r'), c_s_2[BaseTableauxBuilder.true_processed])
    def test_merge_cf_or(self):
        """Merging CF:'s|q' puts both disjunct atoms into the
        certain_falsehood_atoms of the same (non-branching) sequent."""
        expr_t = [
            parse('a&b&c'),
            parse('q->r'),
            parse('r->s'),
        ]
        expr_f = [
            parse('!s->!p'),
            parse('t&f')
        ]
        expr_cf = [
            parse('s|q')
        ]
        l_expr = []
        r_expr = []
        cf_expr = [
            parse('s'),
            parse('q')
        ]
        tableau = IpcTableauxBuilder(true_exprs=expr_t,
                                     false_exprs=expr_f,
                                     cf_exprs = expr_cf)
        manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
        manual_tableau.merge((None, None, expr_cf[0]), [l_expr], [r_expr], [cf_expr], [])
        self.assertEqual(0, len(tableau.children))
        sequent = tableau.sequent
        self.assertEqual(3, len(sequent[BaseTableauxBuilder.true_exprs]))
        self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertEqual(2, len(sequent[BaseTableauxBuilder.false_exprs]))
        self.assertIn(parse('!s->!p'), sequent[BaseTableauxBuilder.false_exprs])
        self.assertIn(parse('t&f'), sequent[BaseTableauxBuilder.false_exprs])
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.certain_falsehood_exprs]))
        self.assertEqual(2, len(sequent[BaseTableauxBuilder.certain_falsehood_atoms]))
        self.assertIn(parse('s'), sequent[BaseTableauxBuilder.certain_falsehood_atoms])
        self.assertIn(parse('q'), sequent[BaseTableauxBuilder.certain_falsehood_atoms])
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_atoms]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_atoms]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_processed]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
    def test_merge_cf_and(self):
        """Merging CF:'s&q' branches into two children, one per conjunct,
        each carrying its atom in certain_falsehood_atoms and dropping the
        parent's false expressions."""
        expr_t = [
            parse('a&b&c'),
            parse('q->r'),
            parse('r->s'),
        ]
        expr_f = [
            parse('!s->!p'),
            parse('t&f')
        ]
        expr_cf = [
            parse('s&q')
        ]
        l_expr = [[],[]]
        r_expr = [[],[]]
        cf_expr = [
            [parse('s')],
            [parse('q')]
        ]
        tableau = IpcTableauxBuilder(true_exprs=expr_t,
                                     false_exprs=expr_f,
                                     cf_exprs = expr_cf)
        manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
        manual_tableau.merge((None, None, expr_cf[0]), l_expr, r_expr, cf_expr, [])
        self.assertEqual(2, len(tableau.children))
        sequent_1 = tableau.children[0].sequent
        sequent_2 = tableau.children[1].sequent
        self.assertEqual(3, len(sequent_1[BaseTableauxBuilder.true_exprs]))
        self.assertIn(parse('q->r'), sequent_1[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('r->s'), sequent_1[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('a&b&c'), sequent_1[BaseTableauxBuilder.true_exprs])
        self.assertEqual(0, len(sequent_1[BaseTableauxBuilder.false_exprs]))
        self.assertEqual(0, len(sequent_1[BaseTableauxBuilder.certain_falsehood_exprs]))
        self.assertEqual(1, len(sequent_1[BaseTableauxBuilder.certain_falsehood_atoms]))
        self.assertIn(parse('s'), sequent_1[BaseTableauxBuilder.certain_falsehood_atoms])
        self.assertEqual(0, len(sequent_1[BaseTableauxBuilder.false_atoms]))
        self.assertEqual(0, len(sequent_1[BaseTableauxBuilder.true_atoms]))
        self.assertEqual(0, len(sequent_1[BaseTableauxBuilder.true_processed]))
        self.assertEqual(0, len(sequent_1[BaseTableauxBuilder.false_processed]))
        self.assertEqual(3, len(sequent_2[BaseTableauxBuilder.true_exprs]))
        self.assertIn(parse('q->r'), sequent_2[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('r->s'), sequent_2[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('a&b&c'), sequent_2[BaseTableauxBuilder.true_exprs])
        self.assertEqual(0, len(sequent_2[BaseTableauxBuilder.false_exprs]))
        self.assertEqual(0, len(sequent_2[BaseTableauxBuilder.certain_falsehood_exprs]))
        self.assertEqual(1, len(sequent_2[BaseTableauxBuilder.certain_falsehood_atoms]))
        self.assertIn(parse('q'), sequent_2[BaseTableauxBuilder.certain_falsehood_atoms])
        self.assertEqual(0, len(sequent_2[BaseTableauxBuilder.false_atoms]))
        self.assertEqual(0, len(sequent_2[BaseTableauxBuilder.true_atoms]))
        self.assertEqual(0, len(sequent_2[BaseTableauxBuilder.true_processed]))
        self.assertEqual(0, len(sequent_2[BaseTableauxBuilder.false_processed]))
    def test_merge_cf_impl(self):
        """Merging CF:'s->q' spawns one child with 's' among the true atoms
        and 'q' among the false atoms."""
        expr_t = [
            parse('a&b&c'),
            parse('q->r'),
            parse('r->s'),
        ]
        expr_f = [
            parse('!s->!p'),
            parse('t&f')
        ]
        expr_cf = [
            parse('s->q')
        ]
        l_expr = [[
            parse('s')
        ]]
        r_expr = [[
            parse('q')
        ]]
        cf_expr = [[]]
        tableau = IpcTableauxBuilder(true_exprs=expr_t,
                                     false_exprs=expr_f,
                                     cf_exprs = expr_cf)
        manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
        manual_tableau.merge((None, None, expr_cf[0]), l_expr, r_expr, cf_expr, [])
        self.assertEqual(1, len(tableau.children))
        sequent = tableau.children[0].sequent
        self.assertEqual(3, len(sequent[BaseTableauxBuilder.true_exprs]))
        self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertEqual(1, len(sequent[BaseTableauxBuilder.true_atoms]))
        self.assertIn(parse('s'), sequent[BaseTableauxBuilder.true_atoms])
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_exprs]))
        self.assertEqual(1, len(sequent[BaseTableauxBuilder.false_atoms]))
        self.assertIn(parse('q'), sequent[BaseTableauxBuilder.false_atoms])
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.certain_falsehood_exprs]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.certain_falsehood_atoms]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_processed]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
    def test_merge_cf_eq(self):
        """Merging CF:'s<->q' rewrites it in place to its implication form
        '(s->q)&(q->s)' in certain_falsehood_exprs and marks 's<->q' as
        processed, without branching."""
        expr_t = [
            parse('a&b&c'),
            parse('q->r'),
            parse('r->s'),
        ]
        expr_f = [
            parse('!s->!p'),
            parse('t&f')
        ]
        expr_cf = [
            parse('s<->q')
        ]
        l_expr = [[]]
        r_expr = [[]]
        cf_expr = [[
            parse('(s->q)&(q->s)')
        ]]
        tableau = IpcTableauxBuilder(true_exprs=expr_t,
                                     false_exprs=expr_f,
                                     cf_exprs = expr_cf)
        manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
        manual_tableau.merge((None, None, expr_cf[0]), l_expr, r_expr, cf_expr, [])
        self.assertEqual(0, len(tableau.children))
        sequent = tableau.sequent
        self.assertEqual(3, len(sequent[BaseTableauxBuilder.true_exprs]))
        self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_atoms]))
        self.assertEqual(2, len(sequent[BaseTableauxBuilder.false_exprs]))
        self.assertIn(parse('!s->!p'), sequent[BaseTableauxBuilder.false_exprs])
        self.assertIn(parse('t&f'), sequent[BaseTableauxBuilder.false_exprs])
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_atoms]))
        self.assertEqual(1, len(sequent[BaseTableauxBuilder.certain_falsehood_exprs]))
        self.assertIn(parse('(s->q)&(q->s)'), sequent[BaseTableauxBuilder.certain_falsehood_exprs])
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.certain_falsehood_atoms]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_processed]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
        self.assertEqual(1, len(sequent[BaseTableauxBuilder.certain_falsehood_processed]))
        self.assertIn(parse('s<->q'), sequent[BaseTableauxBuilder.certain_falsehood_processed])
    def test_merge_cf_not(self):
        """Merging CF:'!s' spawns one child where 's' becomes a true atom and
        '!s' is recorded in certain_falsehood_processed."""
        expr_t = [
            parse('a&b&c'),
            parse('q->r'),
            parse('r->s'),
        ]
        expr_f = [
            parse('!s->!p'),
            parse('t&f')
        ]
        expr_cf = [
            parse('!s')
        ]
        l_expr = [[
            parse('s')
        ]]
        r_expr = [[]]
        cf_expr = [[]]
        tableau = IpcTableauxBuilder(true_exprs=expr_t,
                                     false_exprs=expr_f,
                                     cf_exprs = expr_cf)
        manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
        manual_tableau.merge((None, None, expr_cf[0]), l_expr, r_expr, cf_expr, [])
        self.assertEqual(1, len(tableau.children))
        sequent = tableau.children[0].sequent
        self.assertEqual(3, len(sequent[BaseTableauxBuilder.true_exprs]))
        self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertEqual(1, len(sequent[BaseTableauxBuilder.true_atoms]))
        self.assertIn(parse('s'), sequent[BaseTableauxBuilder.true_atoms])
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_exprs]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_atoms]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.certain_falsehood_exprs]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.certain_falsehood_atoms]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_processed]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
        self.assertEqual(1, len(sequent[BaseTableauxBuilder.certain_falsehood_processed]))
        self.assertIn(parse('!s'), sequent[BaseTableauxBuilder.certain_falsehood_processed])
    def test_merge_true_not(self):
        """Merging T:'!s' moves 's' into certain_falsehood_atoms of the same
        sequent (no child) and marks T:'!s' as processed; the pre-existing
        CF:'!s' stays in certain_falsehood_exprs."""
        expr_t = [
            parse('!s'),
            parse('a&b&c'),
            parse('q->r'),
            parse('r->s'),
        ]
        expr_f = [
            parse('!s->!p'),
            parse('t&f')
        ]
        expr_cf = [
            parse('!s')
        ]
        l_expr = [[]]
        r_expr = [[]]
        cf_expr = [[
            parse('s')
        ]]
        tableau = IpcTableauxBuilder(true_exprs=expr_t,
                                     false_exprs=expr_f,
                                     cf_exprs = expr_cf)
        manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
        manual_tableau.merge((expr_t[0], None, None), l_expr, r_expr, cf_expr, [])
        self.assertEqual(0, len(tableau.children))
        sequent = tableau.sequent
        self.assertEqual(3, len(sequent[BaseTableauxBuilder.true_exprs]))
        self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_atoms]))
        self.assertEqual(2, len(sequent[BaseTableauxBuilder.false_exprs]))
        self.assertIn(parse('!s->!p'), sequent[BaseTableauxBuilder.false_exprs])
        self.assertIn(parse('t&f'), sequent[BaseTableauxBuilder.false_exprs])
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_atoms]))
        self.assertEqual(1, len(sequent[BaseTableauxBuilder.certain_falsehood_exprs]))
        self.assertIn(parse('!s'), sequent[BaseTableauxBuilder.certain_falsehood_exprs])
        self.assertEqual(1, len(sequent[BaseTableauxBuilder.certain_falsehood_atoms]))
        self.assertIn(parse('s'), sequent[BaseTableauxBuilder.certain_falsehood_atoms])
        self.assertEqual(1, len(sequent[BaseTableauxBuilder.true_processed]))
        self.assertIn(parse('!s'), sequent[BaseTableauxBuilder.true_processed])
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.certain_falsehood_processed]))
    def test_merge_false_not(self):
        """Merging F:'!s' spawns one child with 's' among the true atoms; the
        other false expressions are dropped while CF:'!s' is carried over."""
        expr_t = [
            parse('a&b&c'),
            parse('q->r'),
            parse('r->s'),
        ]
        expr_f = [
            parse('!s'),
            parse('!s->!p'),
            parse('t&f'),
        ]
        expr_cf = [
            parse('!s')
        ]
        l_expr = [[
            parse('s')
        ]]
        r_expr = [[]]
        cf_expr = [[]]
        tableau = IpcTableauxBuilder(true_exprs=expr_t,
                                     false_exprs=expr_f,
                                     cf_exprs = expr_cf)
        manual_tableau = BaseManualTableau(LogicType.IPROPOSITIONAL, tableau)
        manual_tableau.merge((None, expr_f[0], None), l_expr, r_expr, cf_expr, [])
        self.assertEqual(1, len(tableau.children))
        sequent = tableau.children[0].sequent
        self.assertEqual(3, len(sequent[BaseTableauxBuilder.true_exprs]))
        self.assertIn(parse('q->r'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('r->s'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertIn(parse('a&b&c'), sequent[BaseTableauxBuilder.true_exprs])
        self.assertEqual(1, len(sequent[BaseTableauxBuilder.true_atoms]))
        self.assertIn(parse('s'), sequent[BaseTableauxBuilder.true_atoms])
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_exprs]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_atoms]))
        self.assertEqual(1, len(sequent[BaseTableauxBuilder.certain_falsehood_exprs]))
        self.assertIn(parse('!s'), sequent[BaseTableauxBuilder.certain_falsehood_exprs])
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.certain_falsehood_atoms]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.true_processed]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.false_processed]))
        self.assertEqual(0, len(sequent[BaseTableauxBuilder.certain_falsehood_processed]))
| true | true |
1c3384e6fe16456cdbde54061a311b075d5a9da1 | 29 | py | Python | tests/test_generate_isogeom.py | kkiesling/isogeom-generator | 5363c8431694b33f8c8de329ee31b3a6d67ccc7e | [
"MIT"
] | 1 | 2021-01-07T02:45:38.000Z | 2021-01-07T02:45:38.000Z | tests/test_generate_isogeom.py | kkiesling/isogeom-generator | 5363c8431694b33f8c8de329ee31b3a6d67ccc7e | [
"MIT"
] | 21 | 2020-05-18T16:38:22.000Z | 2021-08-24T16:46:24.000Z | tests/test_generate_isogeom.py | kkiesling/isogeom-generator | 5363c8431694b33f8c8de329ee31b3a6d67ccc7e | [
"MIT"
] | 1 | 2020-05-18T16:36:35.000Z | 2020-05-18T16:36:35.000Z | """tests for the CLI tool"""
| 14.5 | 28 | 0.62069 | true | true | |
1c33851d2b3f96989d1efe22871a59d33273da9d | 141 | py | Python | apps/example/urls.py | evertrol/det | 5d397010bc9a608dcb38c176bb3f89e4f17ab272 | [
"MIT"
] | null | null | null | apps/example/urls.py | evertrol/det | 5d397010bc9a608dcb38c176bb3f89e4f17ab272 | [
"MIT"
] | null | null | null | apps/example/urls.py | evertrol/det | 5d397010bc9a608dcb38c176bb3f89e4f17ab272 | [
"MIT"
] | null | null | null | from django.urls import path, include
from . import views
# URL namespace for this app; reverse views as 'example:index'.
app_name = 'example'
urlpatterns = [
    # App root -> index view.
    path('', views.index, name='index'),
]
| 14.1 | 40 | 0.673759 | from django.urls import path, include
from . import views
app_name = 'example'
urlpatterns = [
path('', views.index, name='index'),
]
| true | true |
1c3385c27648c79188c5318fa397b3bfcb64799f | 1,397 | py | Python | bot.py | Juniorredcoder/G-Bot | c733afd912351137a9a1d21451ea442b1ac9d7f2 | [
"Unlicense"
] | 1 | 2022-02-10T15:29:33.000Z | 2022-02-10T15:29:33.000Z | bot.py | Juniorredcoder/G-Bot | c733afd912351137a9a1d21451ea442b1ac9d7f2 | [
"Unlicense"
] | null | null | null | bot.py | Juniorredcoder/G-Bot | c733afd912351137a9a1d21451ea442b1ac9d7f2 | [
"Unlicense"
] | null | null | null | print("Type 'help' for show command")
import os
from colorama import init
from colorama import Fore
init()
# Each recognised spoken command mapped to the espeak invocation that answers it.
# The command strings and espeak arguments are preserved verbatim from the original.
RESPONSES = {
    'hey g-bot': 'espeak -a 200 -s 150 -p 70 -g 15 "hello sir how are you" -ven+f4',
    'i am fine and you': 'espeak -a 150 -s 140 -p 65 -g 11 "i am also fine sir" -ven+f4',
    'who is your boss': 'espeak -a 200 -s 150 -p 70 -g 15 "G-one sir is my boss" -ven+f4',
    'who gave you this name': 'espeak -a 150 -s 140 -p 65 -g 11 "G-one gave me this name" -ven+f4',
    'what is the g-one github account': 'espeak -a 200 -s 150 -p 70 -g 15 "G-one github account is:https://github.com/Juniorredcoder/" -ven+f4',
    'Thank you for help': 'espeak -a 200 -s 150 -p 70 -g 15 "welcome sir" -ven+f4',
}
c = 1
while True:
    pw = input(Fore.RED + 'G-BOT>')
    print()
    if pw == 'help':
        # BUGFIX: the original had a second `if pw == 'help': break` that made
        # the help command also terminate the program; help now just prints.
        print("----------COMMAND----------\n\n 1.hey g-bot\n 2.i am fine and you\n 3.who is your boss\n 4.who gave you this name\n 5.what is the g-one github account\n 6.Thank you for help")
    elif pw in RESPONSES:
        os.system(RESPONSES[pw])
    else:
        # BUGFIX: this reply used to sit after the loop (spoken only once, on
        # exit); it now answers every unrecognised input.
        os.system('espeak -a 150 -s 140 -p 65 -g 11 "i dont understand" -ven+f4')
    c += 1
    if c == 500000:
        print('you have reached the max number of attempts !!')
        break
| 43.65625 | 189 | 0.569077 | print("Type 'help' for show command")
import os
from colorama import init
from colorama import Fore
init()
c=1
while True :
pw=input(Fore.RED + 'G-BOT>')
print()
if pw == 'help':
print("----------COMMAND----------\n\n 1.hey g-bot\n 2.i am fine and you\n 3.who is your boss\n 4.who gave you this name\n 5.what is the g-one github account\n 6.Thank you for help")
if pw == 'hey g-bot':
os.system('espeak -a 200 -s 150 -p 70 -g 15 "hello sir how are you" -ven+f4')
if pw == 'i am fine and you':
os.system('espeak -a 150 -s 140 -p 65 -g 11 "i am also fine sir" -ven+f4')
if pw == 'who is your boss':
os.system('espeak -a 200 -s 150 -p 70 -g 15 "G-one sir is my boss" -ven+f4')
if pw == 'who gave you this name':
os.system('espeak -a 150 -s 140 -p 65 -g 11 "G-one gave me this name" -ven+f4')
if pw == 'what is the g-one github account':
os.system('espeak -a 200 -s 150 -p 70 -g 15 "G-one github account is:https://github.com/Juniorredcoder/" -ven+f4')
if pw == 'Thank you for help':
os.system('espeak -a 200 -s 150 -p 70 -g 15 "welcome sir" -ven+f4')
if pw == 'help':
break
c+=1
if c == 500000 :
print('you have reached the max number of attempts !!')
break
os.system('espeak -a 150 -s 140 -p 65 -g 11 "i dont understand" -ven+f4')
| true | true |
1c338654e5da1e13145829ea4df34006bc84f57f | 3,698 | py | Python | tests/test_schedules.py | sunny316/redbeat | 51db315ef182f28cbb4a8d33af0b78bd816ac9a6 | [
"Apache-2.0"
] | null | null | null | tests/test_schedules.py | sunny316/redbeat | 51db315ef182f28cbb4a8d33af0b78bd816ac9a6 | [
"Apache-2.0"
] | null | null | null | tests/test_schedules.py | sunny316/redbeat | 51db315ef182f28cbb4a8d33af0b78bd816ac9a6 | [
"Apache-2.0"
] | null | null | null | from datetime import (
datetime,
timedelta
)
from unittest import TestCase
from mock import patch
try: # celery 3.x
from celery.utils.timeutils import timezone
except ImportError: # celery 4.x
from celery.utils.time import timezone
from redbeat.schedules import rrule
@patch.object(rrule, 'now', datetime.utcnow)
@patch.object(rrule, 'utc_enabled', True)
@patch.object(rrule, 'tz', timezone.utc)
class test_rrule_remaining_estimate(TestCase):
    """remaining_estimate() of an rrule schedule, with the clock pinned to UTC."""
    def test_freq(self):
        """An uncapped minutely rule always has a positive ETA."""
        schedule = rrule('MINUTELY', dtstart=datetime.utcnow() + timedelta(minutes=1))
        eta_now = schedule.remaining_estimate(datetime.utcnow())
        eta_later = schedule.remaining_estimate(datetime.utcnow() + timedelta(minutes=1))
        self.assertTrue(eta_now.total_seconds() > 0)
        self.assertTrue(eta_later.total_seconds() > 0)
    def test_freq__with_single_count(self):
        """count=1: after the single occurrence there is no further ETA."""
        schedule = rrule('MINUTELY', dtstart=datetime.utcnow() + timedelta(minutes=1), count=1)
        eta_now = schedule.remaining_estimate(datetime.utcnow())
        eta_later = schedule.remaining_estimate(datetime.utcnow() + timedelta(minutes=1))
        self.assertTrue(eta_now.total_seconds() > 0)
        self.assertEqual(eta_later, None)
    def test_freq__with_multiple_count(self):
        """count=2: ETAs exist for the first two minutes only."""
        schedule = rrule('MINUTELY', dtstart=datetime.utcnow() + timedelta(minutes=1), count=2)
        eta_now = schedule.remaining_estimate(datetime.utcnow())
        eta_later = schedule.remaining_estimate(datetime.utcnow() + timedelta(minutes=1))
        eta_latest = schedule.remaining_estimate(datetime.utcnow() + timedelta(minutes=2))
        self.assertTrue(eta_now.total_seconds() > 0)
        self.assertTrue(eta_later.total_seconds() > 0)
        self.assertEqual(eta_latest, None)
@patch.object(rrule, 'now', datetime.utcnow)
@patch.object(rrule, 'utc_enabled', True)
@patch.object(rrule, 'tz', timezone.utc)
class test_rrule_is_due(TestCase):
    """is_due() of an rrule schedule, with the clock pinned to UTC.

    The local previously named `next` shadowed the builtin; renamed to
    `next_check` throughout (no behaviour change).
    """
    def test_freq__starting_now(self):
        """A rule starting now is due when the job has never run."""
        r = rrule('MINUTELY', dtstart=datetime.utcnow())
        # Assuming job was never run, i.e. last_run_at == epoch
        is_due, next_check = r.is_due(datetime(1970, 1, 1))
        self.assertTrue(is_due)
        self.assertTrue(next_check > 0)
    def test_freq__starts_after_one_minute(self):
        """A rule starting in the future is not yet due but has an ETA."""
        r = rrule('MINUTELY', dtstart=datetime.utcnow() + timedelta(minutes=1))
        is_due, next_check = r.is_due(datetime.utcnow())
        self.assertFalse(is_due)
        self.assertTrue(next_check > 0)
    def test_freq__with_single_count(self):
        """count=1: due exactly once, then never again and with no ETA."""
        r = rrule('MINUTELY', dtstart=datetime.utcnow(), count=1)
        # If job was never run, it should be due and have
        # no ETA for the following occurrence.
        is_due, next_check = r.is_due(datetime(1970, 1, 1))
        self.assertTrue(is_due)
        self.assertEqual(next_check, None)
        # It should not be due if it was already run once.
        is_due, next_check = r.is_due(datetime.utcnow())
        self.assertFalse(is_due)
        self.assertEqual(next_check, None)
    def test_freq__with_multiple_count(self):
        """count=2: two occurrences and then the schedule is exhausted."""
        r = rrule('MINUTELY', dtstart=datetime.utcnow(), count=2)
        is_due, next_check = r.is_due(datetime(1970, 1, 1))
        self.assertTrue(is_due)
        self.assertTrue(next_check > 0)
        # There should still be one more occurrence remaining
        # if it was run once after dtstart.
        is_due, next_check = r.is_due(datetime.utcnow())
        self.assertFalse(is_due)
        self.assertTrue(next_check > 0)
        # There should be no more occurrences after one minute.
        is_due, next_check = r.is_due(datetime.utcnow() + timedelta(minutes=1))
        self.assertFalse(is_due)
        self.assertEqual(next_check, None)
| 42.022727 | 94 | 0.685506 | from datetime import (
datetime,
timedelta
)
from unittest import TestCase
from mock import patch
try:
from celery.utils.timeutils import timezone
except ImportError:
from celery.utils.time import timezone
from redbeat.schedules import rrule
@patch.object(rrule, 'now', datetime.utcnow)
@patch.object(rrule, 'utc_enabled', True)
@patch.object(rrule, 'tz', timezone.utc)
class test_rrule_remaining_estimate(TestCase):
def test_freq(self):
r = rrule('MINUTELY', dtstart=datetime.utcnow() + timedelta(minutes=1))
eta_from_now = r.remaining_estimate(datetime.utcnow())
eta_after_one_minute = r.remaining_estimate(datetime.utcnow() + timedelta(minutes=1))
self.assertTrue(eta_from_now.total_seconds() > 0)
self.assertTrue(eta_after_one_minute.total_seconds() > 0)
def test_freq__with_single_count(self):
r = rrule('MINUTELY', dtstart=datetime.utcnow() + timedelta(minutes=1), count=1)
eta_from_now = r.remaining_estimate(datetime.utcnow())
eta_after_one_minute = r.remaining_estimate(datetime.utcnow() + timedelta(minutes=1))
self.assertTrue(eta_from_now.total_seconds() > 0)
self.assertEqual(eta_after_one_minute, None)
def test_freq__with_multiple_count(self):
r = rrule('MINUTELY', dtstart=datetime.utcnow() + timedelta(minutes=1), count=2)
eta_from_now = r.remaining_estimate(datetime.utcnow())
eta_after_one_minute = r.remaining_estimate(datetime.utcnow() + timedelta(minutes=1))
eta_after_two_minutes = r.remaining_estimate(datetime.utcnow() + timedelta(minutes=2))
self.assertTrue(eta_from_now.total_seconds() > 0)
self.assertTrue(eta_after_one_minute.total_seconds() > 0)
self.assertEqual(eta_after_two_minutes, None)
@patch.object(rrule, 'now', datetime.utcnow)
@patch.object(rrule, 'utc_enabled', True)
@patch.object(rrule, 'tz', timezone.utc)
class test_rrule_is_due(TestCase):
def test_freq__starting_now(self):
r = rrule('MINUTELY', dtstart=datetime.utcnow())
is_due, next = r.is_due(datetime(1970, 1, 1))
self.assertTrue(is_due)
self.assertTrue(next > 0)
def test_freq__starts_after_one_minute(self):
r = rrule('MINUTELY', dtstart=datetime.utcnow() + timedelta(minutes=1))
is_due, next = r.is_due(datetime.utcnow())
self.assertFalse(is_due)
self.assertTrue(next > 0)
def test_freq__with_single_count(self):
r = rrule('MINUTELY', dtstart=datetime.utcnow(), count=1)
is_due, next = r.is_due(datetime(1970, 1, 1))
self.assertTrue(is_due)
self.assertEqual(next, None)
is_due, next = r.is_due(datetime.utcnow())
self.assertFalse(is_due)
self.assertEqual(next, None)
def test_freq__with_multiple_count(self):
r = rrule('MINUTELY', dtstart=datetime.utcnow(), count=2)
is_due, next = r.is_due(datetime(1970, 1, 1))
self.assertTrue(is_due)
self.assertTrue(next > 0)
is_due, next = r.is_due(datetime.utcnow())
self.assertFalse(is_due)
self.assertTrue(next > 0)
is_due, next = r.is_due(datetime.utcnow() + timedelta(minutes=1))
self.assertFalse(is_due)
self.assertEqual(next, None)
| true | true |
1c3386f366875ac76dffe8e01e1ffd6fa00ba05d | 6,725 | py | Python | metadata-ingestion/src/datahub/ingestion/source/snowflake_usage.py | emailstonl/datahub | 19b2a42a00ca43a7d042ae2a8e5d05498df7f2a9 | [
"Apache-2.0"
] | null | null | null | metadata-ingestion/src/datahub/ingestion/source/snowflake_usage.py | emailstonl/datahub | 19b2a42a00ca43a7d042ae2a8e5d05498df7f2a9 | [
"Apache-2.0"
] | null | null | null | metadata-ingestion/src/datahub/ingestion/source/snowflake_usage.py | emailstonl/datahub | 19b2a42a00ca43a7d042ae2a8e5d05498df7f2a9 | [
"Apache-2.0"
] | null | null | null | import collections
import dataclasses
import json
import logging
from datetime import datetime, timezone
from typing import Dict, Iterable, List, Optional
import pydantic
import pydantic.dataclasses
from sqlalchemy import create_engine
from sqlalchemy.engine import Engine
import datahub.emitter.mce_builder as builder
from datahub.ingestion.api.source import Source, SourceReport
from datahub.ingestion.api.workunit import UsageStatsWorkUnit
from datahub.ingestion.source.snowflake import SnowflakeConfig
from datahub.ingestion.source.usage_common import (
BaseUsageConfig,
GenericAggregatedDataset,
get_time_bucket,
)
logger = logging.getLogger(__name__)
# A Snowflake table reference is represented as a plain string in this module.
SnowflakeTableRef = str
# Usage aggregation specialised to Snowflake table references.
AggregatedDataset = GenericAggregatedDataset[SnowflakeTableRef]
SNOWFLAKE_USAGE_SQL_TEMPLATE = """
SELECT
-- access_history.query_id, -- only for debugging purposes
access_history.query_start_time,
query_history.query_text,
query_history.query_type,
access_history.base_objects_accessed,
-- access_history.direct_objects_accessed, -- might be useful in the future
-- query_history.execution_status, -- not really necessary, but should equal "SUCCESS"
-- query_history.warehouse_name,
access_history.user_name,
users.first_name,
users.last_name,
users.display_name,
users.email,
query_history.role_name
FROM
snowflake.account_usage.access_history access_history
LEFT JOIN
snowflake.account_usage.query_history query_history
ON access_history.query_id = query_history.query_id
LEFT JOIN
snowflake.account_usage.users users
ON access_history.user_name = users.name
WHERE ARRAY_SIZE(base_objects_accessed) > 0
AND query_start_time >= to_timestamp_ltz({start_time_millis}, 3)
AND query_start_time < to_timestamp_ltz({end_time_millis}, 3)
ORDER BY query_start_time DESC
;
""".strip()
@pydantic.dataclasses.dataclass
class SnowflakeColumnReference:
    """One column touched by a query, as listed in access_history rows."""
    columnId: int  # Snowflake-internal numeric column id
    columnName: str  # column name within the accessed object
@pydantic.dataclasses.dataclass
class SnowflakeObjectAccessEntry:
    """One object accessed by a query (an element of base_objects_accessed)."""
    columns: List[SnowflakeColumnReference]  # columns of the object that were touched
    objectDomain: str  # object kind string as reported by Snowflake
    objectId: int  # Snowflake-internal numeric object id
    objectName: str  # fully qualified object name
@pydantic.dataclasses.dataclass
class SnowflakeJoinedAccessEvent:
    """One row of the access_history/query_history/users join produced by
    SNOWFLAKE_USAGE_SQL_TEMPLATE; field names mirror the SELECT list."""
    query_start_time: datetime
    query_text: str
    query_type: str
    base_objects_accessed: List[SnowflakeObjectAccessEntry]
    user_name: str
    first_name: Optional[str]  # nullable: comes from the LEFT JOIN on users
    last_name: Optional[str]
    display_name: Optional[str]
    email: str
    role_name: str
class SnowflakeUsageConfig(SnowflakeConfig, BaseUsageConfig):
    # The account_usage views queried by this source live in the shared
    # "snowflake" database.
    database: str = "snowflake"

    @pydantic.validator("role", always=True)
    def role_accountadmin(cls, v):
        """Log (but do not reject) a role other than ACCOUNTADMIN."""
        if not v or v.lower() != "accountadmin":
            # This isn't an error, since the privileges can be delegated to other
            # roles as well: https://docs.snowflake.com/en/sql-reference/account-usage.html#enabling-account-usage-for-other-roles
            logger.info(
                'snowflake usage tables are only accessible by role "accountadmin" by default; you set %s',
                v,
            )
        return v
@dataclasses.dataclass
class SnowflakeUsageSource(Source):
    """DataHub ingestion source that derives per-table usage statistics from
    the Snowflake ``account_usage`` views (see SNOWFLAKE_USAGE_SQL_TEMPLATE).
    """

    config: SnowflakeUsageConfig
    report: SourceReport = dataclasses.field(default_factory=SourceReport)

    @classmethod
    def create(cls, config_dict, ctx):
        """Standard source factory: parse the raw config dict and construct."""
        config = SnowflakeUsageConfig.parse_obj(config_dict)
        return cls(ctx, config)

    def get_workunits(self) -> Iterable[UsageStatsWorkUnit]:
        """Yield one usage-stats workunit per (time bucket, table) pair."""
        access_events = self._get_snowflake_history()
        aggregated_info = self._aggregate_access_events(access_events)

        for time_bucket in aggregated_info.values():
            for aggregate in time_bucket.values():
                wu = self._make_usage_stat(aggregate)
                self.report.report_workunit(wu)
                yield wu

    def _make_usage_query(self) -> str:
        # The SQL template expects both time bounds as epoch milliseconds.
        return SNOWFLAKE_USAGE_SQL_TEMPLATE.format(
            start_time_millis=int(self.config.start_time.timestamp() * 1000),
            end_time_millis=int(self.config.end_time.timestamp() * 1000),
        )

    def _make_sql_engine(self) -> Engine:
        url = self.config.get_sql_alchemy_url()
        logger.debug(f"sql_alchemy_url={url}")
        engine = create_engine(url, **self.config.options)
        return engine

    def _get_snowflake_history(self) -> Iterable[SnowflakeJoinedAccessEvent]:
        """Run the usage query and yield one parsed event per result row."""
        query = self._make_usage_query()
        engine = self._make_sql_engine()
        results = engine.execute(query)

        for row in results:
            # Make some minor type conversions.
            if hasattr(row, "_asdict"):
                # Compat with SQLAlchemy 1.3 and 1.4
                # See https://docs.sqlalchemy.org/en/14/changelog/migration_14.html#rowproxy-is-no-longer-a-proxy-is-now-called-row-and-behaves-like-an-enhanced-named-tuple.
                event_dict = row._asdict()
            else:
                event_dict = dict(row)

            # base_objects_accessed arrives as a JSON string; parse it so
            # pydantic can build the nested SnowflakeObjectAccessEntry list.
            event_dict["base_objects_accessed"] = json.loads(
                event_dict["base_objects_accessed"]
            )
            # Normalize the query timestamp to UTC before bucketing.
            event_dict["query_start_time"] = (
                event_dict["query_start_time"]
            ).astimezone(tz=timezone.utc)

            event = SnowflakeJoinedAccessEvent(**event_dict)
            yield event

    def _aggregate_access_events(
        self, events: Iterable[SnowflakeJoinedAccessEvent]
    ) -> Dict[datetime, Dict[SnowflakeTableRef, AggregatedDataset]]:
        """Bucket events by time window, then aggregate per accessed table."""
        datasets: Dict[
            datetime, Dict[SnowflakeTableRef, AggregatedDataset]
        ] = collections.defaultdict(dict)

        for event in events:
            floored_ts = get_time_bucket(
                event.query_start_time, self.config.bucket_duration
            )

            # FIX: the loop variable was named ``object``, shadowing the
            # builtin; renamed with no behavior change.
            for accessed_object in event.base_objects_accessed:
                resource = accessed_object.objectName

                agg_bucket = datasets[floored_ts].setdefault(
                    resource,
                    AggregatedDataset(bucket_start_time=floored_ts, resource=resource),
                )
                agg_bucket.add_read_entry(
                    event.email,
                    event.query_text,
                    [colRef.columnName.lower() for colRef in accessed_object.columns],
                )

        return datasets

    def _make_usage_stat(self, agg: AggregatedDataset) -> UsageStatsWorkUnit:
        return agg.make_usage_workunit(
            self.config.bucket_duration,
            lambda resource: builder.make_dataset_urn(
                "snowflake", resource.lower(), self.config.env
            ),
            self.config.top_n_queries,
        )

    def get_report(self):
        return self.report

    def close(self):
        # No persistent resources are held outside individual method calls.
        pass
| 33.625 | 173 | 0.684164 | import collections
import dataclasses
import json
import logging
from datetime import datetime, timezone
from typing import Dict, Iterable, List, Optional
import pydantic
import pydantic.dataclasses
from sqlalchemy import create_engine
from sqlalchemy.engine import Engine
import datahub.emitter.mce_builder as builder
from datahub.ingestion.api.source import Source, SourceReport
from datahub.ingestion.api.workunit import UsageStatsWorkUnit
from datahub.ingestion.source.snowflake import SnowflakeConfig
from datahub.ingestion.source.usage_common import (
BaseUsageConfig,
GenericAggregatedDataset,
get_time_bucket,
)
logger = logging.getLogger(__name__)
SnowflakeTableRef = str
AggregatedDataset = GenericAggregatedDataset[SnowflakeTableRef]
SNOWFLAKE_USAGE_SQL_TEMPLATE = """
SELECT
-- access_history.query_id, -- only for debugging purposes
access_history.query_start_time,
query_history.query_text,
query_history.query_type,
access_history.base_objects_accessed,
-- access_history.direct_objects_accessed, -- might be useful in the future
-- query_history.execution_status, -- not really necessary, but should equal "SUCCESS"
-- query_history.warehouse_name,
access_history.user_name,
users.first_name,
users.last_name,
users.display_name,
users.email,
query_history.role_name
FROM
snowflake.account_usage.access_history access_history
LEFT JOIN
snowflake.account_usage.query_history query_history
ON access_history.query_id = query_history.query_id
LEFT JOIN
snowflake.account_usage.users users
ON access_history.user_name = users.name
WHERE ARRAY_SIZE(base_objects_accessed) > 0
AND query_start_time >= to_timestamp_ltz({start_time_millis}, 3)
AND query_start_time < to_timestamp_ltz({end_time_millis}, 3)
ORDER BY query_start_time DESC
;
""".strip()
@pydantic.dataclasses.dataclass
class SnowflakeColumnReference:
columnId: int
columnName: str
@pydantic.dataclasses.dataclass
class SnowflakeObjectAccessEntry:
columns: List[SnowflakeColumnReference]
objectDomain: str
objectId: int
objectName: str
@pydantic.dataclasses.dataclass
class SnowflakeJoinedAccessEvent:
query_start_time: datetime
query_text: str
query_type: str
base_objects_accessed: List[SnowflakeObjectAccessEntry]
user_name: str
first_name: Optional[str]
last_name: Optional[str]
display_name: Optional[str]
email: str
role_name: str
class SnowflakeUsageConfig(SnowflakeConfig, BaseUsageConfig):
database: str = "snowflake"
@pydantic.validator("role", always=True)
def role_accountadmin(cls, v):
if not v or v.lower() != "accountadmin":
# roles as well: https://docs.snowflake.com/en/sql-reference/account-usage.html#enabling-account-usage-for-other-roles
logger.info(
'snowflake usage tables are only accessible by role "accountadmin" by default; you set %s',
v,
)
return v
@dataclasses.dataclass
class SnowflakeUsageSource(Source):
config: SnowflakeUsageConfig
report: SourceReport = dataclasses.field(default_factory=SourceReport)
@classmethod
def create(cls, config_dict, ctx):
config = SnowflakeUsageConfig.parse_obj(config_dict)
return cls(ctx, config)
def get_workunits(self) -> Iterable[UsageStatsWorkUnit]:
access_events = self._get_snowflake_history()
aggregated_info = self._aggregate_access_events(access_events)
for time_bucket in aggregated_info.values():
for aggregate in time_bucket.values():
wu = self._make_usage_stat(aggregate)
self.report.report_workunit(wu)
yield wu
def _make_usage_query(self) -> str:
return SNOWFLAKE_USAGE_SQL_TEMPLATE.format(
start_time_millis=int(self.config.start_time.timestamp() * 1000),
end_time_millis=int(self.config.end_time.timestamp() * 1000),
)
def _make_sql_engine(self) -> Engine:
url = self.config.get_sql_alchemy_url()
logger.debug(f"sql_alchemy_url={url}")
engine = create_engine(url, **self.config.options)
return engine
def _get_snowflake_history(self) -> Iterable[SnowflakeJoinedAccessEvent]:
query = self._make_usage_query()
engine = self._make_sql_engine()
results = engine.execute(query)
for row in results:
# Make some minor type conversions.
if hasattr(row, "_asdict"):
# Compat with SQLAlchemy 1.3 and 1.4
# See https://docs.sqlalchemy.org/en/14/changelog/migration_14.html#rowproxy-is-no-longer-a-proxy-is-now-called-row-and-behaves-like-an-enhanced-named-tuple.
event_dict = row._asdict()
else:
event_dict = dict(row)
event_dict["base_objects_accessed"] = json.loads(
event_dict["base_objects_accessed"]
)
event_dict["query_start_time"] = (
event_dict["query_start_time"]
).astimezone(tz=timezone.utc)
event = SnowflakeJoinedAccessEvent(**event_dict)
yield event
def _aggregate_access_events(
self, events: Iterable[SnowflakeJoinedAccessEvent]
) -> Dict[datetime, Dict[SnowflakeTableRef, AggregatedDataset]]:
datasets: Dict[
datetime, Dict[SnowflakeTableRef, AggregatedDataset]
] = collections.defaultdict(dict)
for event in events:
floored_ts = get_time_bucket(
event.query_start_time, self.config.bucket_duration
)
for object in event.base_objects_accessed:
resource = object.objectName
agg_bucket = datasets[floored_ts].setdefault(
resource,
AggregatedDataset(bucket_start_time=floored_ts, resource=resource),
)
agg_bucket.add_read_entry(
event.email,
event.query_text,
[colRef.columnName.lower() for colRef in object.columns],
)
return datasets
def _make_usage_stat(self, agg: AggregatedDataset) -> UsageStatsWorkUnit:
return agg.make_usage_workunit(
self.config.bucket_duration,
lambda resource: builder.make_dataset_urn(
"snowflake", resource.lower(), self.config.env
),
self.config.top_n_queries,
)
def get_report(self):
return self.report
def close(self):
pass
| true | true |
1c338708ba9ffe0bff745b5465014d4deb775853 | 5,676 | py | Python | landlab/io/shapefile/read_shapefile.py | cctrunz/landlab | 4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939 | [
"MIT"
] | null | null | null | landlab/io/shapefile/read_shapefile.py | cctrunz/landlab | 4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939 | [
"MIT"
] | 1 | 2016-03-16T02:34:08.000Z | 2016-04-20T19:31:30.000Z | landlab/io/shapefile/read_shapefile.py | cctrunz/landlab | 4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Functions to read shapefiles and create a NetworkModelGrid.
"""
import shapefile as ps
from shapefile import ShapefileException
from landlab.grid.network import NetworkModelGrid
def read_shapefile(file, dbf=None, store_polyline_vertices=True):
    """Read shapefile and create a NetworkModelGrid.

    There are a number of assumptions that are required about the shapefile.

    * The shape file must be a polyline shapefile.
    * All polylines must be their own object (e.g. no multi-part
      polylines).
    * Polyline endpoints match perfectly.

    You might notice that there is no ``write_shapefile`` function. If this is
    something you need for your work, please make a GitHub issue to start this
    process.

    Parameters
    ----------
    file : str or file-like
        File path or file-like of a valid shapefile
    dbf : file-like, optional
        If file is file-like, the dbf must also be passed.
    store_polyline_vertices: bool, optional
        If True (default), store the vertices of the polylines in
        the at_link fields ``x_of_polyline`` and ``y_of_polyline``.

    Returns
    -------
    grid : NetworkModelGrid instance
        The network model grid will have nodes at the endpoints of the
        polylines, and links that connect these nodes. Any fields
        associated with the shapefile will be added as at-link fields.

    Examples
    --------
    First, we make a simple shapefile

    >>> from six import BytesIO
    >>> import shapefile
    >>> shp = BytesIO()
    >>> shx = BytesIO()
    >>> dbf = BytesIO()
    >>> w = shapefile.Writer(shp=shp, shx=shx, dbf=dbf)
    >>> w.shapeType = 3
    >>> w.field("spam", "N")
    >>> w.line([[[5,0],[5,5]]])
    >>> w.record(100)
    >>> w.line([[[5,5],[0,10]]])
    >>> w.record(239)
    >>> w.line([[[5,5],[10,10]]])
    >>> w.record(37)
    >>> w.close()

    Now create a NetworkModelGrid with read_shapefile:

    >>> from landlab.io import read_shapefile
    >>> grid = read_shapefile(shp, dbf=dbf)
    >>> grid.nodes
    array([0, 1, 2, 3])
    >>> grid.x_of_node
    array([ 5., 5., 0., 10.])
    >>> grid.y_of_node
    array([ 0., 5., 10., 10.])
    >>> grid.nodes_at_link
    array([[0, 1],
    [1, 2],
    [1, 3]])
    >>> assert "spam" in grid.at_link
    >>> grid.at_link["spam"]
    array([100, 239, 37])
    """
    try:
        sf = ps.Reader(file)
    except ShapefileException:
        try:
            sf = ps.Reader(shp=file, dbf=dbf)
        except ShapefileException:
            raise ShapefileException(("Bad file path provided to read_shapefile."))

    if sf.shapeType != 3:
        raise ValueError(
            (
                "landlab.io.shapefile read requires a polyline "
                "type shapefile. The provided shapefile does "
                "not meet these requirements."
            )
        )

    # get record information, the first element is ('DeletionFlag', 'C', 1, 0)
    # which we will ignore.
    records = sf.fields[1:]

    # Initialize data structures: node (x, y) tuples in discovery order, a
    # dict mapping each (x, y) tuple to its node id, link
    # (head_node_id, tail_node_id) tuples, and a dict of at-link fields.
    # The dict replaces the original ``list.index``/``in list`` scans, which
    # made node lookup O(n) per polyline (O(n**2) overall).
    node_xy = []
    node_id_at_xy = {}
    links = []
    fields = {rec[0]: [] for rec in records}
    if store_polyline_vertices:
        # Besides the shapefile's at-link fields, also store the x and y of
        # the full polyline segment.
        fields["x_of_polyline"] = []
        fields["y_of_polyline"] = []
    record_order = [rec[0] for rec in records]

    # iterate through shapes and records
    shapeRecs = sf.shapeRecords()
    for sr in shapeRecs:
        # if not a multi-part polyline:
        if len(sr.shape.parts) == 1:
            # get all the points on the polyline and deconstruct into x and y
            points = sr.shape.points
            x, y = zip(*points)

            # construct the (x,y) tuples of the head and tail nodes of each
            # polyline. Note here, that head and tail just refer to starting
            # and ending, they will be re-oriented if necessary by landlab.
            head_node_xy = (x[0], y[0])
            tail_node_xy = (x[-1], y[-1])

            # we should expect that the head node and tail node of later links
            # will already be part of the model grid, so register each
            # endpoint only the first time it is seen.
            for xy in (head_node_xy, tail_node_xy):
                if xy not in node_id_at_xy:
                    node_id_at_xy[xy] = len(node_xy)
                    node_xy.append(xy)

            # append the head and tail node ids to the link array
            links.append((node_id_at_xy[head_node_xy], node_id_at_xy[tail_node_xy]))

            for field_name, value in zip(record_order, sr.record):
                fields[field_name].append(value)

            if store_polyline_vertices:
                fields["x_of_polyline"].append(x)
                fields["y_of_polyline"].append(y)
        else:
            # FIX: the original passed a tuple of two strings to ValueError,
            # which rendered as a tuple; join into one message.
            raise ValueError(
                "landlab.io.shapefile currently does not support "
                "reading multipart polyline shapefiles."
            )

    # Create a Network Model Grid.
    x_of_node, y_of_node = zip(*node_xy)
    grid = NetworkModelGrid((y_of_node, x_of_node), links)
    for field_name in fields:
        grid.at_link[field_name] = fields[field_name]

    return grid
| 32.067797 | 83 | 0.59408 |
import shapefile as ps
from shapefile import ShapefileException
from landlab.grid.network import NetworkModelGrid
def read_shapefile(file, dbf=None, store_polyline_vertices=True):
try:
sf = ps.Reader(file)
except ShapefileException:
try:
sf = ps.Reader(shp=file, dbf=dbf)
except ShapefileException:
raise ShapefileException(("Bad file path provided to read_shapefile."))
if sf.shapeType != 3:
raise ValueError(
(
"landlab.io.shapefile read requires a polyline "
"type shapefile. The provided shapefile does "
"not meet these requirements."
)
)
records = sf.fields[1:]
# x and y of the full polyline segment'.
node_xy = []
links = []
fields = {rec[0]: [] for rec in records}
if store_polyline_vertices:
fields["x_of_polyline"] = []
fields["y_of_polyline"] = []
record_order = [rec[0] for rec in records]
shapeRecs = sf.shapeRecords()
for sr in shapeRecs:
if len(sr.shape.parts) == 1:
points = sr.shape.points
x, y = zip(*points)
head_node_xy = (x[0], y[0])
tail_node_xy = (x[-1], y[-1])
if head_node_xy not in node_xy:
node_xy.append(head_node_xy)
if tail_node_xy not in node_xy:
node_xy.append(tail_node_xy)
# get the index of the head and tail node index.
head_node__node_id = node_xy.index(head_node_xy)
tail_node__node_id = node_xy.index(tail_node_xy)
# append the head and tail node ids to the link array
links.append((head_node__node_id, tail_node__node_id))
for i in range(len(sr.record)):
field_name = record_order[i]
fields[field_name].append(sr.record[i])
if store_polyline_vertices:
fields["x_of_polyline"].append(x)
fields["y_of_polyline"].append(y)
else:
raise ValueError(
(
"landlab.io.shapefile currently does not support ",
"reading multipart polyline shapefiles.",
)
)
# Create a Network Model Grid
x_of_node, y_of_node = zip(*node_xy)
grid = NetworkModelGrid((y_of_node, x_of_node), links)
for field_name in fields:
grid.at_link[field_name] = fields[field_name]
return grid
| true | true |
1c33885ab73973c5c33df2e9e517ec6a25907d24 | 1,031 | py | Python | config/api_router.py | unkn1w/Delivery | 21b33f5b8d9f057c129fc78c175b6c79fcd23122 | [
"MIT"
] | null | null | null | config/api_router.py | unkn1w/Delivery | 21b33f5b8d9f057c129fc78c175b6c79fcd23122 | [
"MIT"
] | null | null | null | config/api_router.py | unkn1w/Delivery | 21b33f5b8d9f057c129fc78c175b6c79fcd23122 | [
"MIT"
] | null | null | null | from django.conf import settings
from rest_framework.routers import DefaultRouter, SimpleRouter
from delivery_proj.users.api.views import (
    UserViewSet,
    RestaurantViewSet,
    CreateRestaurantViewSet,
    CourierViewSet,
    BuyerViewSet,
)
from delivery_proj.dishes.views import DishesListViewSet, DishesCreateViewSet
from delivery_proj.cart.views import CartViewSet, OrderViewSet
# DefaultRouter additionally serves a browsable API root view; use the
# lighter SimpleRouter outside of DEBUG.
if settings.DEBUG:
    router = DefaultRouter()
else:
    router = SimpleRouter()
# Register every viewset under its URL prefix.  An explicit ``basename`` is
# passed where the viewset name does not match the desired route name.
router.register("users", UserViewSet)
router.register("dishes", DishesListViewSet)
router.register("dish", DishesCreateViewSet)
router.register("restaurants", RestaurantViewSet, basename="restaurants")
router.register("restaurant", CreateRestaurantViewSet, basename="restaurant")
router.register("buyer", BuyerViewSet, basename="buyer")
router.register("courier", CourierViewSet, basename="courier")
router.register("cart", CartViewSet, basename="cart")
router.register("order", OrderViewSet, basename="order")
# URL namespace and the generated URL patterns consumed by the project urls.
app_name = "api"
urlpatterns = router.urls
| 32.21875 | 77 | 0.794374 | from django.conf import settings
from rest_framework.routers import DefaultRouter, SimpleRouter
from delivery_proj.users.api.views import (
UserViewSet,
RestaurantViewSet,
CreateRestaurantViewSet,
CourierViewSet,
BuyerViewSet,
)
from delivery_proj.dishes.views import DishesListViewSet, DishesCreateViewSet
from delivery_proj.cart.views import CartViewSet, OrderViewSet
if settings.DEBUG:
router = DefaultRouter()
else:
router = SimpleRouter()
router.register("users", UserViewSet)
router.register("dishes", DishesListViewSet)
router.register("dish", DishesCreateViewSet)
router.register("restaurants", RestaurantViewSet, basename="restaurants")
router.register("restaurant", CreateRestaurantViewSet, basename="restaurant")
router.register("buyer", BuyerViewSet, basename="buyer")
router.register("courier", CourierViewSet, basename="courier")
router.register("cart", CartViewSet, basename="cart")
router.register("order", OrderViewSet, basename="order")
app_name = "api"
urlpatterns = router.urls
| true | true |
1c3389847f06331c2903a02107214390a38adbea | 5,083 | py | Python | doc/development/tutorials/examples/recipe.py | Lu-Yi-Hsun/sphinx | 7e81e1432e4a63bad0b13db5652f9ed02ea04f41 | [
"BSD-2-Clause"
] | 2 | 2021-09-25T12:48:31.000Z | 2021-10-01T16:59:43.000Z | doc/development/tutorials/examples/recipe.py | Lu-Yi-Hsun/sphinx | 7e81e1432e4a63bad0b13db5652f9ed02ea04f41 | [
"BSD-2-Clause"
] | 2 | 2022-02-14T03:20:12.000Z | 2022-03-02T10:44:31.000Z | doc/development/tutorials/examples/recipe.py | Lu-Yi-Hsun/sphinx | 7e81e1432e4a63bad0b13db5652f9ed02ea04f41 | [
"BSD-2-Clause"
] | 1 | 2021-06-06T04:30:02.000Z | 2021-06-06T04:30:02.000Z | from collections import defaultdict
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, Index
from sphinx.roles import XRefRole
from sphinx.util.nodes import make_refnode
class RecipeDirective(ObjectDescription):
    """A custom directive that describes a recipe."""

    has_content = True
    required_arguments = 1
    option_spec = {
        'contains': directives.unchanged_required,
    }

    def handle_signature(self, sig, signode):
        # Render the recipe name as the signature line.
        signode += addnodes.desc_name(text=sig)
        return sig

    def add_target_and_index(self, name_cls, sig, signode):
        # Anchor used by cross-references and the indices: "recipe-<name>".
        signode['ids'].append('recipe' + '-' + sig)
        # FIX: the original tested ``'contains' not in self.options`` — with
        # the option absent it crashed on ``None.split(...)``, and with the
        # option present it never registered the recipe.  The ingredients
        # must be parsed exactly when the option IS present.
        if 'contains' in self.options:
            ingredients = [
                x.strip() for x in self.options.get('contains').split(',')]

            recipes = self.env.get_domain('recipe')
            recipes.add_recipe(sig, ingredients)
class IngredientIndex(Index):
    """A custom index that creates an ingredient matrix."""

    name = 'ingredient'
    localname = 'Ingredient Index'
    shortname = 'Ingredient'

    def generate(self, docnames=None):
        """Group every known recipe under each ingredient it contains."""
        entries = defaultdict(list)

        # Look-up table: full recipe name -> (dispname, type, docname, anchor).
        recipe_info = {}
        for name, dispname, typ, docname, anchor, _prio in self.domain.get_objects():
            recipe_info[name] = (dispname, typ, docname, anchor)

        # Invert the recipe -> ingredients mapping stored on the domain.
        by_ingredient = defaultdict(list)
        for recipe_name, ingredient_list in self.domain.data['recipe_ingredients'].items():
            for ingredient in ingredient_list:
                by_ingredient[ingredient].append(recipe_name)

        # Emit (name, subtype, docname, anchor, extra, qualifier, description)
        # rows, keyed by ingredient name.
        for ingredient, recipe_names in by_ingredient.items():
            for recipe_name in recipe_names:
                dispname, typ, docname, anchor = recipe_info[recipe_name]
                entries[ingredient].append(
                    (dispname, 0, docname, anchor, docname, '', typ))

        # Sphinx expects a sorted list of (key, entries) plus a collapse flag.
        return sorted(entries.items()), True
class RecipeIndex(Index):
    """A custom index that creates an recipe matrix."""

    name = 'recipe'
    localname = 'Recipe Index'
    shortname = 'Recipe'

    def generate(self, docnames=None):
        """Return (content, collapse) grouping recipes by first letter."""
        content = defaultdict(list)

        # sort the list of recipes in alphabetical order
        recipes = self.domain.get_objects()
        recipes = sorted(recipes, key=lambda recipe: recipe[0])

        # generate the expected output, shown below, from the above using the
        # first letter of the recipe as a key to group thing
        #
        # name, subtype, docname, anchor, extra, qualifier, description
        for name, dispname, typ, docname, anchor, _ in recipes:
            content[dispname[0].lower()].append(
                (dispname, 0, docname, anchor, docname, '', typ))

        # convert the dict to the sorted list of tuples expected
        content = sorted(content.items())

        return content, True
class RecipeDomain(Domain):
    """Sphinx domain tying together the recipe directive, role and indices."""

    name = 'recipe'
    label = 'Recipe Sample'
    roles = {
        'ref': XRefRole()
    }
    directives = {
        'recipe': RecipeDirective,
    }
    indices = {
        RecipeIndex,
        IngredientIndex
    }
    initial_data = {
        'recipes': [],  # object list
        'recipe_ingredients': {},  # name -> object
    }

    def get_full_qualified_name(self, node):
        # All recipes live under a fixed "recipe" namespace prefix.
        return '{}.{}'.format('recipe', node.arguments[0])

    def get_objects(self):
        # Yields (name, dispname, type, docname, anchor, priority) tuples,
        # as appended by add_recipe().
        for obj in self.data['recipes']:
            yield(obj)

    def resolve_xref(self, env, fromdocname, builder, typ, target, node,
                     contnode):
        """Resolve a :recipe:ref: cross-reference to the matching recipe."""
        match = [(docname, anchor)
                 for name, sig, typ, docname, anchor, prio
                 in self.get_objects() if sig == target]

        if len(match) > 0:
            todocname = match[0][0]
            targ = match[0][1]

            return make_refnode(builder, fromdocname, todocname, targ,
                                contnode, targ)
        else:
            # Unresolved targets are reported and left as plain text.
            print('Awww, found nothing')
            return None

    def add_recipe(self, signature, ingredients):
        """Add a new recipe to the domain."""
        name = '{}.{}'.format('recipe', signature)
        anchor = 'recipe-{}'.format(signature)

        self.data['recipe_ingredients'][name] = ingredients
        # name, dispname, type, docname, anchor, priority
        self.data['recipes'].append(
            (name, signature, 'Recipe', self.env.docname, anchor, 0))
def setup(app):
    """Sphinx extension entry point: register the ``recipe`` domain."""
    app.add_domain(RecipeDomain)

    return {
        'version': '0.1',
        # All state is kept via the domain APIs, so parallel build is safe.
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
| 31.571429 | 78 | 0.613024 | from collections import defaultdict
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, Index
from sphinx.roles import XRefRole
from sphinx.util.nodes import make_refnode
class RecipeDirective(ObjectDescription):
has_content = True
required_arguments = 1
option_spec = {
'contains': directives.unchanged_required,
}
def handle_signature(self, sig, signode):
signode += addnodes.desc_name(text=sig)
return sig
def add_target_and_index(self, name_cls, sig, signode):
signode['ids'].append('recipe' + '-' + sig)
if 'contains' not in self.options:
ingredients = [
x.strip() for x in self.options.get('contains').split(',')]
recipes = self.env.get_domain('recipe')
recipes.add_recipe(sig, ingredients)
class IngredientIndex(Index):
name = 'ingredient'
localname = 'Ingredient Index'
shortname = 'Ingredient'
def generate(self, docnames=None):
content = defaultdict(list)
recipes = {name: (dispname, typ, docname, anchor)
for name, dispname, typ, docname, anchor, _
in self.domain.get_objects()}
recipe_ingredients = self.domain.data['recipe_ingredients']
ingredient_recipes = defaultdict(list)
for recipe_name, ingredients in recipe_ingredients.items():
for ingredient in ingredients:
ingredient_recipes[ingredient].append(recipe_name)
for ingredient, recipe_names in ingredient_recipes.items():
for recipe_name in recipe_names:
dispname, typ, docname, anchor = recipes[recipe_name]
content[ingredient].append(
(dispname, 0, docname, anchor, docname, '', typ))
content = sorted(content.items())
return content, True
class RecipeIndex(Index):
name = 'recipe'
localname = 'Recipe Index'
shortname = 'Recipe'
def generate(self, docnames=None):
content = defaultdict(list)
recipes = self.domain.get_objects()
recipes = sorted(recipes, key=lambda recipe: recipe[0])
for name, dispname, typ, docname, anchor, _ in recipes:
content[dispname[0].lower()].append(
(dispname, 0, docname, anchor, docname, '', typ))
content = sorted(content.items())
return content, True
class RecipeDomain(Domain):
name = 'recipe'
label = 'Recipe Sample'
roles = {
'ref': XRefRole()
}
directives = {
'recipe': RecipeDirective,
}
indices = {
RecipeIndex,
IngredientIndex
}
initial_data = {
'recipes': [],
'recipe_ingredients': {},
}
def get_full_qualified_name(self, node):
return '{}.{}'.format('recipe', node.arguments[0])
def get_objects(self):
for obj in self.data['recipes']:
yield(obj)
def resolve_xref(self, env, fromdocname, builder, typ, target, node,
contnode):
match = [(docname, anchor)
for name, sig, typ, docname, anchor, prio
in self.get_objects() if sig == target]
if len(match) > 0:
todocname = match[0][0]
targ = match[0][1]
return make_refnode(builder, fromdocname, todocname, targ,
contnode, targ)
else:
print('Awww, found nothing')
return None
def add_recipe(self, signature, ingredients):
name = '{}.{}'.format('recipe', signature)
anchor = 'recipe-{}'.format(signature)
self.data['recipe_ingredients'][name] = ingredients
self.data['recipes'].append(
(name, signature, 'Recipe', self.env.docname, anchor, 0))
def setup(app):
app.add_domain(RecipeDomain)
return {
'version': '0.1',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| true | true |
1c3389a940ec699819dd5b5b565bb6d39d01e0f5 | 5,286 | py | Python | tests/test_linux_aarch64_64.py | ngothan/py-cpuinfo | d8a9c9b095be55d5ceebf42497f099039fe8c348 | [
"MIT"
] | null | null | null | tests/test_linux_aarch64_64.py | ngothan/py-cpuinfo | d8a9c9b095be55d5ceebf42497f099039fe8c348 | [
"MIT"
] | null | null | null | tests/test_linux_aarch64_64.py | ngothan/py-cpuinfo | d8a9c9b095be55d5ceebf42497f099039fe8c348 | [
"MIT"
] | null | null | null |
import unittest
from cpuinfo import *
import helpers
class MockDataSource(object):
    """Stand-in for cpuinfo's DataSource that replays output captured from an
    aarch64 Linux machine (lscpu reports 96 CPUs; cpu_count is set to 6)."""

    bits = '64bit'
    cpu_count = 6
    is_windows = False
    arch_string_raw = 'aarch64'
    uname_string_raw = ''
    can_cpuid = False

    @staticmethod
    def has_proc_cpuinfo():
        return True

    @staticmethod
    def has_lscpu():
        return True

    @staticmethod
    def cat_proc_cpuinfo():
        # Captured /proc/cpuinfo output, returned as (returncode, stdout).
        returncode = 0
        output = '''
processor : 90
BogoMIPS : 200.00
Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics
CPU implementer : 0x43
CPU architecture: 8
CPU variant : 0x1
CPU part : 0x0a1
CPU revision : 0
processor : 91
BogoMIPS : 200.00
Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics
CPU implementer : 0x43
CPU architecture: 8
CPU variant : 0x1
CPU part : 0x0a1
CPU revision : 0
processor : 92
BogoMIPS : 200.00
Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics
CPU implementer : 0x43
CPU architecture: 8
CPU variant : 0x1
CPU part : 0x0a1
CPU revision : 0
processor : 93
BogoMIPS : 200.00
Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics
CPU implementer : 0x43
CPU architecture: 8
CPU variant : 0x1
CPU part : 0x0a1
CPU revision : 0
processor : 94
BogoMIPS : 200.00
Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics
CPU implementer : 0x43
CPU architecture: 8
CPU variant : 0x1
CPU part : 0x0a1
CPU revision : 0
processor : 95
BogoMIPS : 200.00
Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics
CPU implementer : 0x43
CPU architecture: 8
CPU variant : 0x1
CPU part : 0x0a1
CPU revision : 0
'''
        return returncode, output

    @staticmethod
    def lscpu():
        # Captured lscpu output, returned as (returncode, stdout).
        returncode = 0
        output = '''
Architecture: aarch64
Byte Order: Little Endian
CPU(s): 96
On-line CPU(s) list: 0-95
Thread(s) per core: 1
Core(s) per socket: 48
Socket(s): 2
NUMA node(s): 2
L1d cache: 32K
L1i cache: 78K
L2 cache: 16384K
NUMA node0 CPU(s): 0-47
NUMA node1 CPU(s): 48-95
'''
        return returncode, output
class TestLinux_Aarch_64(unittest.TestCase):
    """Exercises cpuinfo's Linux/aarch64 parsing against MockDataSource."""

    def setUp(self):
        # Swap cpuinfo's real DataSource for the canned MockDataSource.
        helpers.backup_data_source(cpuinfo)
        helpers.monkey_patch_data_source(cpuinfo, MockDataSource)

    def tearDown(self):
        helpers.restore_data_source(cpuinfo)

    # NOTE(review): the string below is a bare statement, presumably intended
    # as the docstring of test_returns.
    '''
    Make sure calls return the expected number of fields.
    '''
    def test_returns(self):
        # Only lscpu and /proc/cpuinfo are available on this mock, so all
        # other probes must return empty results.
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_registry()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpufreq_info()))
        self.assertEqual(3, len(cpuinfo._get_cpu_info_from_lscpu()))
        self.assertEqual(1, len(cpuinfo._get_cpu_info_from_proc_cpuinfo()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysctl()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_kstat()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_dmesg()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cat_var_run_dmesg_boot()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_ibm_pa_features()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysinfo()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpuid()))
        self.assertEqual(11, len(cpuinfo._get_cpu_info_internal()))

    def test_get_cpu_info_from_lscpu(self):
        # Cache sizes come from the L1d/L1i/L2 lines of the lscpu fixture.
        info = cpuinfo._get_cpu_info_from_lscpu()

        self.assertEqual('78 KB', info['l1_instruction_cache_size'])
        self.assertEqual('32 KB', info['l1_data_cache_size'])

        self.assertEqual('16384 KB', info['l2_cache_size'])

        self.assertEqual(3, len(info))

    def test_get_cpu_info_from_proc_cpuinfo(self):
        # The Features line of the /proc/cpuinfo fixture, sorted.
        info = cpuinfo._get_cpu_info_from_proc_cpuinfo()

        self.assertEqual(
            ['aes', 'asimd', 'atomics', 'crc32', 'evtstrm',
            'fp', 'pmull', 'sha1', 'sha2']
            ,
            info['flags']
        )

    @unittest.skip("FIXME: This fails because it does not have a way to get CPU brand string and Hz.")
    def test_all(self):
        info = cpuinfo._get_cpu_info_internal()

        self.assertEqual('', info['vendor_id_raw'])
        self.assertEqual('FIXME', info['hardware_raw'])
        self.assertEqual('FIXME', info['brand_raw'])
        self.assertEqual('FIXME', info['hz_advertised_friendly'])
        self.assertEqual('FIXME', info['hz_actual_friendly'])
        self.assertEqual((1000000000, 0), info['hz_advertised'])
        self.assertEqual((1000000000, 0), info['hz_actual'])
        self.assertEqual('ARM_8', info['arch'])
        self.assertEqual(64, info['bits'])
        self.assertEqual(6, info['count'])

        self.assertEqual('aarch64', info['arch_string_raw'])

        self.assertEqual('78K', info['l1_instruction_cache_size'])
        self.assertEqual('32K', info['l1_data_cache_size'])

        self.assertEqual('16384K', info['l2_cache_size'])
        self.assertEqual(0, info['l2_cache_line_size'])
        self.assertEqual(0, info['l2_cache_associativity'])

        self.assertEqual('', info['l3_cache_size'])

        self.assertEqual(0, info['stepping'])
        self.assertEqual(0, info['model'])
        self.assertEqual(0, info['family'])
        self.assertEqual(0, info['processor_type'])
        self.assertEqual(0, info['extended_model'])
        self.assertEqual(0, info['extended_family'])
        self.assertEqual(
            ['aes', 'asimd', 'atomics', 'crc32', 'evtstrm',
            'fp', 'pmull', 'sha1', 'sha2']
            ,
            info['flags']
        )
| 27.821053 | 99 | 0.694098 |
import unittest
from cpuinfo import *
import helpers
class MockDataSource(object):
    """Static stand-in for cpuinfo's DataSource with canned Linux/aarch64 data."""

    # Platform facts that cpuinfo would normally probe at runtime.
    bits = '64bit'
    cpu_count = 6
    is_windows = False
    arch_string_raw = 'aarch64'
    uname_string_raw = ''
    can_cpuid = False

    @staticmethod
    def has_proc_cpuinfo():
        # Pretend /proc/cpuinfo exists on this system.
        return True

    @staticmethod
    def has_lscpu():
        # Pretend the lscpu binary is available.
        return True

    @staticmethod
    def cat_proc_cpuinfo():
        """Return (returncode, output) mimicking `cat /proc/cpuinfo`."""
        returncode = 0
        output = '''
processor : 90
BogoMIPS : 200.00
Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics
CPU implementer : 0x43
CPU architecture: 8
CPU variant : 0x1
CPU part : 0x0a1
CPU revision : 0
processor : 91
BogoMIPS : 200.00
Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics
CPU implementer : 0x43
CPU architecture: 8
CPU variant : 0x1
CPU part : 0x0a1
CPU revision : 0
processor : 92
BogoMIPS : 200.00
Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics
CPU implementer : 0x43
CPU architecture: 8
CPU variant : 0x1
CPU part : 0x0a1
CPU revision : 0
processor : 93
BogoMIPS : 200.00
Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics
CPU implementer : 0x43
CPU architecture: 8
CPU variant : 0x1
CPU part : 0x0a1
CPU revision : 0
processor : 94
BogoMIPS : 200.00
Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics
CPU implementer : 0x43
CPU architecture: 8
CPU variant : 0x1
CPU part : 0x0a1
CPU revision : 0
processor : 95
BogoMIPS : 200.00
Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics
CPU implementer : 0x43
CPU architecture: 8
CPU variant : 0x1
CPU part : 0x0a1
CPU revision : 0
'''
        return returncode, output

    @staticmethod
    def lscpu():
        """Return (returncode, output) mimicking `lscpu`."""
        returncode = 0
        output = '''
Architecture: aarch64
Byte Order: Little Endian
CPU(s): 96
On-line CPU(s) list: 0-95
Thread(s) per core: 1
Core(s) per socket: 48
Socket(s): 2
NUMA node(s): 2
L1d cache: 32K
L1i cache: 78K
L2 cache: 16384K
NUMA node0 CPU(s): 0-47
NUMA node1 CPU(s): 48-95
'''
        return returncode, output
class TestLinux_Aarch_64(unittest.TestCase):
    """Exercises cpuinfo against the mocked Linux/aarch64 data sources above."""

    def setUp(self):
        # Swap the real DataSource for the canned aarch64 mock.
        helpers.backup_data_source(cpuinfo)
        helpers.monkey_patch_data_source(cpuinfo, MockDataSource)

    def tearDown(self):
        # Restore the real DataSource so other test modules are unaffected.
        helpers.restore_data_source(cpuinfo)

    def test_returns(self):
        """Each backend returns the expected number of fields on this platform."""
        # Backends that do not apply on Linux/aarch64 return empty dicts.
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_registry()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpufreq_info()))
        self.assertEqual(3, len(cpuinfo._get_cpu_info_from_lscpu()))
        self.assertEqual(1, len(cpuinfo._get_cpu_info_from_proc_cpuinfo()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysctl()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_kstat()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_dmesg()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cat_var_run_dmesg_boot()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_ibm_pa_features()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysinfo()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpuid()))
        # The fully merged info dict has 11 fields.
        self.assertEqual(11, len(cpuinfo._get_cpu_info_internal()))

    def test_get_cpu_info_from_lscpu(self):
        """The lscpu backend yields exactly the three cache-size fields."""
        info = cpuinfo._get_cpu_info_from_lscpu()
        self.assertEqual('78 KB', info['l1_instruction_cache_size'])
        self.assertEqual('32 KB', info['l1_data_cache_size'])
        self.assertEqual('16384 KB', info['l2_cache_size'])
        self.assertEqual(3, len(info))

    def test_get_cpu_info_from_proc_cpuinfo(self):
        """/proc/cpuinfo parsing produces the sorted feature flags."""
        info = cpuinfo._get_cpu_info_from_proc_cpuinfo()
        self.assertEqual(
            ['aes', 'asimd', 'atomics', 'crc32', 'evtstrm',
             'fp', 'pmull', 'sha1', 'sha2'],
            info['flags']
        )

    @unittest.skip("FIXME: This fails because it does not have a way to get CPU brand string and Hz.")
    def test_all(self):
        """Check every field of the merged info dict (skipped: no brand/Hz in mocks)."""
        info = cpuinfo._get_cpu_info_internal()
        # No vendor/brand/Hz data is derivable from the mocked sources.
        self.assertEqual('', info['vendor_id_raw'])
        self.assertEqual('FIXME', info['hardware_raw'])
        self.assertEqual('FIXME', info['brand_raw'])
        self.assertEqual('FIXME', info['hz_advertised_friendly'])
        self.assertEqual('FIXME', info['hz_actual_friendly'])
        self.assertEqual((1000000000, 0), info['hz_advertised'])
        self.assertEqual((1000000000, 0), info['hz_actual'])
        # Architecture facts derived from arch_string_raw == 'aarch64'.
        self.assertEqual('ARM_8', info['arch'])
        self.assertEqual(64, info['bits'])
        self.assertEqual(6, info['count'])
        self.assertEqual('aarch64', info['arch_string_raw'])
        # Cache sizes — presumably from the mocked lscpu output; TODO
        # confirm the merged formatting ('78K' vs '78 KB').
        self.assertEqual('78K', info['l1_instruction_cache_size'])
        self.assertEqual('32K', info['l1_data_cache_size'])
        self.assertEqual('16384K', info['l2_cache_size'])
        self.assertEqual(0, info['l2_cache_line_size'])
        self.assertEqual(0, info['l2_cache_associativity'])
        self.assertEqual('', info['l3_cache_size'])
        # CPUID-style fields default to zero on this platform.
        self.assertEqual(0, info['stepping'])
        self.assertEqual(0, info['model'])
        self.assertEqual(0, info['family'])
        self.assertEqual(0, info['processor_type'])
        self.assertEqual(0, info['extended_model'])
        self.assertEqual(0, info['extended_family'])
        # Feature flags parsed from the mocked /proc/cpuinfo.
        self.assertEqual(
            ['aes', 'asimd', 'atomics', 'crc32', 'evtstrm',
             'fp', 'pmull', 'sha1', 'sha2'],
            info['flags']
        )
| true | true |
1c338a3c880b366bde0f0b55bed72fa47ca41f96 | 3,267 | py | Python | atari/dqn/replay_memory.py | podondra/roboschool-rl | 2e6d6b1302eaa9aea12ebd81e2ad7a22d29a8d69 | [
"MIT"
] | 2 | 2018-03-06T21:26:34.000Z | 2021-12-22T12:31:47.000Z | atari/dqn/replay_memory.py | podondra/roboschool-rl | 2e6d6b1302eaa9aea12ebd81e2ad7a22d29a8d69 | [
"MIT"
] | 4 | 2019-12-16T20:37:48.000Z | 2020-03-30T20:08:36.000Z | atari/dqn/replay_memory.py | podondra/roboschool-rl | 2e6d6b1302eaa9aea12ebd81e2ad7a22d29a8d69 | [
"MIT"
] | 2 | 2018-03-06T21:45:19.000Z | 2018-04-06T20:53:15.000Z | import numpy
import tensorflow
from collections import deque
class Preprocessor:
    """TF graph converting a raw 210x160 RGB frame into an 84x84 grayscale image."""

    def __init__(self):
        # Input placeholder: one raw uint8 RGB frame from the emulator.
        self.img = tensorflow.placeholder(
            tensorflow.uint8,
            shape=[210, 160, 3]
        )
        grayscale = tensorflow.image.rgb_to_grayscale(self.img)
        downscaled = tensorflow.image.resize_images(
            grayscale,
            [84, 84],
            method=tensorflow.image.ResizeMethod.BILINEAR
        )
        # Drop the trailing channel dimension so the output is plain [84, 84].
        self.preprocessor = tensorflow.reshape(downscaled, [84, 84])

    def __call__(self, S, sess):
        """Evaluate the preprocessing graph on frame S within session sess."""
        return sess.run(self.preprocessor, {self.img: S})
class ReplayMemory:
    """Bounded buffer of (frame, action, reward) transitions for DQN training."""

    # The Nature (2015) DQN paper uses a 1M-transition memory.
    REPLAY_MEMORY_SIZE = 1000000
    # Number of random transitions collected before learning starts.
    REPLAY_START_SIZE = 50000
    AGENT_HISTORY_LEN = 4
    MINIBATCH_SIZE = 32

    def __init__(self, env, preprocess, sess):
        """Create the memory and pre-fill it with random experience.

        env is an OpenAI gym environment, preprocess an instance of the
        Preprocessor class, sess a TensorFlow session.
        """
        self.D = deque(maxlen=self.REPLAY_MEMORY_SIZE)
        self.populate_memory(env, preprocess, sess)

    def __len__(self):
        return len(self.D)

    def __getitem__(self, index):
        return self.D[index]

    def populate_memory(self, env, preprocess, sess):
        """Fill the memory with transitions produced by a random policy."""
        obs = env.reset()
        frame = preprocess(obs, sess)
        while len(self.D) < self.REPLAY_START_SIZE:
            action = env.action_space.sample()
            next_obs, reward, done, _ = env.step(action)
            next_frame = preprocess(next_obs, sess)
            self.store(frame, action, reward)
            obs, frame = next_obs, next_frame
            if done and len(self.D) < self.REPLAY_START_SIZE:
                # Mark the terminal state with action -1 and zero reward.
                self.store(frame, -1, 0)
                obs = env.reset()
                frame = preprocess(obs, sess)

    def store(self, theta, A, R):
        """Append a single (frame, action, reward) transition."""
        self.D.append((theta, A, R))

    def get_state(self, index):
        """Stack the AGENT_HISTORY_LEN frames ending at *index* on the last axis."""
        frames = [self.D[index - offset][0] for offset in (3, 2, 1, 0)]
        return numpy.stack(frames, axis=-1)

    def get_recent_state(self):
        """State built from the most recently stored frames."""
        return self.get_state(len(self.D) - 1)

    def get_transition(self, index):
        """Return the (state, action, reward, next_state) tuple ending at *index*."""
        _, action, reward = self.D[index]
        return (
            self.get_state(index - 1),
            action,
            reward,
            self.get_state(index)
        )

    def sample_minibatch(self):
        """Sample a random minibatch of 32 transitions from the memory."""
        picks = numpy.random.randint(
            low=self.AGENT_HISTORY_LEN, high=len(self.D),
            size=self.MINIBATCH_SIZE
        )
        # TODO handle terminal states correctly — none of the
        # AGENT_HISTORY_LEN frames of a state should be terminal.
        # TODO numpy.stack the batch for gradient computation.
        return [self.get_transition(pick) for pick in picks]
| 33 | 76 | 0.584328 | import numpy
import tensorflow
from collections import deque
class Preprocessor:
    """TF graph converting a raw 210x160x3 uint8 frame to an 84x84 grayscale image."""
    def __init__(self):
        # Placeholder for one raw RGB frame as produced by the emulator.
        self.img = tensorflow.placeholder(
            tensorflow.uint8,
            shape=[210, 160, 3]
        )
        gray_img = tensorflow.image.rgb_to_grayscale(self.img)
        # Bilinear downscale to the 84x84 network input size.
        resize_img = tensorflow.image.resize_images(
            gray_img,
            [84, 84],
            method=tensorflow.image.ResizeMethod.BILINEAR
        )
        # Drop the trailing channel dimension: output shape is [84, 84].
        reshape_img = tensorflow.reshape(resize_img, [84, 84])
        self.preprocessor = reshape_img
    def __call__(self, S, sess):
        """Evaluate the preprocessing graph on frame S using session sess."""
        return sess.run(self.preprocessor, {self.img: S})
class ReplayMemory:
    """Bounded buffer of (frame, action, reward) transitions for DQN training."""
    # Memory capacity and warm-up size; minibatch/history sizes for sampling.
    REPLAY_MEMORY_SIZE = 1000000
    REPLAY_START_SIZE = 50000
    AGENT_HISTORY_LEN = 4
    MINIBATCH_SIZE = 32
    def __init__(self, env, preprocess, sess):
        """Create the memory and pre-fill it with random experience.

        env is an OpenAI gym environment, preprocess a Preprocessor
        instance, sess a TensorFlow session.
        """
        self.D = deque(maxlen=self.REPLAY_MEMORY_SIZE)
        self.populate_memory(env, preprocess, sess)
    def __len__(self):
        return len(self.D)
    def __getitem__(self, index):
        return self.D[index]
    def populate_memory(self, env, preprocess, sess):
        """Fill the memory with transitions produced by a random policy."""
        S = env.reset()
        theta = preprocess(S, sess)
        while len(self.D) < self.REPLAY_START_SIZE:
            A = env.action_space.sample()
            S_next, R, done, _ = env.step(A)
            theta_next = preprocess(S_next, sess)
            self.store(theta, A, R)
            S, theta = S_next, theta_next
            if done and len(self.D) < self.REPLAY_START_SIZE:
                # Store the terminal state with action -1 and zero reward.
                self.store(theta, -1, 0)
                S = env.reset()
                theta = preprocess(S, sess)
    def store(self, theta, A, R):
        """Append a single (frame, action, reward) transition."""
        self.D.append((theta, A, R))
    def get_state(self, index):
        """Stack the AGENT_HISTORY_LEN frames ending at *index* on the last axis."""
        return numpy.stack(
            [self.D[i][0] for i in range(index - 3, index + 1)],
            axis=-1
        )
    def get_recent_state(self):
        """State built from the most recently stored frames."""
        return self.get_state(len(self.D) - 1)
    def get_transition(self, index):
        """Return the (state, action, reward, next_state) tuple ending at *index*."""
        return (
            self.get_state(index - 1),
            self.D[index][1],
            self.D[index][2],
            self.get_state(index)
        )
    def sample_minibatch(self):
        """Sample a random minibatch of 32 transitions from the memory."""
        indexes = numpy.random.randint(
            low=self.AGENT_HISTORY_LEN, high=len(self.D),
            size=self.MINIBATCH_SIZE
        )
        # TODO handle terminal states correctly
        # for example there should not be terminal state
        # in the AGENT_HISTORY_LEN frames
        # TODO numpy.stack for demands of gradient computation
        return [self.get_transition(index) for index in indexes]
| true | true |
1c338b26f0776a4698a5f37082f87bdc9110edbf | 33,115 | py | Python | test/functional/rpc_fundrawtransaction.py | shadow-42/litecoincash | 965c806041f8e425ab5b9c8b979cf100c0ecf740 | [
"MIT"
] | 78 | 2018-02-20T00:38:43.000Z | 2022-03-11T04:11:04.000Z | test/functional/rpc_fundrawtransaction.py | shadow-42/litecoincash | 965c806041f8e425ab5b9c8b979cf100c0ecf740 | [
"MIT"
] | 39 | 2018-02-20T13:36:27.000Z | 2021-11-30T23:38:34.000Z | test/functional/rpc_fundrawtransaction.py | shadow-42/litecoincash | 965c806041f8e425ab5b9c8b979cf100c0ecf740 | [
"MIT"
] | 77 | 2018-02-20T00:45:12.000Z | 2022-02-14T21:21:15.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
    """Return the first UTXO in *listunspent* whose 'amount' equals *amount*.

    Raises AssertionError when no matching UTXO exists.
    """
    match = next((utx for utx in listunspent if utx['amount'] == amount), None)
    if match is None:
        raise AssertionError('Could not find unspent with amount={}'.format(amount))
    return match
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
    """Configure the framework: four nodes, starting from an empty (clean) chain."""
    self.num_nodes = 4
    self.setup_clean_chain = True
def setup_network(self, split=False):
    """Start all nodes and wire them into a partial mesh.

    NOTE(review): the *split* parameter is accepted but unused here —
    presumably retained for interface compatibility with the framework's
    older split-network API; TODO confirm.
    """
    self.setup_nodes()
    # Connect 0<->1, 1<->2, 0<->2 and 0<->3 so blocks/txs relay everywhere.
    connect_nodes_bi(self.nodes, 0, 1)
    connect_nodes_bi(self.nodes, 1, 2)
    connect_nodes_bi(self.nodes, 0, 2)
    connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-5, "changeAddress must be a valid litecoincash address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################
# test a fundrawtransaction with a provided change type #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[2].fundrawtransaction, rawtx, {'change_type': None})
assert_raises_rpc_error(-5, "Unknown change type", self.nodes[2].fundrawtransaction, rawtx, {'change_type': ''})
rawtx = self.nodes[2].fundrawtransaction(rawtx, {'change_type': 'bech32'})
tx = self.nodes[2].decoderawtransaction(rawtx['hex'])
assert_equal('witness_v0_keyhash', tx['vout'][rawtx['changepos']]['scriptPubKey']['type'])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.stop_node(0)
self.nodes[1].node_encrypt_wallet("test")
self.stop_node(2)
self.stop_node(3)
self.start_nodes()
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 1*min_relay_tx_fee}) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
################################
# Test no address reuse occurs #
################################
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
if __name__ == '__main__':
RawTransactionsTest().main()
| 45.177353 | 223 | 0.569651 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
ransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
| true | true |
1c338ee827f3d72ea745ec0b86a9646c7ce8fc3e | 219 | py | Python | Code/bindings.py | akankshadiwedy/t2wml | 02f60611eec19d10a92fd2cb06f07339cd2cb269 | [
"MIT"
] | null | null | null | Code/bindings.py | akankshadiwedy/t2wml | 02f60611eec19d10a92fd2cb06f07339cd2cb269 | [
"MIT"
] | null | null | null | Code/bindings.py | akankshadiwedy/t2wml | 02f60611eec19d10a92fd2cb06f07339cd2cb269 | [
"MIT"
] | null | null | null | bindings = {
"$col": None,
"$row": None,
"$left": None,
"$right": None,
"$top": None,
"$bottom": None,
"excel_sheet": None,
"item_table": None,
"created_by": None,
"code": None
}
| 16.846154 | 24 | 0.488584 | bindings = {
"$col": None,
"$row": None,
"$left": None,
"$right": None,
"$top": None,
"$bottom": None,
"excel_sheet": None,
"item_table": None,
"created_by": None,
"code": None
}
| true | true |
1c3391056e78312935375ca84db34f34357fc24e | 3,314 | py | Python | src/utils.py | jcboyd/cyclegan-roi | f0c80c6122d17406f5282f58ea09abaf2b70c388 | [
"BSD-2-Clause"
] | null | null | null | src/utils.py | jcboyd/cyclegan-roi | f0c80c6122d17406f5282f58ea09abaf2b70c388 | [
"BSD-2-Clause"
] | null | null | null | src/utils.py | jcboyd/cyclegan-roi | f0c80c6122d17406f5282f58ea09abaf2b70c388 | [
"BSD-2-Clause"
] | null | null | null | import numpy as np
import torch
from torch.nn import UpsamplingNearest2d, UpsamplingBilinear2d
from rectpack import newPacker
def get_mnist_canvas(images, labels, nb_classes=10, dim=128):
    """Paste digit crops at random positions onto a (dim, dim) canvas.

    Parameters
    ----------
    images : tensor of shape (num_objs, h, w); background is assumed to be
        -1 so overlapping crops can be merged with an elementwise max.
    labels : int tensor of shape (num_objs,), class ids in [0, nb_classes).
    nb_classes : channel count of the per-class auxiliary canvases.
    dim : side length of the square output canvases.

    Returns
    -------
    canvas : (dim, dim) composite image, background filled with -1.
    noise_canvas : (nb_classes, dim, dim) block-upsampled Gaussian noise
        written into each object's box on its class channel.
    condition_canvas : (nb_classes, dim, dim) binary class/location mask.
    bboxes : (num_objs, 4) int tensor of [x1, y1, x2, y2] boxes.
    """
    canvas = -torch.ones((dim, dim))
    noise_canvas = torch.zeros((nb_classes, dim, dim))
    condition_canvas = torch.zeros((nb_classes, dim, dim))

    num_objs, h, w = images.shape

    # One random top-left corner per object; boxes may overlap.
    y, x = (torch.randint(0, dim - h, size=(num_objs, 1)),
            torch.randint(0, dim - w, size=(num_objs, 1)))
    bboxes = torch.cat([x, y, x + w, y + h], axis=1)

    for i, (x1, y1, x2, y2) in enumerate(bboxes):
        # Merge with whatever is already on the canvas (max keeps strokes).
        canvas[y1:y2, x1:x2] = torch.max(canvas[y1:y2, x1:x2],
                                         images[i].squeeze())

        # Coarse (h//4 x w//4) noise, nearest-upsampled to the exact box
        # size.  Fix: the original drew a (w//4, h//4) patch and used
        # scale_factor=4, which only matches the (h, w) target for square
        # crops with sides divisible by 4; size=(h, w) -- the same idiom
        # used by get_mnist_knapsack -- is identical for 28x28 MNIST and
        # correct for all crop sizes.
        z = torch.randn(1, 1, h // 4, w // 4)
        z = UpsamplingNearest2d(size=(h, w))(z)
        noise_canvas[labels[i], y1:y2, x1:x2] = z.squeeze()
        condition_canvas[labels[i], y1:y2, x1:x2] = torch.ones((h, w))

    return canvas, noise_canvas, condition_canvas, bboxes
def get_mnist_knapsack(images, labels, nb_classes=10, dim=128):
    """Bin-pack randomly sized digit crops onto a (dim, dim) canvas.

    Box heights/widths are sampled from N(28, 5), clipped to [14, 48], and
    laid out without overlap by rectpack; each crop is bilinearly resized
    to its packed rectangle.  Rectangles that do not fit in the bin are
    silently dropped.

    Returns the same (canvas, noise_canvas, condition_canvas, bboxes)
    layout as get_mnist_canvas, with bboxes as a float tensor of
    [x1, y1, x2, y2] rows.
    """
    bboxes = []
    canvas = -torch.ones((dim, dim))
    noise_canvas = torch.zeros((nb_classes, dim, dim))
    condition_canvas = torch.zeros((nb_classes, dim, dim))

    # Random box sizes around the native 28x28 MNIST resolution.
    hs, ws = 28 + 5 * np.random.randn(2, images.shape[0])
    hs = np.clip(hs, 14, 48).astype('int')
    ws = np.clip(ws, 14, 48).astype('int')

    # Fix: the original zipped (hs, hs), silently discarding the sampled
    # widths and forcing every rectangle to be square.
    rectangles = list(zip(ws, hs))
    # Fix: pack into the actual canvas size instead of a hard-coded 128.
    bins = [(dim, dim)]

    packer = newPacker()
    # Queue all rectangles, then the single target bin, and pack.
    for r in rectangles:
        packer.add_rect(*r)
    for b in bins:
        packer.add_bin(*b)
    packer.pack()

    for i, rect in enumerate(packer.rect_list()):
        _, x, y, w, h, _ = rect
        # Resize the i-th crop to the packed rectangle and paste it in;
        # [0, 0] drops the batch/channel dims so shapes match the slice.
        scaled_crop = UpsamplingBilinear2d(size=(h, w))(images[i][None, None])[0, 0]
        canvas[y:y+h, x:x+w] = torch.max(canvas[y:y+h, x:x+w], scaled_crop)

        # Coarse 7x7 Gaussian noise, nearest-upsampled to the box size.
        z = torch.randn(1, 1, 7, 7)
        z = UpsamplingNearest2d(size=(h, w))(z)
        noise_canvas[labels[i], y:y+h, x:x+w] = z
        condition_canvas[labels[i], y:y+h, x:x+w] = torch.ones((h, w))
        bboxes.append([x, y, x + w, y + h])

    return canvas, noise_canvas, condition_canvas, torch.Tensor(bboxes)
def mnist_canvas_generator(x_data, y_data, nb_batch, nb_obj, knapsack):
    """Endlessly yield batches of composed canvases for training.

    Each step samples nb_batch x nb_obj random digit indices, builds one
    canvas per batch element (packed layout if ``knapsack`` else random
    placement) and yields
    (canvas_batch, noise_batch, condition_batch, bbox_batch), where each
    bbox_batch row is [batch_index, x1, y1, x2, y2].
    """
    make_canvas = get_mnist_knapsack if knapsack else get_mnist_canvas
    while True:
        sample_idx = torch.randint(x_data.shape[0], size=(nb_batch, nb_obj))
        samples = [make_canvas(x_data[idx], y_data[idx]) for idx in sample_idx]

        canvases, noises, conditions, boxes = zip(*samples)
        # Stack per-sample tensors, prepending batch (and channel) dims.
        canvas_batch = torch.cat([c[None, None] for c in canvases])
        noise_batch = torch.cat([z[None] for z in noises])
        condition_batch = torch.cat([c[None] for c in conditions])

        # Tag every box with the index of the sample it belongs to.
        tagged = [torch.cat([k * torch.ones(b.shape[0], 1), b], axis=1)
                  for k, b in enumerate(boxes)]
        bbox_batch = torch.cat(tagged, axis=0)

        yield canvas_batch, noise_batch, condition_batch, bbox_batch
| 31.561905 | 85 | 0.610441 | import numpy as np
import torch
from torch.nn import UpsamplingNearest2d, UpsamplingBilinear2d
from rectpack import newPacker
def get_mnist_canvas(images, labels, nb_classes=10, dim=128):
    """Paste digit crops at random positions onto a (dim, dim) canvas.

    Parameters
    ----------
    images : tensor of shape (num_objs, h, w); background is assumed to be
        -1 so overlapping crops can be merged with an elementwise max.
    labels : int tensor of shape (num_objs,), class ids in [0, nb_classes).
    nb_classes : channel count of the per-class auxiliary canvases.
    dim : side length of the square output canvases.

    Returns
    -------
    (canvas, noise_canvas, condition_canvas, bboxes): canvas is (dim, dim),
    both auxiliary canvases are (nb_classes, dim, dim), and bboxes is an
    int tensor of [x1, y1, x2, y2] rows, one per object.
    """
    # Background of -1 (inputs presumably scaled to [-1, 1] -- verify).
    canvas = -torch.ones((dim, dim))
    noise_canvas = torch.zeros((nb_classes, dim, dim))
    condition_canvas = torch.zeros((nb_classes, dim, dim))
    num_objs, h, w = images.shape
    # One random top-left corner per object; boxes may overlap.
    y, x = (torch.randint(0, dim - h, size=(num_objs, 1)),
            torch.randint(0, dim - w, size=(num_objs, 1)))
    bboxes = torch.cat([x, y, x + w, y + h], axis=1)
    for i, (x1, y1, x2, y2) in enumerate(bboxes):
        # Merge with whatever is already on the canvas (max keeps strokes).
        canvas[y1:y2, x1:x2] = torch.max(canvas[y1:y2, x1:x2],
                                         images[i].squeeze())
        # Coarse noise upsampled 4x into the object's box on its class
        # channel.  NOTE(review): the draw is (w//4, h//4) while the target
        # box is (h, w) -- only equivalent for square crops with sides
        # divisible by 4; confirm for other crop sizes.
        z = torch.randn(1, 1, w // 4, h // 4)
        z = UpsamplingNearest2d(scale_factor=4)(z)
        noise_canvas[labels[i], y1:y2, x1:x2] = z.squeeze()
        condition_canvas[labels[i], y1:y2, x1:x2] = torch.ones((h, w))
    return canvas, noise_canvas, condition_canvas, bboxes
def get_mnist_knapsack(images, labels, nb_classes=10, dim=128):
    """Bin-pack randomly sized digit crops onto a (dim, dim) canvas.

    Box sizes are sampled from N(28, 5) and clipped to [14, 48]; rectpack
    lays them out without overlap and each crop is bilinearly resized to
    its packed rectangle.  Returns the same quadruple layout as
    get_mnist_canvas, with bboxes as a float tensor of [x1, y1, x2, y2].
    """
    bboxes = []
    canvas = -torch.ones((dim, dim))
    noise_canvas = torch.zeros((nb_classes, dim, dim))
    condition_canvas = torch.zeros((nb_classes, dim, dim))
    # Random box sizes around the native 28x28 resolution.
    hs, ws = 28 + 5 * np.random.randn(2, images.shape[0])
    hs = np.clip(hs, 14, 48).astype('int')
    ws = np.clip(ws, 14, 48).astype('int')
    # NOTE(review): ws is computed but unused -- zip(hs, hs) makes every
    # rectangle square; zip(ws, hs) was probably intended.
    rectangles = list(zip(hs, hs))
    # NOTE(review): bin size is hard-coded to 128 rather than (dim, dim).
    bins = [(128, 128)]
    packer = newPacker()
    # Queue all rectangles, then the single target bin, and pack.
    for r in rectangles:
        packer.add_rect(*r)
    for b in bins:
        packer.add_bin(*b)
    packer.pack()
    # rect_list() yields only the rectangles that were actually placed;
    # those that did not fit are silently dropped.
    for i, rect in enumerate(packer.rect_list()):
        _, x, y, w, h, _ = rect
        # Resize the i-th crop to the packed rectangle and paste it in.
        scaled_crop = UpsamplingBilinear2d(size=(h, w))(images[i][None, None])
        canvas[y:y+h, x:x+w] = torch.max(canvas[y:y+h, x:x+w], scaled_crop)
        # Coarse 7x7 Gaussian noise, nearest-upsampled to the box size.
        z = torch.randn(1, 1, 7, 7)
        z = UpsamplingNearest2d(size=(h, w))(z)
        noise_canvas[labels[i], y:y+h, x:x+w] = z
        condition_canvas[labels[i], y:y+h, x:x+w] = torch.ones((h, w))
        bboxes.append([x, y, x + w, y + h])
    return canvas, noise_canvas, condition_canvas, torch.Tensor(bboxes)
def mnist_canvas_generator(x_data, y_data, nb_batch, nb_obj, knapsack):
    """Endlessly yield batches of composed canvases for training.

    Each step samples nb_batch x nb_obj random digit indices, builds one
    canvas per batch element (packed layout if ``knapsack`` else random
    placement) and yields
    (canvas_batch, noise_batch, condition_batch, bbox_batch), where each
    bbox_batch row is [batch_index, x1, y1, x2, y2].
    """
    f_canvas = get_mnist_knapsack if knapsack else get_mnist_canvas
    while True:
        batch_idx = torch.randint(x_data.shape[0], size=(nb_batch, nb_obj))
        data = [f_canvas(x_data[idx], y_data[idx]) for idx in batch_idx]
        # Stack per-sample tensors, prepending batch (and channel) dims.
        canvas_batch = torch.cat([canvas[None, None] for canvas, _, _, _ in data])
        noise_batch = torch.cat([noise[None] for _, noise, _, _ in data])
        condition_batch = torch.cat([condition[None] for _, _, condition, _ in data])
        # Tag every box with the index of the sample it belongs to.
        bbox_batch = [torch.cat([i * torch.ones(bboxes.shape[0], 1), bboxes], axis=1)
                      for i, (_, _, _, bboxes) in enumerate(data)]
        bbox_batch = torch.cat(bbox_batch, axis=0)
        yield canvas_batch, noise_batch, condition_batch, bbox_batch
| true | true |
1c33916ee264c79fbaa5784392a842d3ae69941f | 1,791 | py | Python | uri/1048_aumento_salario.py | thekilian/Python-pratica | 875661addd5b8eb4364bc638832c7ab55dcefce4 | [
"MIT"
] | null | null | null | uri/1048_aumento_salario.py | thekilian/Python-pratica | 875661addd5b8eb4364bc638832c7ab55dcefce4 | [
"MIT"
] | null | null | null | uri/1048_aumento_salario.py | thekilian/Python-pratica | 875661addd5b8eb4364bc638832c7ab55dcefce4 | [
"MIT"
] | null | null | null | '''
A empresa ABC resolveu conceder um aumento de salários a seus funcionários de acordo com a tabela abaixo:
| Salário | Percentual de reajuste |
| ----------------- | ---------------------- |
| 0 - 400.00 | 15% |
| 400.01 - 800.00 | 12% |
| 800.01 - 1200.00 | 10% |
| 1200.01 - 2000.00 | 7% |
| Acima de 2000.00 | 4% |
Leia o salário do funcionário e calcule e mostre o novo salário, bem como o valor de reajuste ganho e o índice reajustado, em percentual.
| Input Sample | Output Samples |
| ------------ | ---------------------- |
| 400.00 | Novo salario: 460.00 |
| | Reajuste ganho: 60.00 |
| | Em percentual: 15 % |
| ------------ | ---------------------- |
| 800.01 | Novo salario: 880.01 |
| | Reajuste ganho: 80.00 |
| | Em percentual: 10 % |
| ------------ | ---------------------- |
| 2000.00 | Novo salario: 2140.00 |
| | Reajuste ganho: 140.00 |
| | Em percentual: 7 % |
'''
# URI 1048: read a salary and apply the raise bracket from the problem
# table (0-400: 15%, 400.01-800: 12%, 800.01-1200: 10%, 1200.01-2000: 7%,
# above 2000: 4%).
# Fixed: the original guarded the first bracket with `salario > 0`, so a
# valid input of 0.00 matched no branch and `novo = salario + reaj` raised
# NameError; open-ended `<=` brackets with a final `else` also close the
# float gaps between e.g. 400.00 and 400.01.
salario = float(input())

if salario <= 400.00:
    reaj = salario * 0.15
    perc = 15
elif salario <= 800.00:
    reaj = salario * 0.12
    perc = 12
elif salario <= 1200.00:
    reaj = salario * 0.1
    perc = 10
elif salario <= 2000.00:
    reaj = salario * 0.07
    perc = 7
else:
    reaj = salario * 0.04
    perc = 4

# Report new salary, absolute raise, and the percentage applied.
novo = salario + reaj
print("Novo salario: {:.2f}".format(novo))
print("Reajuste ganho: {:.2f}".format(reaj))
print("Em percentual: {} %".format(perc))
# URI 1048 -- apply the salary-raise bracket table and report the result.
salario = float(input())
# Select the raise amount and percentage from the bracket table.
# NOTE(review): the first guard requires salario > 0, so an input of 0.00
# leaves reaj/perc unbound and `novo = salario + reaj` raises NameError;
# confirm whether 0 is a legal input for this exercise.
if salario > 0 and salario <= 400.00:
    reaj = salario * 0.15
    perc = 15
elif salario >= 400.01 and salario <= 800.00:
    reaj = salario * 0.12
    perc = 12
elif salario >= 800.01 and salario <= 1200.00:
    reaj = salario * 0.1
    perc = 10
elif salario >= 1200.01 and salario <= 2000.00:
    reaj = salario * 0.07
    perc = 7
elif salario > 2000.00:
    reaj = salario * 0.04
    perc = 4
# Report new salary and the absolute raise (percentage printed below).
novo = salario + reaj
print("Novo salario: {:.2f}".format(novo))
print("Reajuste ganho: {:.2f}".format(reaj))
print("Em percentual: {} %".format(perc)) | true | true |
1c3391bb5056d6b61b8866c71a4dff48e53c8011 | 14,150 | py | Python | .ci/gitlab/template.ci.py | kinnala/pymor | 9d2a8ee5f7a71482e62952257332d269d50678e9 | [
"Unlicense"
] | null | null | null | .ci/gitlab/template.ci.py | kinnala/pymor | 9d2a8ee5f7a71482e62952257332d269d50678e9 | [
"Unlicense"
] | 36 | 2020-06-12T07:36:49.000Z | 2022-03-29T14:06:48.000Z | .ci/gitlab/template.ci.py | kinnala/pymor | 9d2a8ee5f7a71482e62952257332d269d50678e9 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
import os
import jinja2
from pathlib import Path # python3 only
from dotenv import dotenv_values
import sys
import gitlab
from itertools import product
tpl = r'''# THIS FILE IS AUTOGENERATED -- DO NOT EDIT #
# Edit and Re-run .ci/gitlab/template.ci.py instead #
stages:
- sanity
- test
- build
- install_checks
- deploy
{% macro never_on_schedule_rule(exclude_github=False) -%}
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
when: never
{%- if exclude_github %}
- if: $CI_COMMIT_REF_NAME =~ /^github.*/
when: never
{%- endif %}
- when: on_success
{%- endmacro -%}
#************ definition of base jobs *********************************************************************************#
.test_base:
retry:
max: 2
when:
- runner_system_failure
- stuck_or_timeout_failure
- api_failure
tags:
- autoscaling
rules:
- if: $CI_COMMIT_REF_NAME =~ /^staging.*/
when: never
- when: on_success
variables:
PYPI_MIRROR_TAG: {{pypi_mirror_tag}}
CI_IMAGE_TAG: {{ci_image_tag}}
PYMOR_HYPOTHESIS_PROFILE: ci
PYMOR_PYTEST_EXTRA: ""
BINDERIMAGE: ${CI_REGISTRY_IMAGE}/binder:${CI_COMMIT_REF_SLUG}
.pytest:
extends: .test_base
tags:
- long execution time
- autoscaling
environment:
name: unsafe
stage: test
after_script:
- .ci/gitlab/after_script.bash
cache:
key: same_db_on_all_runners
paths:
- .hypothesis
artifacts:
when: always
name: "$CI_JOB_STAGE-$CI_COMMIT_REF_SLUG"
expire_in: 3 months
paths:
- src/pymortests/testdata/check_results/*/*_changed
- docs/source/*_extracted.py
- coverage*
- memory_usage.txt
- .hypothesis
- test_results*.xml
{# note: only Vanilla and numpy runs generate coverage or test_results so we can skip others entirely here #}
.submit:
extends: .test_base
image: {{registry}}/pymor/ci_sanity:{{ci_image_tag}}
variables:
XDG_CACHE_DIR: /tmp
retry:
max: 2
when:
- always
environment:
name: safe
{{ never_on_schedule_rule(exclude_github=True) }}
stage: deploy
script: .ci/gitlab/submit.bash
.docker-in-docker:
tags:
- docker-in-docker
- autoscaling
extends: .test_base
timeout: 45 minutes
retry:
max: 2
when:
- runner_system_failure
- stuck_or_timeout_failure
- api_failure
- unknown_failure
- job_execution_timeout
{# this is intentionally NOT moving with CI_IMAGE_TAG #}
image: {{registry}}/pymor/docker-in-docker:2021.1.0
variables:
DOCKER_HOST: tcp://docker:2375/
DOCKER_DRIVER: overlay2
before_script:
- 'export SHARED_PATH="${CI_PROJECT_DIR}/shared"'
- mkdir -p ${SHARED_PATH}
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
services:
- name: {{registry}}/docker:dind
alias: docker
environment:
name: unsafe
# this should ensure binderhubs can still build a runnable image from our repo
.binder:
extends: .docker-in-docker
stage: install_checks
needs: ["ci setup"]
{{ never_on_schedule_rule() }}
variables:
USER: juno
.check_wheel:
extends: .test_base
stage: install_checks
{{ never_on_schedule_rule() }}
services:
- name: {{registry}}/pymor/devpi:{{pypi_mirror_tag}}
alias: pymor__devpi
before_script:
# bump to our minimal version
- python3 -m pip install devpi-client
- devpi use http://pymor__devpi:3141/root/public --set-cfg
- devpi login root --password ''
- devpi upload --from-dir --formats=* ./dist/*.whl
# the docker service adressing fails on other runners
tags: [mike]
.sanity_checks:
extends: .test_base
image: {{registry}}/pymor/ci_sanity:{{ci_image_tag}}
stage: sanity
#******** end definition of base jobs *********************************************************************************#
# https://docs.gitlab.com/ee/ci/yaml/README.html#workflowrules-templates
include:
- template: 'Workflows/Branch-Pipelines.gitlab-ci.yml'
#******* sanity stage
# this step makes sure that on older python our install fails with
# a nice message ala "python too old" instead of "SyntaxError"
verify setup.py:
extends: .sanity_checks
script:
- python3 setup.py egg_info
ci setup:
extends: .sanity_checks
script:
- ${CI_PROJECT_DIR}/.ci/gitlab/ci_sanity_check.bash "{{ ' '.join(pythons) }}"
#****** test stage
{%- for script, py, para in matrix %}
{{script}} {{py[0]}} {{py[2]}}:
extends: .pytest
{{ never_on_schedule_rule() }}
variables:
COVERAGE_FILE: coverage_{{script}}__{{py}}
{%- if script == "mpi" %}
retry:
max: 2
when: always
{%- endif %}
services:
{%- if script == "oldest" %}
- name: {{registry}}/pymor/pypi-mirror_oldest_py{{py}}:{{pypi_mirror_tag}}
alias: pypi_mirror
{%- elif script in ["pip_installed", "numpy_git"] %}
- name: {{registry}}/pymor/pypi-mirror_stable_py{{py}}:{{pypi_mirror_tag}}
alias: pypi_mirror
{%- endif %}
image: {{registry}}/pymor/testing_py{{py}}:{{ci_image_tag}}
script:
- |
if [[ "$CI_COMMIT_REF_NAME" == *"github/PR_"* ]]; then
echo selecting hypothesis profile "ci_pr" for branch $CI_COMMIT_REF_NAME
export PYMOR_HYPOTHESIS_PROFILE="ci_pr"
else
echo selecting hypothesis profile "ci" for branch $CI_COMMIT_REF_NAME
export PYMOR_HYPOTHESIS_PROFILE="ci"
fi
- ./.ci/gitlab/test_{{script}}.bash
{%- endfor %}
{%- for py in pythons %}
ci_weekly {{py[0]}} {{py[2]}}:
extends: .pytest
timeout: 5h
variables:
COVERAGE_FILE: coverage_ci_weekly
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
when: always
services:
- name: {{registry}}/pymor/pypi-mirror_stable_py{{py}}:{{pypi_mirror_tag}}
alias: pypi_mirror
image: {{registry}}/pymor/testing_py{{py}}:{{ci_image_tag}}
{# PYMOR_HYPOTHESIS_PROFILE is overwritten from web schedule settings #}
script: ./.ci/gitlab/test_vanilla.bash
{%- endfor %}
submit coverage:
extends: .submit
artifacts:
when: always
name: "submit"
paths:
- cover/*
- .coverage
dependencies:
{%- for script, py, para in matrix if script in ['vanilla', 'oldest', 'numpy_git', 'mpi'] %}
- {{script}} {{py[0]}} {{py[2]}}
{%- endfor %}
{%- for py in pythons %}
submit ci_weekly {{py[0]}} {{py[2]}}:
extends: .submit
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
when: always
dependencies:
- ci_weekly {{py[0]}} {{py[2]}}
needs: ["ci_weekly {{py[0]}} {{py[2]}}"]
{%- endfor %}
{% for OS, PY in testos %}
from source {{loop.index}}/{{loop.length}}:
tags: [mike]
services:
- name: {{registry}}/pymor/pypi-mirror_stable_py{{PY}}:{{pypi_mirror_tag}}
alias: pypi_mirror
needs: ["ci setup"]
{{ never_on_schedule_rule() }}
stage: install_checks
image: {{registry}}/pymor/deploy_checks_{{OS}}:{{ci_image_tag}}
script: ./.ci/gitlab/install_checks/{{OS}}/check.bash
{% endfor %}
binder base image:
extends: .binder
stage: build
script:
- docker build --build-arg CI_IMAGE_TAG=${CI_IMAGE_TAG} -t ${BINDERIMAGE} -f .ci/gitlab/Dockerfile.binder.base .
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- docker run ${BINDERIMAGE} ipython -c "from pymor.basic import *"
- docker push ${BINDERIMAGE}
local docker:
extends: .binder
script:
- make docker_image
- make DOCKER_CMD="ipython -c 'from pymor.basic import *'" docker_exec
{% for url in binder_urls %}
trigger_binder {{loop.index}}/{{loop.length}}:
extends: .test_base
stage: deploy
image: {{registry}}/alpine:3.11
rules:
- if: $CI_COMMIT_REF_NAME == "main"
when: on_success
- if: $CI_COMMIT_TAG != null
when: on_success
before_script:
- apk --update add bash python3
- pip3 install requests
script:
- python3 .ci/gitlab/trigger_binder.py "{{url}}/${CI_COMMIT_REF}"
{% endfor %}
sdist_and_wheel:
extends: .sanity_checks
stage: build
needs: ["ci setup"]
{{ never_on_schedule_rule() }}
artifacts:
paths:
- dist/pymor*.whl
- dist/pymor*.tar.gz
expire_in: 1 week
script: python3 -m build
pypi:
extends: .test_base
image: {{registry}}/pymor/python_3.9:{{ci_image_tag}}
stage: deploy
dependencies:
- sdist_and_wheel
{{ never_on_schedule_rule(exclude_github=True) }}
variables:
ARCHIVE_DIR: pyMOR_wheels-${CI_COMMIT_REF_NAME}
artifacts:
paths:
- ${CI_PROJECT_DIR}/${ARCHIVE_DIR}/pymor*whl
- ${CI_PROJECT_DIR}/${ARCHIVE_DIR}/pymor*tar.gz
expire_in: 6 months
name: pymor-wheels
before_script:
- apt update && apt install -y git
- pip3 install -r requirements.txt
- pip3 install twine
script:
- ${CI_PROJECT_DIR}/.ci/gitlab/pypi_deploy.bash
environment:
name: safe
{% for OS, PY in testos %}
from wheel {{loop.index}}/{{loop.length}}:
extends: .check_wheel
dependencies: ["sdist_and_wheel"]
needs: ["sdist_and_wheel"]
image: {{registry}}/pymor/deploy_checks_{{OS}}:{{ci_image_tag}}
script:
- echo "Testing wheel install on {{OS}} with Python {{PY}}"
- python3 -m pip --version
- devpi install pymor[full]
{% endfor %}
{%- for py in pythons %}
docs build {{py[0]}} {{py[2]}}:
extends: .test_base
tags: [mike]
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
when: never
- when: on_success
services:
- name: {{registry}}/pymor/pypi-mirror_stable_py{{py}}:{{pypi_mirror_tag}}
alias: pypi_mirror
image: {{registry}}/pymor/jupyter_py{{py}}:{{ci_image_tag}}
script:
- ${CI_PROJECT_DIR}/.ci/gitlab/test_docs.bash
stage: build
needs: ["ci setup"]
artifacts:
paths:
- docs/_build/html
- docs/error.log
{% endfor %}
docs:
extends: .docker-in-docker
# makes sure this doesn't land on the test runner
tags: [mike]
image: {{registry}}/alpine:3.11
stage: deploy
resource_group: docs_deploy
needs: ["docs build 3 7", "binder base image"]
dependencies: ["docs build 3 7", "binder base image"]
before_script:
- apk --update add make python3 bash
# chardet is a workaround for https://github.com/jupyterhub/repo2docker/issues/1063
- pip3 install jinja2 pathlib jupyter-repo2docker six chardet
script:
- ${CI_PROJECT_DIR}/.ci/gitlab/deploy_docs.bash
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
when: never
- if: $CI_COMMIT_REF_NAME =~ /^github\/PR_.*/
when: never
- when: on_success
environment:
name: safe
# THIS FILE IS AUTOGENERATED -- DO NOT EDIT #
# Edit and Re-run .ci/gitlab/template.ci.py instead #
'''
# Compile the pipeline template; the config names defined below are
# injected into the render via **locals().
tpl = jinja2.Template(tpl)
# Supported interpreter versions; "oldest"/"newest" pick the range ends.
pythons = ['3.7', '3.8', '3.9']
oldest = [pythons[0]]
newest = [pythons[-1]]
# (test script name, python versions to run it on, parallelism).
test_scripts = [
    ("mpi", pythons, 1),
    ("pip_installed", pythons, 1),
    ("tutorials", pythons, 1),
    ("vanilla", pythons, 1),
    ("numpy_git", newest, 1),
    ("oldest", oldest, 1),
    ("cpp_demo", pythons, 1),
]
# these should be all instances in the federation
binder_urls = [f'https://{sub}.mybinder.org/build/gh/pymor/pymor' for sub in ('gke', 'ovh', 'gesis')]
# (distro image, python version) pairs for the install-check jobs.
testos = [('fedora', '3.9'), ('debian-buster', '3.7'), ('debian-bullseye', '3.9')]
# Image tags are pinned in the repository-level .env file.
env_path = Path(os.path.dirname(__file__)) / '..' / '..' / '.env'
env = dotenv_values(env_path)
ci_image_tag = env['CI_IMAGE_TAG']
pypi_mirror_tag = env['PYPI_MIRROR_TAG']
registry = "zivgitlab.wwu.io/pymor/docker"
# Write the rendered pipeline next to this script.
with open(os.path.join(os.path.dirname(__file__), 'ci.yml'), 'wt') as yml:
    matrix = [(sc, py, pa) for sc, pythons, pa in test_scripts for py in pythons]
    yml.write(tpl.render(**locals()))
# The registry availability check below only runs when a GitLab API token
# is passed as the first CLI argument; otherwise exit successfully.
try:
    token = sys.argv[1]
except IndexError:
    print("not checking image availability, no token given")
    sys.exit(0)
print("Checking image availability\n")
gl = gitlab.Gitlab("https://zivgitlab.uni-muenster.de", private_token=token)
gl.auth()
# Numeric project id of the docker project on the university GitLab.
pymor_id = 2758
pymor = gl.projects.get(pymor_id)
image_tag = ci_image_tag
mirror_tag = pypi_mirror_tag
# Expand the image name stems into full per-python repository names.
images = ["testing", "jupyter"]
mirrors = [f"{r}_py{py}"
           for r, py in product(["pypi-mirror_stable", "pypi-mirror_oldest"], pythons)]
images = [f"{r}_py{py}" for r, py in product(images, pythons)]
# NOTE: the comprehension variable `os` shadows the os module only inside
# the comprehension's own scope, so the module stays usable afterwards.
images += [f"deploy_checks_{os}" for os, _ in testos] + ["python_3.9"]
# Every (repository, tag) pair we expect to exist; found ones are removed.
missing = set((r, mirror_tag) for r in mirrors) | set((r, image_tag) for r in images)
img_count = len(missing)
for repo in pymor.repositories.list(all=True):
    wanted = None
    match_name = repo.name.replace("pymor/", "")
    if match_name in mirrors:
        wanted = mirror_tag
    elif match_name in images:
        wanted = image_tag
    if wanted:
        try:
            tag = repo.tags.get(id=wanted)
            missing.remove((match_name, wanted))
        except gitlab.exceptions.GitlabGetError:
            # Tag not present for this repository -- keep it in `missing`.
            continue
if len(missing):
    # Pretty-print the missing pairs with rich when available, else plain.
    try:
        from rich.console import Console
        from rich.table import Table
        table = Table("image", "tag", title="Not found in Container Registry")
        for el in sorted(missing):
            table.add_row(*el)
        console = Console()
        console.print(table)
        console.print(f"Missing {len(missing)} of {img_count} image:tag pairs")
    except (ImportError, ModuleNotFoundError):
        print(f"Missing {len(missing)} of {img_count} image:tag pairs")
        print(missing)
    # Non-zero exit so CI fails when expected images/tags are absent.
    sys.exit(1)
| 29.852321 | 120 | 0.60735 |
import os
import jinja2
from pathlib import Path
from dotenv import dotenv_values
import sys
import gitlab
from itertools import product
tpl = r'''# THIS FILE IS AUTOGENERATED -- DO NOT EDIT #
# Edit and Re-run .ci/gitlab/template.ci.py instead #
stages:
- sanity
- test
- build
- install_checks
- deploy
{% macro never_on_schedule_rule(exclude_github=False) -%}
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
when: never
{%- if exclude_github %}
- if: $CI_COMMIT_REF_NAME =~ /^github.*/
when: never
{%- endif %}
- when: on_success
{%- endmacro -%}
#************ definition of base jobs *********************************************************************************#
.test_base:
retry:
max: 2
when:
- runner_system_failure
- stuck_or_timeout_failure
- api_failure
tags:
- autoscaling
rules:
- if: $CI_COMMIT_REF_NAME =~ /^staging.*/
when: never
- when: on_success
variables:
PYPI_MIRROR_TAG: {{pypi_mirror_tag}}
CI_IMAGE_TAG: {{ci_image_tag}}
PYMOR_HYPOTHESIS_PROFILE: ci
PYMOR_PYTEST_EXTRA: ""
BINDERIMAGE: ${CI_REGISTRY_IMAGE}/binder:${CI_COMMIT_REF_SLUG}
.pytest:
extends: .test_base
tags:
- long execution time
- autoscaling
environment:
name: unsafe
stage: test
after_script:
- .ci/gitlab/after_script.bash
cache:
key: same_db_on_all_runners
paths:
- .hypothesis
artifacts:
when: always
name: "$CI_JOB_STAGE-$CI_COMMIT_REF_SLUG"
expire_in: 3 months
paths:
- src/pymortests/testdata/check_results/*/*_changed
- docs/source/*_extracted.py
- coverage*
- memory_usage.txt
- .hypothesis
- test_results*.xml
{# note: only Vanilla and numpy runs generate coverage or test_results so we can skip others entirely here #}
.submit:
extends: .test_base
image: {{registry}}/pymor/ci_sanity:{{ci_image_tag}}
variables:
XDG_CACHE_DIR: /tmp
retry:
max: 2
when:
- always
environment:
name: safe
{{ never_on_schedule_rule(exclude_github=True) }}
stage: deploy
script: .ci/gitlab/submit.bash
.docker-in-docker:
tags:
- docker-in-docker
- autoscaling
extends: .test_base
timeout: 45 minutes
retry:
max: 2
when:
- runner_system_failure
- stuck_or_timeout_failure
- api_failure
- unknown_failure
- job_execution_timeout
{# this is intentionally NOT moving with CI_IMAGE_TAG #}
image: {{registry}}/pymor/docker-in-docker:2021.1.0
variables:
DOCKER_HOST: tcp://docker:2375/
DOCKER_DRIVER: overlay2
before_script:
- 'export SHARED_PATH="${CI_PROJECT_DIR}/shared"'
- mkdir -p ${SHARED_PATH}
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
services:
- name: {{registry}}/docker:dind
alias: docker
environment:
name: unsafe
# this should ensure binderhubs can still build a runnable image from our repo
.binder:
extends: .docker-in-docker
stage: install_checks
needs: ["ci setup"]
{{ never_on_schedule_rule() }}
variables:
USER: juno
.check_wheel:
extends: .test_base
stage: install_checks
{{ never_on_schedule_rule() }}
services:
- name: {{registry}}/pymor/devpi:{{pypi_mirror_tag}}
alias: pymor__devpi
before_script:
# bump to our minimal version
- python3 -m pip install devpi-client
- devpi use http://pymor__devpi:3141/root/public --set-cfg
- devpi login root --password ''
- devpi upload --from-dir --formats=* ./dist/*.whl
# the docker service adressing fails on other runners
tags: [mike]
.sanity_checks:
extends: .test_base
image: {{registry}}/pymor/ci_sanity:{{ci_image_tag}}
stage: sanity
#******** end definition of base jobs *********************************************************************************#
# https://docs.gitlab.com/ee/ci/yaml/README.html#workflowrules-templates
include:
- template: 'Workflows/Branch-Pipelines.gitlab-ci.yml'
#******* sanity stage
# this step makes sure that on older python our install fails with
# a nice message ala "python too old" instead of "SyntaxError"
verify setup.py:
extends: .sanity_checks
script:
- python3 setup.py egg_info
ci setup:
extends: .sanity_checks
script:
- ${CI_PROJECT_DIR}/.ci/gitlab/ci_sanity_check.bash "{{ ' '.join(pythons) }}"
#****** test stage
{%- for script, py, para in matrix %}
{{script}} {{py[0]}} {{py[2]}}:
extends: .pytest
{{ never_on_schedule_rule() }}
variables:
COVERAGE_FILE: coverage_{{script}}__{{py}}
{%- if script == "mpi" %}
retry:
max: 2
when: always
{%- endif %}
services:
{%- if script == "oldest" %}
- name: {{registry}}/pymor/pypi-mirror_oldest_py{{py}}:{{pypi_mirror_tag}}
alias: pypi_mirror
{%- elif script in ["pip_installed", "numpy_git"] %}
- name: {{registry}}/pymor/pypi-mirror_stable_py{{py}}:{{pypi_mirror_tag}}
alias: pypi_mirror
{%- endif %}
image: {{registry}}/pymor/testing_py{{py}}:{{ci_image_tag}}
script:
- |
if [[ "$CI_COMMIT_REF_NAME" == *"github/PR_"* ]]; then
echo selecting hypothesis profile "ci_pr" for branch $CI_COMMIT_REF_NAME
export PYMOR_HYPOTHESIS_PROFILE="ci_pr"
else
echo selecting hypothesis profile "ci" for branch $CI_COMMIT_REF_NAME
export PYMOR_HYPOTHESIS_PROFILE="ci"
fi
- ./.ci/gitlab/test_{{script}}.bash
{%- endfor %}
{%- for py in pythons %}
ci_weekly {{py[0]}} {{py[2]}}:
extends: .pytest
timeout: 5h
variables:
COVERAGE_FILE: coverage_ci_weekly
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
when: always
services:
- name: {{registry}}/pymor/pypi-mirror_stable_py{{py}}:{{pypi_mirror_tag}}
alias: pypi_mirror
image: {{registry}}/pymor/testing_py{{py}}:{{ci_image_tag}}
{# PYMOR_HYPOTHESIS_PROFILE is overwritten from web schedule settings #}
script: ./.ci/gitlab/test_vanilla.bash
{%- endfor %}
submit coverage:
extends: .submit
artifacts:
when: always
name: "submit"
paths:
- cover/*
- .coverage
dependencies:
{%- for script, py, para in matrix if script in ['vanilla', 'oldest', 'numpy_git', 'mpi'] %}
- {{script}} {{py[0]}} {{py[2]}}
{%- endfor %}
{%- for py in pythons %}
submit ci_weekly {{py[0]}} {{py[2]}}:
extends: .submit
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
when: always
dependencies:
- ci_weekly {{py[0]}} {{py[2]}}
needs: ["ci_weekly {{py[0]}} {{py[2]}}"]
{%- endfor %}
{% for OS, PY in testos %}
from source {{loop.index}}/{{loop.length}}:
tags: [mike]
services:
- name: {{registry}}/pymor/pypi-mirror_stable_py{{PY}}:{{pypi_mirror_tag}}
alias: pypi_mirror
needs: ["ci setup"]
{{ never_on_schedule_rule() }}
stage: install_checks
image: {{registry}}/pymor/deploy_checks_{{OS}}:{{ci_image_tag}}
script: ./.ci/gitlab/install_checks/{{OS}}/check.bash
{% endfor %}
binder base image:
extends: .binder
stage: build
script:
- docker build --build-arg CI_IMAGE_TAG=${CI_IMAGE_TAG} -t ${BINDERIMAGE} -f .ci/gitlab/Dockerfile.binder.base .
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- docker run ${BINDERIMAGE} ipython -c "from pymor.basic import *"
- docker push ${BINDERIMAGE}
local docker:
extends: .binder
script:
- make docker_image
- make DOCKER_CMD="ipython -c 'from pymor.basic import *'" docker_exec
{% for url in binder_urls %}
trigger_binder {{loop.index}}/{{loop.length}}:
extends: .test_base
stage: deploy
image: {{registry}}/alpine:3.11
rules:
- if: $CI_COMMIT_REF_NAME == "main"
when: on_success
- if: $CI_COMMIT_TAG != null
when: on_success
before_script:
- apk --update add bash python3
- pip3 install requests
script:
- python3 .ci/gitlab/trigger_binder.py "{{url}}/${CI_COMMIT_REF}"
{% endfor %}
sdist_and_wheel:
extends: .sanity_checks
stage: build
needs: ["ci setup"]
{{ never_on_schedule_rule() }}
artifacts:
paths:
- dist/pymor*.whl
- dist/pymor*.tar.gz
expire_in: 1 week
script: python3 -m build
pypi:
extends: .test_base
image: {{registry}}/pymor/python_3.9:{{ci_image_tag}}
stage: deploy
dependencies:
- sdist_and_wheel
{{ never_on_schedule_rule(exclude_github=True) }}
variables:
ARCHIVE_DIR: pyMOR_wheels-${CI_COMMIT_REF_NAME}
artifacts:
paths:
- ${CI_PROJECT_DIR}/${ARCHIVE_DIR}/pymor*whl
- ${CI_PROJECT_DIR}/${ARCHIVE_DIR}/pymor*tar.gz
expire_in: 6 months
name: pymor-wheels
before_script:
- apt update && apt install -y git
- pip3 install -r requirements.txt
- pip3 install twine
script:
- ${CI_PROJECT_DIR}/.ci/gitlab/pypi_deploy.bash
environment:
name: safe
{% for OS, PY in testos %}
from wheel {{loop.index}}/{{loop.length}}:
extends: .check_wheel
dependencies: ["sdist_and_wheel"]
needs: ["sdist_and_wheel"]
image: {{registry}}/pymor/deploy_checks_{{OS}}:{{ci_image_tag}}
script:
- echo "Testing wheel install on {{OS}} with Python {{PY}}"
- python3 -m pip --version
- devpi install pymor[full]
{% endfor %}
{%- for py in pythons %}
docs build {{py[0]}} {{py[2]}}:
extends: .test_base
tags: [mike]
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
when: never
- when: on_success
services:
- name: {{registry}}/pymor/pypi-mirror_stable_py{{py}}:{{pypi_mirror_tag}}
alias: pypi_mirror
image: {{registry}}/pymor/jupyter_py{{py}}:{{ci_image_tag}}
script:
- ${CI_PROJECT_DIR}/.ci/gitlab/test_docs.bash
stage: build
needs: ["ci setup"]
artifacts:
paths:
- docs/_build/html
- docs/error.log
{% endfor %}
docs:
extends: .docker-in-docker
# makes sure this doesn't land on the test runner
tags: [mike]
image: {{registry}}/alpine:3.11
stage: deploy
resource_group: docs_deploy
needs: ["docs build 3 7", "binder base image"]
dependencies: ["docs build 3 7", "binder base image"]
before_script:
- apk --update add make python3 bash
# chardet is a workaround for https://github.com/jupyterhub/repo2docker/issues/1063
- pip3 install jinja2 pathlib jupyter-repo2docker six chardet
script:
- ${CI_PROJECT_DIR}/.ci/gitlab/deploy_docs.bash
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
when: never
- if: $CI_COMMIT_REF_NAME =~ /^github\/PR_.*/
when: never
- when: on_success
environment:
name: safe
# THIS FILE IS AUTOGENERATED -- DO NOT EDIT #
# Edit and Re-run .ci/gitlab/template.ci.py instead #
'''
# ---- template parameters -------------------------------------------------
tpl = jinja2.Template(tpl)
# Python versions the CI matrix is built for.
pythons = ['3.7', '3.8', '3.9']
oldest = [pythons[0]]
newest = [pythons[-1]]
# (script name, python versions to run it on, parallelism) triples.
test_scripts = [
    ("mpi", pythons, 1),
    ("pip_installed", pythons, 1),
    ("tutorials", pythons, 1),
    ("vanilla", pythons, 1),
    ("numpy_git", newest, 1),
    ("oldest", oldest, 1),
    ("cpp_demo", pythons, 1),
]
# these should be all instances in the federation
binder_urls = [f'https://{sub}.mybinder.org/build/gh/pymor/pymor' for sub in ('gke', 'ovh', 'gesis')]
# (docker image OS, python version) pairs used for the install checks.
testos = [('fedora', '3.9'), ('debian-buster', '3.7'), ('debian-bullseye', '3.9')]
# Image tags are read from the repository's .env file two levels up.
env_path = Path(os.path.dirname(__file__)) / '..' / '..' / '.env'
env = dotenv_values(env_path)
ci_image_tag = env['CI_IMAGE_TAG']
pypi_mirror_tag = env['PYPI_MIRROR_TAG']
registry = "zivgitlab.wwu.io/pymor/docker"
# ---- render ci.yml next to this script -----------------------------------
with open(os.path.join(os.path.dirname(__file__), 'ci.yml'), 'wt') as yml:
    # NOTE: the comprehension target `pythons` deliberately shadows the
    # module-level list with each script's own version list (the binding is
    # local to the comprehension and does not leak).
    matrix = [(sc, py, pa) for sc, pythons, pa in test_scripts for py in pythons]
    yml.write(tpl.render(**locals()))
# ---- optional: verify all referenced images exist in the registry --------
# Without a token on the command line only the rendering above is done.
try:
    token = sys.argv[1]
except IndexError:
    print("not checking image availability, no token given")
    sys.exit(0)
print("Checking image availability\n")
gl = gitlab.Gitlab("https://zivgitlab.uni-muenster.de", private_token=token)
gl.auth()
# Hard-coded GitLab project id whose container registry is queried.
pymor_id = 2758
pymor = gl.projects.get(pymor_id)
image_tag = ci_image_tag
mirror_tag = pypi_mirror_tag
images = ["testing", "jupyter"]
mirrors = [f"{r}_py{py}"
           for r, py in product(["pypi-mirror_stable", "pypi-mirror_oldest"], pythons)]
images = [f"{r}_py{py}" for r, py in product(images, pythons)]
# NOTE: the loop variable shadows the `os` module inside this one comprehension.
images += [f"deploy_checks_{os}" for os, _ in testos] + ["python_3.9"]
# Every (repository, tag) pair that must exist; found entries are removed below.
missing = set((r, mirror_tag) for r in mirrors) | set((r, image_tag) for r in images)
img_count = len(missing)
for repo in pymor.repositories.list(all=True):
    wanted = None
    match_name = repo.name.replace("pymor/", "")
    if match_name in mirrors:
        wanted = mirror_tag
    elif match_name in images:
        wanted = image_tag
    if wanted:
        try:
            tag = repo.tags.get(id=wanted)
            missing.remove((match_name, wanted))
        except gitlab.exceptions.GitlabGetError:
            # Tag not present in this repository; leave it in `missing`.
            continue
if len(missing):
    # Pretty-print via rich when available, otherwise plain stdout.
    try:
        from rich.console import Console
        from rich.table import Table
        table = Table("image", "tag", title="Not found in Container Registry")
        for el in sorted(missing):
            table.add_row(*el)
        console = Console()
        console.print(table)
        console.print(f"Missing {len(missing)} of {img_count} image:tag pairs")
    except (ImportError, ModuleNotFoundError):
        print(f"Missing {len(missing)} of {img_count} image:tag pairs")
        print(missing)
    sys.exit(1)
| true | true |
1c3392e6c9028b64b4c55b56789f2ea41763f5d4 | 690 | py | Python | tests/integration/announce/discord/helpers.py | GSH-LAN/byceps | ab8918634e90aaa8574bd1bb85627759cef122fe | [
"BSD-3-Clause"
] | 33 | 2018-01-16T02:04:51.000Z | 2022-03-22T22:57:29.000Z | tests/integration/announce/discord/helpers.py | GSH-LAN/byceps | ab8918634e90aaa8574bd1bb85627759cef122fe | [
"BSD-3-Clause"
] | 7 | 2019-06-16T22:02:03.000Z | 2021-10-02T13:45:31.000Z | tests/integration/announce/discord/helpers.py | GSH-LAN/byceps | ab8918634e90aaa8574bd1bb85627759cef122fe | [
"BSD-3-Clause"
] | 14 | 2019-06-01T21:39:24.000Z | 2022-03-14T17:56:43.000Z | """
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from contextlib import contextmanager
from datetime import datetime
from http import HTTPStatus
from requests_mock import Mocker
def now() -> datetime:
    """Return the current naive UTC timestamp."""
    current_utc = datetime.utcnow()
    return current_utc
@contextmanager
def mocked_webhook_receiver(url: str):
    """Yield a transport mock that answers every POST to *url* with 204."""
    with Mocker() as webhook_mock:
        webhook_mock.post(url, status_code=HTTPStatus.NO_CONTENT)
        yield webhook_mock
def assert_request(mock, expected_content: str) -> None:
    """Assert that exactly one request carrying *expected_content* was sent."""
    assert mock.called
    assert len(mock.request_history) == 1
    expected_payload = {'content': expected_content}
    assert mock.last_request.json() == expected_payload
| 20.909091 | 57 | 0.726087 |
from contextlib import contextmanager
from datetime import datetime
from http import HTTPStatus
from requests_mock import Mocker
def now() -> datetime:
return datetime.utcnow()
@contextmanager
def mocked_webhook_receiver(url: str):
with Mocker() as mock:
mock.post(url, status_code=HTTPStatus.NO_CONTENT)
yield mock
def assert_request(mock, expected_content: str) -> None:
assert mock.called
history = mock.request_history
assert len(history) == 1
actual = mock.last_request.json()
assert actual == {'content': expected_content}
| true | true |
1c3393cf1f4fce02e2eec81b119a7656e4dd6b91 | 6,401 | py | Python | rabbittop/_rabbitmq.py | jve/rabbittop | 1cac40f66135cff5433e3d6fac99cd0898a927de | [
"MIT"
] | 1 | 2019-08-19T19:30:06.000Z | 2019-08-19T19:30:06.000Z | rabbittop/_rabbitmq.py | jve/rabbittop | 1cac40f66135cff5433e3d6fac99cd0898a927de | [
"MIT"
] | null | null | null | rabbittop/_rabbitmq.py | jve/rabbittop | 1cac40f66135cff5433e3d6fac99cd0898a927de | [
"MIT"
] | null | null | null | """ RabbitMQ API
"""
import httplib
import base64
import os
import json
import collections
import datetime
import logging
_log = logging.getLogger()
# Use urllib.extra_quote to deal with weird names
def overview(host, user, password, port):
    """Fetch the broker-wide overview from the management API as a dict."""
    payload = _create_http_client(host, user, password, port, 'overview')
    return dict(payload)
def status(host, user, password, port, node_name=None):
    """Return the status of one node (as a dict) or the raw listing of all nodes.

    When *node_name* is given the per-node endpoint is queried with memory
    statistics included; otherwise the plain ``nodes`` listing is returned.
    """
    if not node_name:
        return _create_http_client(host, user, password, port, 'nodes')
    node_path = 'nodes/{0}?memory=true'.format(node_name)
    return dict(_create_http_client(host, user, password, port, node_path))
def list_exchanges(host, user, password, port, vhost=''):
    """Return exchanges grouped as ``{vhost: {exchange_name: attributes}}``."""
    grouped = collections.defaultdict(dict)
    exchanges = _create_http_client(
        host, user, password, port, 'exchanges/{0}'.format(vhost))
    for exchange in exchanges:
        vhost_name = exchange.pop('vhost')
        exchange_name = exchange.pop('name')
        grouped[vhost_name][exchange_name] = exchange
    return dict(grouped)
def list_queues(host, user, password, port, vhost='', name=''):
    """Return the raw queue listing for *vhost*, optionally a single queue."""
    queue_path = 'queues/{0}/{1}'.format(vhost, name)
    return _create_http_client(host, user, password, port, queue_path)
def _create_http_client(host, user, password, port, path, data=None, method='GET'):
    """ Create a http client that will talk to the RabbitMQ http API.

    Performs a single request against ``/api/<path>`` with HTTP basic auth
    and returns the JSON-decoded response body, or None for an empty body.

    Arguments:
        host / port: management API endpoint.
        user / password: credentials, sent as a Basic Authorization header.
        path: API path relative to '/api' (may include a query string).
        data: optional payload; JSON-encoded when truthy.
        method: HTTP verb, defaults to 'GET'.
    """
    if data:
        data = json.dumps(data)
    conn = httplib.HTTPConnection(host, port=int(port))
    # Python-2 era APIs (httplib, base64.encodestring); encodestring inserts
    # newlines into its output, which must be stripped for the header value.
    credentials = base64.encodestring('{0}:{1}'.format(user, password)).replace('\n', '')
    headers = {
        'Authorization': "Basic {0}".format(credentials),
        'Content-Type': 'application/json'
    }
    _log.debug(os.path.join('/api', path))
    conn.request(method, os.path.join('/api', path), data, headers)
    result = conn.getresponse()
    result = result.read()
    # NOTE(review): the HTTP status code is never checked -- an error body
    # would be handed to json.loads as-is; confirm that this is acceptable.
    return json.loads(result) if result else None
class Rabbit(object):
    """Aggregated snapshot of a RabbitMQ broker, populated from the
    management HTTP API at construction time (overview, nodes, queues).
    """

    # Message-stat keys of interest; currently not referenced by the methods below.
    message_stat_keys = ['publish', 'confirm', 'return_unroutable', ]

    def __init__(self, management_node, user, password, port, vhost=None):
        """Query the management API on *management_node* and cache the results.

        Arguments:
            management_node: host of the management-plugin endpoint.
            user / password: HTTP basic-auth credentials.
            port: management API port.
            vhost: optional vhost to restrict the queue listing to.
        """
        _overview = overview(management_node, user, password, port)
        self._vhost = vhost or ''
        self.version = _overview['rabbitmq_version']
        self.cluster_name = _overview.get('cluster_name')
        self.erlang_version = _overview['erlang_version']
        # Broker-wide message counters and rates, keyed by message state.
        self.messages = {
            'total': {
                'count': _overview['queue_totals']['messages'],
                'rate': _overview['queue_totals']['messages_details']['rate']
            },
            'ready': {
                'count': _overview['queue_totals']['messages_ready'],
                'rate': _overview['queue_totals']['messages_ready_details']['rate']
            },
            'unacknowledged': {
                'count': _overview['queue_totals']['messages_unacknowledged'],
                'rate': _overview['queue_totals']['messages_unacknowledged_details']['rate']
            },
        }
        self._objects = _overview['object_totals']
        # NOTE(review): indexed access assumes 'message_stats' is always
        # present in the overview payload -- confirm against the target API.
        self._stats = _overview['message_stats']
        self._nodes = []
        nodes = status(management_node, user, password, port, node_name=None)
        for node in nodes:
            self._nodes.append(Node(node))
        self._queues = []
        queues = list_queues(management_node, user, password, port, vhost=self._vhost)
        for queue in queues:
            self._queues.append(RabbitQueue(queue))
        # When True, the `queues` property filters out idle queues.
        self.active_queues = False

    @property
    def nodes(self):
        """List of Node snapshots, one per cluster node."""
        return self._nodes

    @property
    def queues(self):
        """Queue snapshots; non-idle ones only when `active_queues` is set.

        NOTE(review): RabbitQueue.__init__ resets `state` to '' after
        reading it, so the "!= 'idle'" filter currently never excludes
        anything -- verify the intended behaviour.
        """
        if self.active_queues:
            return [queue for queue in self._queues if queue.state != 'idle']
        return self._queues

    @property
    def objects(self):
        """The 'object_totals' section of the overview payload."""
        return self._objects

    @property
    def details(self):
        """Map human-readable stat names to their current rates.

        Stats missing from the payload fall back to the string 'N/A'.
        """
        keys = {
            'publish_details': 'Publish',
            'acknowledgment_details': 'Ack',
            'confirm_details': 'Confirm',
        }
        details = {}
        for key in keys:
            details[keys[key]] = self._stats.get(key, {'rate': 'N/A'})['rate']
        return details
class Node(object):
    """Read-only snapshot of one RabbitMQ cluster node as reported by the
    management API's ``nodes`` endpoint."""

    def __init__(self, node_data):
        """Copy the interesting fields out of a single node entry.

        Arguments:
            node_data: dict for one node as returned by the management API.
        """
        self.name = node_data['name']
        # Node type string as reported by the API.
        self.type = node_data['type']
        self.running = node_data['running']
        self.pid = node_data['os_pid']
        # File-descriptor usage.
        self.fd = node_data['fd_used']
        self.fd_total = node_data['fd_total']
        # Socket usage.
        self.sockets_used = node_data['sockets_used']
        self.sockets_total = node_data['sockets_total']
        # Memory usage and alarm flag.
        self.mem_used = node_data['mem_used']
        self.mem_limit = node_data['mem_limit']
        self.mem_alarm = node_data['mem_alarm']
        # Disk usage and alarm flag.
        self.disk_free_limit = node_data['disk_free_limit']
        self.disk_free = node_data['disk_free']
        self.disk_free_alarm = node_data['disk_free_alarm']
        # Erlang process usage.
        self.proc_used = node_data['proc_used']
        self.proc_total = node_data['proc_total']
        # Backward-compatible alias: this attribute was historically exposed
        # under the misspelled name, so existing callers keep working.
        self.proc_totoal = self.proc_total
        # The API reports uptime in milliseconds.
        self.uptime = datetime.timedelta(milliseconds=node_data['uptime'])
class RabbitQueue(object):
    """Snapshot of a single queue as returned by the management API."""

    def __init__(self, queue_data):
        """Copy identifying fields and message counters from *queue_data*."""
        self.name = queue_data.get('name')
        self.vhost = queue_data.get('vhost')
        self.state = queue_data.get('state')
        # Placeholder fields, presumably filled in elsewhere -- TODO confirm.
        self.policy = ''
        self.exclusive = ''
        self.params = ''
        # NOTE(review): this overwrites the state fetched three lines above
        # with '' -- Rabbit.queues' "!= 'idle'" filter therefore never
        # matches; confirm whether this reset is intentional.
        self.state = ''
        self.total = queue_data.get('messages')
        self.total_rate = queue_data.get('messages_details', {'rate': 'N/A'})['rate']
        self.ready = queue_data.get('messages_ready')
        self.ready_rate = queue_data.get('messages_ready_details', {'rate': 'N/A'})['rate']
        self.unacked = queue_data.get('messages_unacknowledged')
        self.unacked_rate = queue_data.get('messages_unacknowledged_details', {'rate': 'N/A'})['rate']
        # Same counters again in nested-dict form (mirrors Rabbit.messages).
        self.messages = {
            'total': {
                'count': queue_data.get('messages'),
                'rate': queue_data.get('messages_details', {'rate': 'N/A'})['rate']
            },
            'ready': {
                'count': queue_data.get('messages_ready'),
                'rate': queue_data.get('messages_ready_details', {'rate': 'N/A'})['rate']
            },
            'unacknowledged': {
                'count': queue_data.get('messages_unacknowledged'),
                'rate': queue_data.get('messages_unacknowledged_details', {'rate': 'N/A'})['rate']
            },
} | 35.759777 | 111 | 0.612248 |
import httplib
import base64
import os
import json
import collections
import datetime
import logging
_log = logging.getLogger()
def overview(host, user, password, port):
return dict(_create_http_client(host, user, password, port, 'overview'))
def status(host, user, password, port, node_name=None):
if node_name:
return dict(_create_http_client(host, user, password, port, 'nodes/{0}?memory=true'.format(node_name)))
return _create_http_client(host, user, password, port, 'nodes')
def list_exchanges(host, user, password, port, vhost=''):
return_values = collections.defaultdict(dict)
for exchange in _create_http_client(host, user, password, port, 'exchanges/{0}'.format(vhost)):
return_values[exchange.pop('vhost')][exchange.pop('name')] = exchange
return dict(return_values)
def list_queues(host, user, password, port, vhost='', name=''):
return _create_http_client(host, user, password, port, 'queues/{0}/{1}'.format(vhost, name))
def _create_http_client(host, user, password, port, path, data=None, method='GET'):
if data:
data = json.dumps(data)
conn = httplib.HTTPConnection(host, port=int(port))
credentials = base64.encodestring('{0}:{1}'.format(user, password)).replace('\n', '')
headers = {
'Authorization': "Basic {0}".format(credentials),
'Content-Type': 'application/json'
}
_log.debug(os.path.join('/api', path))
conn.request(method, os.path.join('/api', path), data, headers)
result = conn.getresponse()
result = result.read()
return json.loads(result) if result else None
class Rabbit(object):
message_stat_keys = ['publish', 'confirm', 'return_unroutable', ]
def __init__(self, management_node, user, password, port, vhost=None):
_overview = overview(management_node, user, password, port)
self._vhost = vhost or ''
self.version = _overview['rabbitmq_version']
self.cluster_name = _overview.get('cluster_name')
self.erlang_version = _overview['erlang_version']
self.messages = {
'total': {
'count': _overview['queue_totals']['messages'],
'rate': _overview['queue_totals']['messages_details']['rate']
},
'ready': {
'count': _overview['queue_totals']['messages_ready'],
'rate': _overview['queue_totals']['messages_ready_details']['rate']
},
'unacknowledged': {
'count': _overview['queue_totals']['messages_unacknowledged'],
'rate': _overview['queue_totals']['messages_unacknowledged_details']['rate']
},
}
self._objects = _overview['object_totals']
self._stats = _overview['message_stats']
self._nodes = []
nodes = status(management_node, user, password, port, node_name=None)
for node in nodes:
self._nodes.append(Node(node))
self._queues = []
queues = list_queues(management_node, user, password, port, vhost=self._vhost)
for queue in queues:
self._queues.append(RabbitQueue(queue))
self.active_queues = False
@property
def nodes(self):
return self._nodes
@property
def queues(self):
if self.active_queues:
return [queue for queue in self._queues if queue.state != 'idle']
return self._queues
@property
def objects(self):
return self._objects
@property
def details(self):
keys = {
'publish_details': 'Publish',
'acknowledgment_details': 'Ack',
'confirm_details': 'Confirm',
}
details = {}
for key in keys:
details[keys[key]] = self._stats.get(key, {'rate': 'N/A'})['rate']
return details
class Node(object):
def __init__(self, node_data):
self.name = node_data['name']
self.type = node_data['type']
self.running = node_data['running']
self.pid = node_data['os_pid']
self.fd = node_data['fd_used']
self.fd_total = node_data['fd_total']
self.sockets_used = node_data['sockets_used']
self.sockets_total = node_data['sockets_total']
self.mem_used = node_data['mem_used']
self.mem_limit = node_data['mem_limit']
self.mem_alarm = node_data['mem_alarm']
self.disk_free_limit = node_data['disk_free_limit']
self.disk_free = node_data['disk_free']
self.disk_free_alarm = node_data['disk_free_alarm']
self.proc_used = node_data['proc_used']
self.proc_totoal = node_data['proc_total']
self.uptime = datetime.timedelta(milliseconds=node_data['uptime'])
class RabbitQueue(object):
def __init__(self, queue_data):
self.name = queue_data.get('name')
self.vhost = queue_data.get('vhost')
self.state = queue_data.get('state')
self.policy = ''
self.exclusive = ''
self.params = ''
self.state = ''
self.total = queue_data.get('messages')
self.total_rate = queue_data.get('messages_details', {'rate': 'N/A'})['rate']
self.ready = queue_data.get('messages_ready')
self.ready_rate = queue_data.get('messages_ready_details', {'rate': 'N/A'})['rate']
self.unacked = queue_data.get('messages_unacknowledged')
self.unacked_rate = queue_data.get('messages_unacknowledged_details', {'rate': 'N/A'})['rate']
self.messages = {
'total': {
'count': queue_data.get('messages'),
'rate': queue_data.get('messages_details', {'rate': 'N/A'})['rate']
},
'ready': {
'count': queue_data.get('messages_ready'),
'rate': queue_data.get('messages_ready_details', {'rate': 'N/A'})['rate']
},
'unacknowledged': {
'count': queue_data.get('messages_unacknowledged'),
'rate': queue_data.get('messages_unacknowledged_details', {'rate': 'N/A'})['rate']
},
} | true | true |
1c3394928c52fedcbcc3da1f0b982b0115c87fd9 | 3,783 | py | Python | helper.py | ypislon/bachelorarbeit | 2e1d07d667d505a79d1adb9363cec5d21dafe832 | [
"MIT"
] | null | null | null | helper.py | ypislon/bachelorarbeit | 2e1d07d667d505a79d1adb9363cec5d21dafe832 | [
"MIT"
] | null | null | null | helper.py | ypislon/bachelorarbeit | 2e1d07d667d505a79d1adb9363cec5d21dafe832 | [
"MIT"
] | null | null | null | from datetime import date, datetime, timedelta
from db_schema import Link
from lxml import html
from io import StringIO
from urllib.parse import urljoin, urlparse
import logging
def parse_articles_url(website):
    """Expand the placeholder tokens in a website's article-page pattern.

    Supported placeholders: ``$c$`` (category name), ``$x$`` (running index
    1..499), ``$y$`` (year 2016-2018), ``$m$`` (zero-padded month) and
    ``$d$`` (zero-padded day over March 2016).  Returns the website together
    with the list of all concrete URLs.
    """
    pattern = website.article_page
    urls = [pattern]

    # Category placeholder (url-major ordering, as before).
    if "$c$" in pattern and len(website.categories):
        urls = [url.replace("$c$", category.name)
                for url in urls
                for category in website.categories]

    # Running-index placeholder, values 1..499 (url-major ordering).
    if "$x$" in pattern:
        urls = [url.replace("$x$", str(index))
                for url in urls
                for index in range(1, 500)]

    # Date bounds used for the day placeholder below.
    start_date = date(2016, 3, 1)
    end_date = date(2016, 4, 1)

    # Year placeholder (year-major ordering, as before).
    if "$y$" in pattern:
        urls = [url.replace("$y$", str(year))
                for year in (2016, 2017, 2018)
                for url in urls]

    # Month placeholder, zero-padded to two digits (month-major ordering).
    if "$m$" in pattern:
        urls = [url.replace("$m$", "%02d" % month)
                for month in range(1, 13)
                for url in urls]

    # Day placeholder, zero-padded to two digits (date-major ordering).
    if "$d$" in pattern:
        urls = [url.replace("$d$", "%02d" % day.day)
                for day in daterange(start_date, end_date)
                for url in urls]

    return website, urls
def daterange(start_date, end_date):
    """Yield each date from *start_date* (inclusive) to *end_date* (exclusive)."""
    total_days = (end_date - start_date).days
    for offset in range(total_days):
        yield start_date + timedelta(offset)
def parse_articles_links(article):
    """Extract every hyperlink from *article*'s raw HTML and persist one
    Link row per anchor.

    For each ``<a href>`` element the absolute URL, the scheme+netloc domain
    and the link text (when present) are stored alongside the owning article.
    """
    content_raw = StringIO(article.content_raw)
    tree = html.parse(content_raw)
    links = tree.xpath('//a[@href]')
    for link in links:
        # Skip anchors without a usable href, and javascript: pseudo-links.
        # (Bug fix: the original used `pass` here, which skipped nothing --
        # empty hrefs then crashed on the [0] index below and javascript
        # links were stored anyway.)
        if len(link.xpath('@href')) == 0:
            continue
        if "javascript" in link.xpath('@href')[0]:
            continue
        link_db = Link()
        link_db.article = article
        # Resolve relative URLs against the article's own URL.
        if "http" not in link.xpath('@href')[0]:
            link_db.url = urljoin(article.url, link.xpath('@href')[0])
        else:
            link_db.url = link.xpath('@href')[0]
        # Derive "scheme://netloc/" for per-domain grouping.
        try:
            link_parsed = urlparse(link_db.url)
            link_db.domain = '{uri.scheme}://{uri.netloc}/'.format(uri=link_parsed)
        except ValueError:
            link_db.domain = None
        # The anchor may be image-only and carry no text node.
        try:
            link_db.link_text = link.xpath('text()')[0]
        except IndexError:
            link_db.link_text = None
        link_db.save()
| 34.081081 | 102 | 0.564896 | from datetime import date, datetime, timedelta
from db_schema import Link
from lxml import html
from io import StringIO
from urllib.parse import urljoin, urlparse
import logging
def parse_articles_url(website):
url_fragment = website.article_page
urls = list()
urls.append(url_fragment)
if "$c$" in url_fragment and len(website.categories):
urls_temp = list()
for i, url in enumerate(urls):
for category in website.categories:
url_temp = url.replace("$c$", category.name)
urls_temp.append(url_temp)
urls = urls_temp
if "$x$" in url_fragment:
urls_temp = list()
for i, url in enumerate(urls):
for j in range(1,500):
url_temp = url.replace("$x$", str(j))
urls_temp.append(url_temp)
urls = urls_temp
start_date = date(2016, 3, 1)
end_date = date(2016, 4, 1)
if "$y$" in url_fragment:
urls_temp = list()
for year in (2016, 2017, 2018):
for url in urls:
url_temp = url.replace("$y$", str(year))
urls_temp.append(url_temp)
urls = urls_temp
if "$m$" in url_fragment:
urls_temp = list()
for month in (1,2,3,4,5,6,7,8,9,10,11,12):
for url in urls:
if(month < 10):
month_string = "0" + str(month)
else:
month_string = str(month)
url_temp = url.replace("$m$", month_string)
urls_temp.append(url_temp)
urls = urls_temp
if "$d$" in url_fragment:
urls_temp = list()
for temp_date in daterange(start_date, end_date):
for url in urls:
day_string = str(temp_date.day)
if(len(day_string)) == 1:
day_string = "0" + day_string
url_temp = url.replace("$d$", day_string)
urls_temp.append(url_temp)
urls = urls_temp
return website, urls
def daterange(start_date, end_date):
for n in range(int ((end_date - start_date).days)):
yield start_date + timedelta(n)
def parse_articles_links(article):
f]')
for link in links:
if len(link.xpath('@href')) == 0:
pass
if "javascript" in link.xpath('@href')[0]:
pass
link_db = Link()
link_db.article = article
if "http" not in link.xpath('@href')[0]:
link_db.url = urljoin(article.url, link.xpath('@href')[0])
else:
link_db.url = link.xpath('@href')[0]
try:
link_parsed = urlparse(link_db.url)
link_db.domain = '{uri.scheme}://{uri.netloc}/'.format(uri=link_parsed)
except:
link_db.domain = None
try:
link_db.link_text = link.xpath('text()')[0]
except:
link_db.link_text = None
link_db.save()
| true | true |
1c3394d89f14bc25458db3b830953f77ae6fec43 | 6,005 | py | Python | treex/losses/mean_absolute_percentage_error.py | ptigwe/treex | c46687376ccc50c8fea6cb8617e22e4b4dd1924a | [
"MIT"
] | null | null | null | treex/losses/mean_absolute_percentage_error.py | ptigwe/treex | c46687376ccc50c8fea6cb8617e22e4b4dd1924a | [
"MIT"
] | null | null | null | treex/losses/mean_absolute_percentage_error.py | ptigwe/treex | c46687376ccc50c8fea6cb8617e22e4b4dd1924a | [
"MIT"
] | null | null | null | import typing as tp
import jax.numpy as jnp
from treex import types, utils
from treex.losses.loss import Loss, Reduction
def mean_absolute_percentage_error(
    target: jnp.ndarray, preds: jnp.ndarray
) -> jnp.ndarray:
    """
    Computes the mean absolute percentage error (MAPE) between target and predictions.

    After computing the absolute distance between the true value and the prediction value
    and dividing by the (clamped) absolute true value, the mean value over the last
    dimension is returned.

    Usage:
    ```python
    rng = jax.random.PRNGKey(42)
    target = jax.random.randint(rng, shape=(2, 3), minval=0, maxval=2)
    preds = jax.random.uniform(rng, shape=(2, 3))
    loss = tx.losses.mean_absolute_percentage_error(target, preds)
    assert loss.shape == (2,)
    assert jnp.array_equal(loss, 100. * jnp.mean(jnp.abs((preds - target) / jnp.maximum(jnp.abs(target), types.EPSILON)), axis=-1))
    ```

    Arguments:
        target: Ground truth values. shape = `[batch_size, d0, .. dN]`.
        preds: The predicted values. shape = `[batch_size, d0, .. dN]`.

    Returns:
        Mean absolute percentage error values. shape = `[batch_size, d0, .. dN-1]`.
    """
    # Match dtypes so integer targets divide cleanly.
    target = target.astype(preds.dtype)
    # Clamp |target| away from zero to avoid division by zero.
    diff = jnp.abs((preds - target) / jnp.maximum(jnp.abs(target), types.EPSILON))
    return 100.0 * jnp.mean(diff, axis=-1)
class MeanAbsolutePercentageError(Loss):
"""
Computes the mean absolute errors between target and predictions.
`loss = mean(abs((target - preds) / target))`
Usage:
```python
target = jnp.array([[1.0, 1.0], [0.9, 0.0]])
preds = jnp.array([[1.0, 1.0], [1.0, 0.0]])
# Using 'auto'/'sum_over_batch_size' reduction type.
mape = tx.losses.MeanAbsolutePercentageError()
result = mape(target, preds)
assert np.isclose(result, 2.78, rtol=0.01)
# Calling with 'sample_weight'.
assert np.isclose(mape(target, preds, sample_weight=jnp.array([0.1, 0.9])), 2.5, rtol=0.01)
# Using 'sum' reduction type.
mape = tx.losses.MeanAbsolutePercentageError(reduction=tx.losses.Reduction.SUM)
assert np.isclose(mape(target, preds), 5.6, rtol=0.01)
# Using 'none' reduction type.
mape = tx.losses.MeanAbsolutePercentageError(reduction=tx.losses.Reduction.NONE)
assert jnp.all(np.isclose(result, [0. , 5.6], rtol=0.01))
```
Usage with the Elegy API:
```python
model = elegy.Model(
module_fn,
loss=tx.losses.MeanAbsolutePercentageError(),
metrics=elegy.metrics.Mean(),
)
```
"""
def __init__(
self,
reduction: tp.Optional[Reduction] = None,
weight: tp.Optional[float] = None,
on: tp.Optional[types.IndexLike] = None,
**kwargs
):
"""
Initializes `Mean` class.
Arguments:
reduction: (Optional) Type of `tx.losses.Reduction` to apply to
loss. Default value is `SUM_OVER_BATCH_SIZE`. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`elegy` `compile` and `fit`, or `SUM_OVER_BATCH_SIZE`
will raise an error.
for more details.
weight: Optional weight contribution for the total loss. Defaults to `1`.
on: A string or integer, or iterable of string or integers, that
indicate how to index/filter the `target` and `preds`
arguments before passing them to `call`. For example if `on = "a"` then
`target = target["a"]`. If `on` is an iterable
the structures will be indexed iteratively, for example if `on = ["a", 0, "b"]`
then `target = target["a"][0]["b"]`, same for `preds`. For more information
check out [Keras-like behavior](https://poets-ai.github.io/elegy/guides/modules-losses-metrics/#keras-like-behavior).
"""
return super().__init__(reduction=reduction, weight=weight, on=on, **kwargs)
def call(
self,
target: jnp.ndarray,
preds: jnp.ndarray,
sample_weight: tp.Optional[
jnp.ndarray
] = None, # not used, __call__ handles it, left for documentation purposes.
) -> jnp.ndarray:
"""
Invokes the `MeanAbsolutePercentageError` instance.
Arguments:
target: Ground truth values. shape = `[batch_size, d0, .. dN]`, except
sparse loss functions such as sparse categorical crossentropy where
shape = `[batch_size, d0, .. dN-1]`
preds: The predicted values. shape = `[batch_size, d0, .. dN]`
sample_weight: Optional `sample_weight` acts as a
coefficient for the loss. If a scalar is provided, then the loss is
simply scaled by the given value. If `sample_weight` is a tensor of size
`[batch_size]`, then the total loss for each sample of the batch is
rescaled by the corresponding element in the `sample_weight` vector. If
the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be
broadcasted to this shape), then each loss element of `preds` is scaled
by the corresponding value of `sample_weight`. (Note on`dN-1`: all loss
functions reduce by 1 dimension, usually axis=-1.)
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1`
because all loss functions reduce by 1 dimension, usually axis=-1.)
Raises:
ValueError: If the shape of `sample_weight` is invalid.
"""
return mean_absolute_percentage_error(target, preds)
| 39.506579 | 134 | 0.603664 | import typing as tp
import jax.numpy as jnp
from treex import types, utils
from treex.losses.loss import Loss, Reduction
def mean_absolute_percentage_error(
target: jnp.ndarray, preds: jnp.ndarray
) -> jnp.ndarray:
target = target.astype(preds.dtype)
diff = jnp.abs((preds - target) / jnp.maximum(jnp.abs(target), types.EPSILON))
return 100.0 * jnp.mean(diff, axis=-1)
class MeanAbsolutePercentageError(Loss):
def __init__(
self,
reduction: tp.Optional[Reduction] = None,
weight: tp.Optional[float] = None,
on: tp.Optional[types.IndexLike] = None,
**kwargs
):
return super().__init__(reduction=reduction, weight=weight, on=on, **kwargs)
def call(
self,
target: jnp.ndarray,
preds: jnp.ndarray,
sample_weight: tp.Optional[
jnp.ndarray
] = None,
) -> jnp.ndarray:
return mean_absolute_percentage_error(target, preds)
| true | true |
1c339520bc67e47717c062f56f9d48e1307b4a84 | 5,979 | py | Python | nemo/collections/nlp/nm/trainables/common/huggingface/roberta_nm.py | borisdayma/NeMo | 88f6c5b93574adb219185d5ded14b6393c485ea0 | [
"Apache-2.0"
] | 10 | 2020-03-17T08:32:06.000Z | 2021-04-19T19:03:50.000Z | nemo/collections/nlp/nm/trainables/common/huggingface/roberta_nm.py | dcmartin/NeMo | d2120a40bf23d3e38ff5677c2685c712f297e6b1 | [
"Apache-2.0"
] | null | null | null | nemo/collections/nlp/nm/trainables/common/huggingface/roberta_nm.py | dcmartin/NeMo | d2120a40bf23d3e38ff5677c2685c712f297e6b1 | [
"Apache-2.0"
] | 1 | 2020-10-21T18:09:46.000Z | 2020-10-21T18:09:46.000Z | # =============================================================================
# Copyright 2020 NVIDIA. All Rights Reserved.
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from typing import List, Optional
from transformers import (
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
RobertaConfig,
RobertaModel,
)
from nemo.backends.pytorch.nm import TrainableNM
from nemo.core.neural_modules import PretrainedModelInfo
from nemo.core.neural_types import ChannelType, NeuralType
from nemo.utils.decorators import add_port_docs
__all__ = ['Roberta']
class Roberta(TrainableNM):
"""
ROBERTA wraps around the Huggingface implementation of ROBERTA from their
transformers repository for easy use within NeMo.
Args:
pretrained_model_name (str): If using a pretrained model, this should
be the model's name. Otherwise, should be left as None.
config_filename (str): path to model configuration file. Optional.
vocab_size (int): Size of the vocabulary file, if not using a
pretrained model.
hidden_size (int): Size of the encoder and pooler layers.
num_hidden_layers (int): Number of hidden layers in the encoder.
num_attention_heads (int): Number of attention heads for each layer.
intermediate_size (int): Size of intermediate layers in the encoder.
hidden_act (str): Activation function for encoder and pooler layers;
"gelu", "relu", and "swish" are supported.
max_position_embeddings (int): The maximum number of tokens in a
sequence.
"""
@property
@add_port_docs()
def input_ports(self):
"""Returns definitions of module input ports.
input_ids: input token ids
token_type_ids: segment type ids
attention_mask: attention mask
"""
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"token_type_ids": NeuralType(('B', 'T'), ChannelType()),
"attention_mask": NeuralType(('B', 'T'), ChannelType()),
}
@property
@add_port_docs()
def output_ports(self):
"""Returns definitions of module output ports.
hidden_states: output embedding
"""
return {"hidden_states": NeuralType(('B', 'T', 'D'), ChannelType())}
def __init__(
self,
pretrained_model_name=None,
config_filename=None,
vocab_size=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
max_position_embeddings=512,
):
super().__init__()
# Check that only one of pretrained_model_name, config_filename, and
# vocab_size was passed in
total = 0
if pretrained_model_name is not None:
total += 1
if config_filename is not None:
total += 1
if vocab_size is not None:
total += 1
if total != 1:
raise ValueError(
"Only one of pretrained_model_name, vocab_size, "
+ "or config_filename should be passed into the "
+ "ROBERTA constructor."
)
# TK: The following code checks the same once again.
if vocab_size is not None:
config = RobertaConfig(
vocab_size_or_config_json_file=vocab_size,
vocab_size=vocab_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
max_position_embeddings=max_position_embeddings,
)
model = RobertaModel(config)
elif pretrained_model_name is not None:
model = RobertaModel.from_pretrained(pretrained_model_name)
elif config_filename is not None:
config = RobertaConfig.from_json_file(config_filename)
model = RobertaModel(config)
else:
raise ValueError(
"Either pretrained_model_name or vocab_size must" + " be passed into the ROBERTA constructor"
)
model.to(self._device)
self.add_module("roberta", model)
self.config = model.config
self._hidden_size = model.config.hidden_size
@property
def hidden_size(self):
"""
Property returning hidden size.
Returns:
Hidden size.
"""
return self._hidden_size
@staticmethod
def list_pretrained_models() -> Optional[List[PretrainedModelInfo]]:
pretrained_models = []
for key, value in ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP.items():
model_info = PretrainedModelInfo(
pretrained_model_name=key,
description="weights by HuggingFace",
parameters=ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP[key],
location=value,
)
pretrained_models.append(model_info)
return pretrained_models
def forward(self, input_ids, token_type_ids, attention_mask):
return self.roberta(input_ids, attention_mask=attention_mask)[0]
| 36.457317 | 109 | 0.629369 |
from typing import List, Optional
from transformers import (
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
RobertaConfig,
RobertaModel,
)
from nemo.backends.pytorch.nm import TrainableNM
from nemo.core.neural_modules import PretrainedModelInfo
from nemo.core.neural_types import ChannelType, NeuralType
from nemo.utils.decorators import add_port_docs
__all__ = ['Roberta']
class Roberta(TrainableNM):
@property
@add_port_docs()
def input_ports(self):
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"token_type_ids": NeuralType(('B', 'T'), ChannelType()),
"attention_mask": NeuralType(('B', 'T'), ChannelType()),
}
@property
@add_port_docs()
def output_ports(self):
return {"hidden_states": NeuralType(('B', 'T', 'D'), ChannelType())}
def __init__(
self,
pretrained_model_name=None,
config_filename=None,
vocab_size=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
max_position_embeddings=512,
):
super().__init__()
total = 0
if pretrained_model_name is not None:
total += 1
if config_filename is not None:
total += 1
if vocab_size is not None:
total += 1
if total != 1:
raise ValueError(
"Only one of pretrained_model_name, vocab_size, "
+ "or config_filename should be passed into the "
+ "ROBERTA constructor."
)
if vocab_size is not None:
config = RobertaConfig(
vocab_size_or_config_json_file=vocab_size,
vocab_size=vocab_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
max_position_embeddings=max_position_embeddings,
)
model = RobertaModel(config)
elif pretrained_model_name is not None:
model = RobertaModel.from_pretrained(pretrained_model_name)
elif config_filename is not None:
config = RobertaConfig.from_json_file(config_filename)
model = RobertaModel(config)
else:
raise ValueError(
"Either pretrained_model_name or vocab_size must" + " be passed into the ROBERTA constructor"
)
model.to(self._device)
self.add_module("roberta", model)
self.config = model.config
self._hidden_size = model.config.hidden_size
@property
def hidden_size(self):
return self._hidden_size
@staticmethod
def list_pretrained_models() -> Optional[List[PretrainedModelInfo]]:
pretrained_models = []
for key, value in ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP.items():
model_info = PretrainedModelInfo(
pretrained_model_name=key,
description="weights by HuggingFace",
parameters=ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP[key],
location=value,
)
pretrained_models.append(model_info)
return pretrained_models
def forward(self, input_ids, token_type_ids, attention_mask):
return self.roberta(input_ids, attention_mask=attention_mask)[0]
| true | true |
1c33959f6b47a21892dd478875a12725a9158efb | 3,090 | py | Python | corai_util/finance/src/implied_vol.py | Code-Cornelius/python_libraries | 71c388da60e2aeb94369c3813faca93bf6a18ebf | [
"MIT"
] | 1 | 2022-01-01T22:10:04.000Z | 2022-01-01T22:10:04.000Z | corai_util/finance/src/implied_vol.py | Code-Cornelius/python_libraries | 71c388da60e2aeb94369c3813faca93bf6a18ebf | [
"MIT"
] | null | null | null | corai_util/finance/src/implied_vol.py | Code-Cornelius/python_libraries | 71c388da60e2aeb94369c3813faca93bf6a18ebf | [
"MIT"
] | null | null | null | # normal libraries
import warnings
import numpy as np
from scipy.optimize import bisect
from scipy.stats import norm
# priv_libraries
from corai_util.finance.src.bs_model import BlackScholes, BlackScholesVegaCore
from corai_util.calculus.src.optimization import newtons_method_vectorised
# section ######################################################################
# #############################################################################
# IV
def implied_vol_bisect(CallPutFlag, s0, K, T, R, d, experimented_price):
"""
Args:
CallPutFlag:
s0: starting point of the S's,
K: strike price
T:
R:
d:
experimented_price:
Returns:
"""
# Bisection algorithm when the Lee-Li algorithm breaks down
def _smileMin(vol, *args):
K, s0, T, r, price = args
return price - BlackScholes(CallPutFlag, s0, K, T, r, d, sigma=vol)
vMin, vMax = 0.00001, 20.
# in order to find the implied volatility, one has to find the value at which smileMin crosses zero.
try:
return bisect(_smileMin, vMin, vMax, args=(K, s0, T, R, experimented_price),
xtol=1e-20,
rtol=1e-15,
full_output=False, disp=True)
except ValueError:
warnings.warn("Bisect didn't find the implied volatility \sigma_{IMP}, returned NaN.")
return np.NaN
def implied_volatility_newton(CallPutFlag, s0, K, T, R, d, experimented_price):
"""
Compute Implied Volatility by newton's method.
The function is vectorised regarding K.
Args:
CallPutFlag:
d: dividends
K: strike price
s0: initial price
T: maturity
R: rate of interest rates
experimented_price: price of the underlying
Returns: the Implied Volatility \sigma_imp
"""
assert len(K) == len(experimented_price)
fx = lambda varSIGMA, indices: BlackScholes(CallPutFlag, s0, K[indices],
T, R, d, sigma=varSIGMA[indices]) - experimented_price[indices]
dfx = lambda varSIGMA, indices: BlackScholesVegaCore(np.exp(-R * T), np.exp((R - d) * T) * s0,
K[indices], T, varSIGMA[indices])
try:
x = np.full(len(experimented_price), 1.)
newtons_method_vectorised(fx, dfx, x)
return x
except ValueError:
warnings.warn("Bisect did not find the $\sigma_{IMP}$, returned NaN.")
return np.NaN
# section ######################################################################
# #############################################################################
# Total IV
def total_implied_vol_bisect(CallPutFlag, s0, K, R, d, experimented_price):
sigma = implied_vol_bisect(CallPutFlag, s0, K, 1, R, d, experimented_price)
return sigma * sigma
def total_implied_vol_newton(CallPutFlag, s0, K, R, d, experimented_price):
sigma = implied_volatility_newton(CallPutFlag, s0, K, 1, R, d, experimented_price)
return sigma * sigma
| 31.530612 | 111 | 0.568608 |
import warnings
import numpy as np
from scipy.optimize import bisect
from scipy.stats import norm
from corai_util.finance.src.bs_model import BlackScholes, BlackScholesVegaCore
from corai_util.calculus.src.optimization import newtons_method_vectorised
| true | true |
1c33963ddc2208d0f62dde80080df6d235a6776b | 3,576 | py | Python | src/util/logging.py | ireina7/gzsl-seg | 9aad220274b4a58b59f5da430f873b5dfc21e458 | [
"MIT"
] | 1 | 2022-03-15T04:46:00.000Z | 2022-03-15T04:46:00.000Z | src/util/logging.py | ireina7/gzsl-seg | 9aad220274b4a58b59f5da430f873b5dfc21e458 | [
"MIT"
] | null | null | null | src/util/logging.py | ireina7/gzsl-seg | 9aad220274b4a58b59f5da430f873b5dfc21e458 | [
"MIT"
] | null | null | null | import sys
import matplotlib.pyplot as plt
from util.typing.basic import *
from src.config import *
def show_figure_nonblocking() -> None:
plt.show(block = False)
plt.pause(0.001)
#end show_figure_nonblocking
def show_figure_blocking() -> None:
plt.show()
#end show_figure_blocking
def show_if(EPOCH: int, LOOP: int):
def f(epoch: int, loop: int, call_back) -> None:
if epoch % EPOCH == 0 and loop % LOOP == 0:
call_back()
return f
show_figure_if = show_if(EPOCH_TO_SHOW_FIGURE, LOOP_TO_SHOW_FIGURE)
show_msg_if = show_if(EPOCH_TO_SHOW_MSG, LOOP_TO_SHOW_MSG )
def draw_sample(batch):
imgs, msks = batch['image'], batch['label']
fig, axs = plt.subplots(1, 3, figsize=(10, 3))
axs[0].imshow(imgs[0].permute(1, 2, 0))
axs[1].imshow(msks[0], cmap = 'tab20', vmin = 0, vmax = 21)
#end draw_sample
# def show_sample(batch):
# draw_sample(batch)
# log("Displaying image of {}".format(batch['name']))
# # plt.colorbar()
# show_figure_nonblocking()
# #end show_sample
def show_single_figure_result(batch, pred, mask):
ans = pred[0].clone().detach().cpu().numpy()
#x = np.where(ans == 0, 255, ans)
x = ans
y = mask.cpu()[0]
x[y == 255] = 255
draw_sample(batch)
# pyplot.figure()
plt.imshow(x, cmap = 'tab20', vmin = 0, vmax = 21)
plt.colorbar()
# show_figure_nonblocking()
end = 0
default_files = {
'msg': sys.stdout,
'err': sys.stderr,
'mod': './output',
}
class Logger(object):
"""
The main Logger
@param files: {
msg: File | sys.stdout -- Message
err: File | sys.stderr -- Error
mod: File -- model
} -- Determine where the logger should log.
@param painter: Painter -- Determine how to log and show figures.
"""
def __init__(self, files=default_files):
self.files = files
# self.painter = painter
end
#end __init__
def log(self, msg: str) -> None:
"""
Log messages
"""
log_msg = f'[info] {msg}'
print(log_msg, file=self.files['msg'])
#end log
def debug(self, msg: str, description: str = "") -> None:
"""
For debugging.
Should have the same output file as the `log` method.
"""
dbg_msg = f'[debug] {msg}'
print(dbg_msg, file=self.files['msg'])
#end debug
def error(self, msg: str) -> None:
"""
Print error messages.
"""
err_msg = f'[error] {msg}'
print(err_msg, file=self.files['err'])
#end error
def custom(self, tag: str):
"""
For custom logging.
"""
def custom_msg(msg: str) -> None:
cus_msg = f'{tag} {msg}'
print(cus_msg, file=self.files['msg'])
#end custom_msg
return custom_msg
#end custom
def blank_line(self, i: int=1) -> None:
"""
Only for log.
Should not be used in error logging and others.
"""
print("", file=self.files['msg'])
#end blank_line
#end class Logger
logger = Logger()
class Painter(object):
def __init__(self, logger: Logger=logger):
self.logger = logger
def plot(self, xs: List[int], ys: List[int], style='.-') -> None:
plt.figure()
plt.plot(xs, ys, style)
plt.grid()
#end plot
def draw_sample(self, batch):
draw_sample(batch)
def draw_seg_result(self, batch, pred, mask):
show_single_figure_result(batch, pred, mask)
def save_figure(self, path: str) -> None:
try:
plt.savefig(path)
self.logger.log(f'saved figure {path}.')
except IOError:
self.logger.error(f'Trying to save figure {path} failed: {IOError}')
#end save_figure
#end save_figure
#end class Painter
painter = Painter()
| 21.672727 | 74 | 0.626957 | import sys
import matplotlib.pyplot as plt
from util.typing.basic import *
from src.config import *
def show_figure_nonblocking() -> None:
plt.show(block = False)
plt.pause(0.001)
def show_figure_blocking() -> None:
plt.show()
def show_if(EPOCH: int, LOOP: int):
def f(epoch: int, loop: int, call_back) -> None:
if epoch % EPOCH == 0 and loop % LOOP == 0:
call_back()
return f
show_figure_if = show_if(EPOCH_TO_SHOW_FIGURE, LOOP_TO_SHOW_FIGURE)
show_msg_if = show_if(EPOCH_TO_SHOW_MSG, LOOP_TO_SHOW_MSG )
def draw_sample(batch):
imgs, msks = batch['image'], batch['label']
fig, axs = plt.subplots(1, 3, figsize=(10, 3))
axs[0].imshow(imgs[0].permute(1, 2, 0))
axs[1].imshow(msks[0], cmap = 'tab20', vmin = 0, vmax = 21)
lt(batch, pred, mask):
ans = pred[0].clone().detach().cpu().numpy()
x = ans
y = mask.cpu()[0]
x[y == 255] = 255
draw_sample(batch)
plt.imshow(x, cmap = 'tab20', vmin = 0, vmax = 21)
plt.colorbar()
end = 0
default_files = {
'msg': sys.stdout,
'err': sys.stderr,
'mod': './output',
}
class Logger(object):
def __init__(self, files=default_files):
self.files = files
end
def log(self, msg: str) -> None:
log_msg = f'[info] {msg}'
print(log_msg, file=self.files['msg'])
def debug(self, msg: str, description: str = "") -> None:
dbg_msg = f'[debug] {msg}'
print(dbg_msg, file=self.files['msg'])
def error(self, msg: str) -> None:
err_msg = f'[error] {msg}'
print(err_msg, file=self.files['err'])
def custom(self, tag: str):
def custom_msg(msg: str) -> None:
cus_msg = f'{tag} {msg}'
print(cus_msg, file=self.files['msg'])
return custom_msg
def blank_line(self, i: int=1) -> None:
print("", file=self.files['msg'])
logger = Logger()
class Painter(object):
def __init__(self, logger: Logger=logger):
self.logger = logger
def plot(self, xs: List[int], ys: List[int], style='.-') -> None:
plt.figure()
plt.plot(xs, ys, style)
plt.grid()
def draw_sample(self, batch):
draw_sample(batch)
def draw_seg_result(self, batch, pred, mask):
show_single_figure_result(batch, pred, mask)
def save_figure(self, path: str) -> None:
try:
plt.savefig(path)
self.logger.log(f'saved figure {path}.')
except IOError:
self.logger.error(f'Trying to save figure {path} failed: {IOError}')
painter = Painter()
| true | true |
1c3396417062d3ec1b72e781ca37319cb0613da5 | 290 | py | Python | django_lightweight_queue/progress_logger.py | thread/django-lightweight-queue | 2c67eb13a454fa1a02f8445c26915b6e9261fdad | [
"BSD-3-Clause"
] | 23 | 2015-04-29T04:47:02.000Z | 2022-03-11T12:43:01.000Z | django_lightweight_queue/progress_logger.py | thread/django-lightweight-queue | 2c67eb13a454fa1a02f8445c26915b6e9261fdad | [
"BSD-3-Clause"
] | 23 | 2015-02-27T14:30:47.000Z | 2021-12-02T14:18:34.000Z | django_lightweight_queue/progress_logger.py | thread/django-lightweight-queue | 2c67eb13a454fa1a02f8445c26915b6e9261fdad | [
"BSD-3-Clause"
] | 1 | 2015-08-18T12:27:08.000Z | 2015-08-18T12:27:08.000Z | from typing import TypeVar, Callable, Iterable, NamedTuple
T = TypeVar('T')
ProgressLogger = NamedTuple('ProgressLogger', [
('info', Callable[[str], None]),
('progress', Callable[[Iterable[T]], Iterable[T]]),
])
NULL_PROGRESS_LOGGER = ProgressLogger(lambda x: None, lambda x: x)
| 26.363636 | 66 | 0.696552 | from typing import TypeVar, Callable, Iterable, NamedTuple
T = TypeVar('T')
ProgressLogger = NamedTuple('ProgressLogger', [
('info', Callable[[str], None]),
('progress', Callable[[Iterable[T]], Iterable[T]]),
])
NULL_PROGRESS_LOGGER = ProgressLogger(lambda x: None, lambda x: x)
| true | true |
1c3397c90b18c260506989d6c1cb6c79112d4026 | 82,161 | py | Python | slack/web/client.py | sydneyq/Coin-the-Cat | 9d24b836941aa9b159d9214f5301e1794bb87c2f | [
"MIT"
] | null | null | null | slack/web/client.py | sydneyq/Coin-the-Cat | 9d24b836941aa9b159d9214f5301e1794bb87c2f | [
"MIT"
] | null | null | null | slack/web/client.py | sydneyq/Coin-the-Cat | 9d24b836941aa9b159d9214f5301e1794bb87c2f | [
"MIT"
] | null | null | null | """A Python module for interacting with Slack's Web API."""
import os
from asyncio import Future
from io import IOBase
from typing import Union, List, Optional, Dict
import slack.errors as e
from slack.web.base_client import BaseClient, SlackResponse
from slack.web.classes.views import View
class WebClient(BaseClient):
"""A WebClient allows apps to communicate with the Slack Platform's Web API.
The Slack Web API is an interface for querying information from
and enacting change in a Slack workspace.
This client handles constructing and sending HTTP requests to Slack
as well as parsing any responses received into a `SlackResponse`.
Attributes:
token (str): A string specifying an xoxp or xoxb token.
use_session (bool): An boolean specifying if the client
should take advantage of connection pooling.
Default is True.
base_url (str): A string representing the Slack API base URL.
Default is 'https://www.slack.com/api/'
timeout (int): The maximum number of seconds the client will wait
to connect and receive a response from Slack.
Default is 30 seconds.
Methods:
api_call: Constructs a request and executes the API call to Slack.
Example of recommended usage:
```python
import os
import slack
client = slack.WebClient(token=os.environ['SLACK_API_TOKEN'])
response = client.chat_postMessage(
channel='#random',
text="Hello world!")
assert response["ok"]
assert response["message"]["text"] == "Hello world!"
```
Example manually creating an API request:
```python
import os
import slack
client = slack.WebClient(token=os.environ['SLACK_API_TOKEN'])
response = client.api_call(
api_method='chat.postMessage',
json={'channel': '#random','text': "Hello world!"}
)
assert response["ok"]
assert response["message"]["text"] == "Hello world!"
```
Note:
Any attributes or methods prefixed with _underscores are
intended to be "private" internal use only. They may be changed or
removed at anytime.
"""
def admin_apps_approve(
self, *, app_id: str = None, request_id: str = None, **kwargs
) -> Union[Future, SlackResponse]:
"""Approve an app for installation on a workspace.
Either app_id or request_id is required.
These IDs can be obtained either directly via the app_requested event,
or by the admin.apps.requests.list method.
Args:
app_id (str): The id of the app to approve. e.g. 'A12345'
request_id (str): The id of the request to approve. e.g. 'Ar12345'
Raises:
SlackRequestError: If neither or both the `app_id` and `request_id` args are specified.
"""
if app_id:
kwargs.update({"app_id": app_id})
elif request_id:
kwargs.update({"request_id": request_id})
else:
raise e.SlackRequestError(
"The app_id or request_id argument must be specified."
)
return self.api_call("admin.apps.approve", json=kwargs)
def admin_apps_approved_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""List approved apps for an org or workspace."""
return self.api_call("admin.apps.approved.list", http_verb="GET", params=kwargs)
def admin_apps_requests_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""List app requests for a team/workspace."""
return self.api_call("admin.apps.requests.list", http_verb="GET", params=kwargs)
def admin_apps_restrict(self, **kwargs) -> Union[Future, SlackResponse]:
"""Restrict an app for installation on a workspace."""
return self.api_call("admin.apps.restrict", json=kwargs)
def admin_apps_restricted_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""List restricted apps for an org or workspace."""
return self.api_call(
"admin.apps.restricted.list", http_verb="GET", params=kwargs
)
def admin_conversations_restrictAccess_addGroup(
self, **kwargs
) -> Union[Future, SlackResponse]:
"""Add an allowlist of IDP groups for accessing a channel."""
return self.api_call(
"admin.conversations.restrictAccess.addGroup",
http_verb="GET",
params=kwargs,
)
def admin_conversations_restrictAccess_listGroups(
self, **kwargs
) -> Union[Future, SlackResponse]:
"""List all IDP Groups linked to a channel."""
return self.api_call(
"admin.conversations.restrictAccess.listGroups",
http_verb="GET",
params=kwargs,
)
def admin_conversations_restrictAccess_removeGroup(
self, **kwargs
) -> Union[Future, SlackResponse]:
"""Remove a linked IDP group linked from a private channel."""
return self.api_call(
"admin.conversations.restrictAccess.removeGroup",
http_verb="GET",
params=kwargs,
)
def admin_conversations_setTeams(self, **kwargs) -> Union[Future, SlackResponse]:
"""Set the workspaces in an Enterprise grid org that connect to a channel."""
return self.api_call("admin.conversations.setTeams", json=kwargs)
def admin_emoji_add(self, **kwargs) -> Union[Future, SlackResponse]:
"""Add an emoji."""
return self.api_call("admin.emoji.add", http_verb="GET", params=kwargs)
def admin_emoji_addAlias(self, **kwargs) -> Union[Future, SlackResponse]:
"""Add an emoji alias."""
return self.api_call("admin.emoji.addAlias", http_verb="GET", params=kwargs)
def admin_emoji_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""List emoji for an Enterprise Grid organization."""
return self.api_call("admin.emoji.list", http_verb="GET", params=kwargs)
def admin_emoji_remove(self, **kwargs) -> Union[Future, SlackResponse]:
"""Remove an emoji across an Enterprise Grid organization."""
return self.api_call("admin.emoji.remove", http_verb="GET", params=kwargs)
def admin_emoji_rename(self, **kwargs) -> Union[Future, SlackResponse]:
"""Rename an emoji."""
return self.api_call("admin.emoji.rename", http_verb="GET", params=kwargs)
def admin_users_session_reset(
self, *, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Wipes all valid sessions on all devices for a given user.
Args:
user_id (str): The ID of the user to wipe sessions for. e.g. 'W12345678'
"""
kwargs.update({"user_id": user_id})
return self.api_call("admin.users.session.reset", json=kwargs)
def admin_inviteRequests_approve(
self, *, invite_request_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Approve a workspace invite request.
team_id is required if your Enterprise Grid org contains more than one workspace.
Args:
invite_request_id (str): ID of the request to invite. e.g. 'Ir1234'
"""
kwargs.update({"invite_request_id": invite_request_id})
return self.api_call("admin.inviteRequests.approve", json=kwargs)
def admin_inviteRequests_approved_list(
self, **kwargs
) -> Union[Future, SlackResponse]:
"""List all approved workspace invite requests."""
return self.api_call("admin.inviteRequests.approved.list", json=kwargs)
def admin_inviteRequests_denied_list(
self, **kwargs
) -> Union[Future, SlackResponse]:
"""List all denied workspace invite requests."""
return self.api_call("admin.inviteRequests.denied.list", json=kwargs)
def admin_inviteRequests_deny(
self, *, invite_request_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Deny a workspace invite request.
Args:
invite_request_id (str): ID of the request to invite. e.g. 'Ir1234'
"""
kwargs.update({"invite_request_id": invite_request_id})
return self.api_call("admin.inviteRequests.deny", json=kwargs)
def admin_inviteRequests_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""List all pending workspace invite requests."""
return self.api_call("admin.inviteRequests.list", json=kwargs)
def admin_teams_admins_list(
    self, *, team_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """List all of the admins on a given workspace.

    Args:
        team_id (str): ID of the team.
    """
    kwargs.update({"team_id": team_id})
    return self.api_call("admin.teams.admins.list", http_verb="GET", params=kwargs)

def admin_teams_create(
    self, *, team_domain: str, team_name: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Create an Enterprise team.

    Args:
        team_domain (str): Team domain. e.g. 'slacksoftballteam'
        team_name (str): Team name. e.g. 'Slack Softball Team'
    """
    kwargs.update({"team_domain": team_domain, "team_name": team_name})
    return self.api_call("admin.teams.create", json=kwargs)

def admin_teams_list(self, **kwargs) -> Union[Future, SlackResponse]:
    """List all teams on an Enterprise organization."""
    return self.api_call("admin.teams.list", json=kwargs)

def admin_teams_owners_list(
    self, *, team_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """List all of the owners on a given workspace.

    Args:
        team_id (str): ID of the team.
    """
    kwargs.update({"team_id": team_id})
    return self.api_call("admin.teams.owners.list", http_verb="GET", params=kwargs)

def admin_teams_settings_info(
    self, team_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Fetch information about settings in a workspace.

    NOTE(review): unlike its siblings, ``team_id`` is positional here —
    kept as-is for backward compatibility with positional callers.

    Args:
        team_id (str): ID of the team.
    """
    kwargs.update({"team_id": team_id})
    return self.api_call("admin.teams.settings.info", json=kwargs)

def admin_teams_settings_setDefaultChannels(
    self, *, team_id: str, channel_ids: Union[str, List[str]], **kwargs
) -> Union[Future, SlackResponse]:
    """Set the default channels of a workspace.

    Args:
        team_id (str): ID of the team.
        channel_ids (str or list): A list of channel_ids.
            At least one channel is required. e.g. ['C1A2B3C4D', 'C26Z25Y24']
    """
    kwargs.update({"team_id": team_id})
    # The API expects a comma-separated string; accept a list for convenience.
    if isinstance(channel_ids, list):
        kwargs.update({"channel_ids": ",".join(channel_ids)})
    else:
        kwargs.update({"channel_ids": channel_ids})
    return self.api_call(
        "admin.teams.settings.setDefaultChannels", http_verb="GET", params=kwargs
    )

def admin_teams_settings_setDescription(
    self, *, team_id: str, description: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Set the description of a given workspace.

    Args:
        team_id (str): ID of the team.
        description (str): Description of the team.
    """
    kwargs.update({"team_id": team_id, "description": description})
    return self.api_call("admin.teams.settings.setDescription", json=kwargs)

def admin_teams_settings_setDiscoverability(
    self, *, team_id: str, discoverability: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Set the discoverability setting of a workspace.

    Args:
        team_id (str): ID of the team.
        discoverability (str): This workspace's discovery setting.
            It must be set to one of open, invite_only, closed, or unlisted.
    """
    kwargs.update({"team_id": team_id, "discoverability": discoverability})
    return self.api_call("admin.teams.settings.setDiscoverability", json=kwargs)

def admin_teams_settings_setIcon(
    self, *, team_id: str, image_url: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Sets the icon of a workspace.

    Args:
        team_id (str): ID of the team.
        image_url (str): Url of the icon.
    """
    kwargs.update({"team_id": team_id, "image_url": image_url})
    return self.api_call(
        "admin.teams.settings.setIcon", http_verb="GET", params=kwargs
    )

def admin_teams_settings_setName(
    self, *, team_id: str, name: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Set the name of a workspace.

    Args:
        team_id (str): ID of the team.
        name (str): Name of the team.
    """
    kwargs.update({"team_id": team_id, "name": name})
    return self.api_call("admin.teams.settings.setName", json=kwargs)
def admin_usergroups_addChannels(
    self,
    *,
    team_id: str,
    usergroup_id: str,
    channel_ids: Union[str, List[str]],
    **kwargs
) -> Union[Future, SlackResponse]:
    """Add one or more default channels to an IDP group.

    Args:
        team_id (str): The workspace to add default channels in. e.g. 'T1234'
        usergroup_id (str): ID of the IDP group to add default channels for. e.g. 'S1234'
        channel_ids (str or list): Comma separated string of channel IDs. e.g. 'C123,C234' or ['C123', 'C234']
    """
    kwargs.update({"team_id": team_id, "usergroup_id": usergroup_id})
    # The API expects a comma-separated string; accept a list for convenience.
    if isinstance(channel_ids, list):
        kwargs.update({"channel_ids": ",".join(channel_ids)})
    else:
        kwargs.update({"channel_ids": channel_ids})
    return self.api_call("admin.usergroups.addChannels", json=kwargs)

def admin_usergroups_listChannels(
    self, *, usergroup_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """List the default channels of an IDP group.

    Args:
        usergroup_id (str): ID of the IDP group to list default channels for. e.g. 'S1234'
    """
    kwargs.update({"usergroup_id": usergroup_id})
    return self.api_call("admin.usergroups.listChannels", json=kwargs)

def admin_usergroups_removeChannels(
    self, *, usergroup_id: str, channel_ids: Union[str, List[str]], **kwargs
) -> Union[Future, SlackResponse]:
    """Remove one or more default channels from an IDP group.

    Args:
        usergroup_id (str): ID of the IDP group. e.g. 'S1234'
        channel_ids (str or list): Comma separated string of channel IDs. e.g. 'C123,C234' or ['C123', 'C234']
    """
    kwargs.update({"usergroup_id": usergroup_id})
    # The API expects a comma-separated string; accept a list for convenience.
    if isinstance(channel_ids, list):
        kwargs.update({"channel_ids": ",".join(channel_ids)})
    else:
        kwargs.update({"channel_ids": channel_ids})
    return self.api_call("admin.usergroups.removeChannels", json=kwargs)
def admin_users_assign(
    self, *, team_id: str, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Add an Enterprise user to a workspace.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
        user_id (str): ID of the user to add to the workspace.
    """
    kwargs.update({"team_id": team_id, "user_id": user_id})
    return self.api_call("admin.users.assign", json=kwargs)

def admin_users_invite(
    self, *, team_id: str, email: str, channel_ids: Union[str, List[str]], **kwargs
) -> Union[Future, SlackResponse]:
    """Invite a user to a workspace.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
        email (str): The email address of the person to invite. e.g. 'joe@email.com'
        channel_ids (str or list): A list of channel_ids for this user to join.
            At least one channel is required. e.g. ['C1A2B3C4D', 'C26Z25Y24']
    """
    kwargs.update({"team_id": team_id, "email": email})
    # The API expects a comma-separated string; accept a list for convenience.
    if isinstance(channel_ids, list):
        kwargs.update({"channel_ids": ",".join(channel_ids)})
    else:
        kwargs.update({"channel_ids": channel_ids})
    return self.api_call("admin.users.invite", json=kwargs)

def admin_users_list(
    self, *, team_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """List users on a workspace.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
    """
    kwargs.update({"team_id": team_id})
    return self.api_call("admin.users.list", json=kwargs)

def admin_users_remove(
    self, *, team_id: str, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Remove a user from a workspace.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
        user_id (str): The ID of the user to remove. e.g. 'W12345678'
    """
    kwargs.update({"team_id": team_id, "user_id": user_id})
    return self.api_call("admin.users.remove", json=kwargs)

def admin_users_setAdmin(
    self, *, team_id: str, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Set an existing guest, regular user, or owner to be an admin user.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
        user_id (str): The ID of the user to designate as an admin. e.g. 'W12345678'
    """
    kwargs.update({"team_id": team_id, "user_id": user_id})
    return self.api_call("admin.users.setAdmin", json=kwargs)

def admin_users_setExpiration(
    self, *, expiration_ts: int, team_id: str, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Set an expiration for a guest user.

    Args:
        expiration_ts (int): Timestamp when guest account should be disabled. e.g. '1234567890'
        team_id (str): ID of the team. e.g. 'T1234'
        user_id (str): The ID of the user to set an expiration for. e.g. 'W12345678'
    """
    kwargs.update(
        {"expiration_ts": expiration_ts, "team_id": team_id, "user_id": user_id}
    )
    return self.api_call("admin.users.setExpiration", json=kwargs)

def admin_users_setOwner(
    self, *, team_id: str, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Set an existing guest, regular user, or admin user to be a workspace owner.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
        user_id (str): The ID of the user to designate as an owner. e.g. 'W12345678'
    """
    kwargs.update({"team_id": team_id, "user_id": user_id})
    return self.api_call("admin.users.setOwner", json=kwargs)

def admin_users_setRegular(
    self, *, team_id: str, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Set an existing guest user, admin user, or owner to be a regular user.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
        user_id (str): The ID of the user to designate as a regular user. e.g. 'W12345678'
    """
    kwargs.update({"team_id": team_id, "user_id": user_id})
    return self.api_call("admin.users.setRegular", json=kwargs)
def api_test(self, **kwargs) -> Union[Future, SlackResponse]:
    """Check that API calling code works."""
    return self.api_call("api.test", json=kwargs)

def auth_revoke(self, **kwargs) -> Union[Future, SlackResponse]:
    """Revoke a token."""
    return self.api_call("auth.revoke", http_verb="GET", params=kwargs)

def auth_test(self, **kwargs) -> Union[Future, SlackResponse]:
    """Check authentication & identity."""
    return self.api_call("auth.test", json=kwargs)

def bots_info(self, **kwargs) -> Union[Future, SlackResponse]:
    """Get information about a bot user."""
    return self.api_call("bots.info", http_verb="GET", params=kwargs)
def calls_add(
    self, *, external_unique_id: str, join_url: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Register a new Call.

    Args:
        external_unique_id (str): An ID supplied by the 3rd-party Call provider;
            must be unique across all Calls from that service.
            e.g. '025169F6-E37A-4E62-BB54-7F93A0FC4C1F'
        join_url (str): The URL required for a client to join the Call.
            e.g. 'https://example.com/calls/1234567890'
    """
    kwargs["external_unique_id"] = external_unique_id
    kwargs["join_url"] = join_url
    # Normalize any caller-supplied participant objects before sending.
    self._update_call_participants(kwargs, kwargs.get("users", None))
    return self.api_call("calls.add", http_verb="POST", params=kwargs)

def calls_end(self, *, id: str, **kwargs) -> Union[Future, SlackResponse]:
    """End a Call.

    Args:
        id (str): id returned when registering the call using the calls.add method.
    """
    kwargs["id"] = id
    return self.api_call("calls.end", http_verb="POST", params=kwargs)

def calls_info(self, *, id: str, **kwargs) -> Union[Future, SlackResponse]:
    """Return information about a Call.

    Args:
        id (str): id returned when registering the call using the calls.add method.
    """
    kwargs["id"] = id
    return self.api_call("calls.info", http_verb="POST", params=kwargs)

def calls_participants_add(
    self, *, id: str, users: Union[str, List[Dict[str, str]]], **kwargs
) -> Union[Future, SlackResponse]:
    """Register new participants added to a Call.

    Args:
        id (str): id returned when registering the call using the calls.add method.
        users: (list): The list of users to add as participants in the Call.
    """
    kwargs["id"] = id
    self._update_call_participants(kwargs, users)
    return self.api_call("calls.participants.add", http_verb="POST", params=kwargs)

def calls_update(self, *, id: str, **kwargs) -> Union[Future, SlackResponse]:
    """Update information about a Call.

    Args:
        id (str): id returned by the calls.add method.
    """
    kwargs["id"] = id
    return self.api_call("calls.update", http_verb="POST", params=kwargs)
def channels_archive(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
    """Archive a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("channels.archive", json=kwargs)

def channels_create(self, *, name: str, **kwargs) -> Union[Future, SlackResponse]:
    """Create a channel.

    Args:
        name (str): The name of the channel. e.g. 'mychannel'
    """
    kwargs["name"] = name
    return self.api_call("channels.create", json=kwargs)

def channels_history(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
    """Fetch history of messages and events from a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("channels.history", http_verb="GET", params=kwargs)

def channels_info(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
    """Get information about a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("channels.info", http_verb="GET", params=kwargs)

def channels_invite(
    self, *, channel: str, user: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Invite a user to a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        user (str): The user id. e.g. 'U1234567890'
    """
    kwargs["channel"] = channel
    kwargs["user"] = user
    return self.api_call("channels.invite", json=kwargs)

def channels_join(self, *, name: str, **kwargs) -> Union[Future, SlackResponse]:
    """Join a channel, creating it if needed.

    Args:
        name (str): The channel name. e.g. '#general'
    """
    kwargs["name"] = name
    return self.api_call("channels.join", json=kwargs)

def channels_kick(
    self, *, channel: str, user: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Remove a user from a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        user (str): The user id. e.g. 'U1234567890'
    """
    kwargs["channel"] = channel
    kwargs["user"] = user
    return self.api_call("channels.kick", json=kwargs)

def channels_leave(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
    """Leave a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("channels.leave", json=kwargs)

def channels_list(self, **kwargs) -> Union[Future, SlackResponse]:
    """List all channels in a Slack team."""
    return self.api_call("channels.list", http_verb="GET", params=kwargs)

def channels_mark(
    self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Set the read cursor in a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        ts (str): Timestamp of the most recently seen message. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    return self.api_call("channels.mark", json=kwargs)

def channels_rename(
    self, *, channel: str, name: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Rename a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        name (str): The new channel name. e.g. 'newchannel'
    """
    kwargs["channel"] = channel
    kwargs["name"] = name
    return self.api_call("channels.rename", json=kwargs)

def channels_replies(
    self, *, channel: str, thread_ts: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Retrieve a thread of messages posted to a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        thread_ts (str): The timestamp of an existing message with 0 or more replies.
            e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["thread_ts"] = thread_ts
    return self.api_call("channels.replies", http_verb="GET", params=kwargs)

def channels_setPurpose(
    self, *, channel: str, purpose: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Set the purpose for a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        purpose (str): The new purpose for the channel. e.g. 'My Purpose'
    """
    kwargs["channel"] = channel
    kwargs["purpose"] = purpose
    return self.api_call("channels.setPurpose", json=kwargs)

def channels_setTopic(
    self, *, channel: str, topic: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Set the topic for a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        topic (str): The new topic for the channel. e.g. 'My Topic'
    """
    kwargs["channel"] = channel
    kwargs["topic"] = topic
    return self.api_call("channels.setTopic", json=kwargs)

def channels_unarchive(
    self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Unarchive a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("channels.unarchive", json=kwargs)
def chat_delete(self, *, channel: str, ts: str, **kwargs) -> Union[Future, SlackResponse]:
    """Delete a message.

    Args:
        channel (str): Channel containing the message to be deleted. e.g. 'C1234567890'
        ts (str): Timestamp of the message to be deleted. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    return self.api_call("chat.delete", json=kwargs)

def chat_deleteScheduledMessage(
    self, *, channel: str, scheduled_message_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Delete a scheduled message.

    Args:
        channel (str): The channel the scheduled_message is posting to. e.g. 'C1234567890'
        scheduled_message_id (str): scheduled_message_id returned from call to chat.scheduleMessage e.g. 'Q1234ABCD'
    """
    kwargs["channel"] = channel
    kwargs["scheduled_message_id"] = scheduled_message_id
    return self.api_call("chat.deleteScheduledMessage", json=kwargs)

def chat_getPermalink(
    self, *, channel: str, message_ts: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Retrieve a permalink URL for a specific extant message.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        message_ts (str): The timestamp. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["message_ts"] = message_ts
    return self.api_call("chat.getPermalink", http_verb="GET", params=kwargs)

def chat_meMessage(
    self, *, channel: str, text: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Share a me message into a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        text (str): The message you'd like to share. e.g. 'Hello world'
    """
    kwargs["channel"] = channel
    kwargs["text"] = text
    return self.api_call("chat.meMessage", json=kwargs)

def chat_postEphemeral(
    self, *, channel: str, user: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Send an ephemeral message to a user in a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        user (str): The id of user who should see the message. e.g. 'U0BPQUNTA'
        text (str): The message you'd like to share. e.g. 'Hello world'
            text is not required when presenting blocks.
        blocks (list): A dictionary list of blocks.
            Blocks are required when not presenting text.
            e.g. [{"type": "section", "text": {"type": "plain_text", "text": "Hello world"}}]
    """
    kwargs["channel"] = channel
    kwargs["user"] = user
    # Convert any Block/Attachment objects into plain dicts.
    self._parse_web_class_objects(kwargs)
    return self.api_call("chat.postEphemeral", json=kwargs)

def chat_postMessage(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
    """Send a message to a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        text (str): The message you'd like to share. e.g. 'Hello world'
            text is not required when presenting blocks.
        blocks (list): A dictionary list of blocks.
            Blocks are required when not presenting text.
            e.g. [{"type": "section", "text": {"type": "plain_text", "text": "Hello world"}}]
    """
    kwargs["channel"] = channel
    # Convert any Block/Attachment objects into plain dicts.
    self._parse_web_class_objects(kwargs)
    return self.api_call("chat.postMessage", json=kwargs)

def chat_scheduleMessage(
    self, *, channel: str, post_at: str, text: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Schedule a message.

    Args:
        channel (str): The channel the scheduled_message is posting to. e.g. 'C1234567890'
        post_at (str): Unix EPOCH timestamp of time in future to send the message. e.g. '299876400'
        text (str): The message you'd like to send. e.g. 'Hello world'
    """
    kwargs["channel"] = channel
    kwargs["post_at"] = post_at
    kwargs["text"] = text
    # Convert any Block/Attachment objects into plain dicts.
    self._parse_web_class_objects(kwargs)
    return self.api_call("chat.scheduleMessage", json=kwargs)

def chat_unfurl(
    self, *, channel: str, ts: str, unfurls: dict, **kwargs
) -> Union[Future, SlackResponse]:
    """Provide custom unfurl behavior for user-posted URLs.

    Args:
        channel (str): The Channel ID of the message. e.g. 'C1234567890'
        ts (str): Timestamp of the message to add unfurl behavior to. e.g. '1234567890.123456'
        unfurls (dict): a dict of the specific URLs you're offering an unfurl for.
            e.g. {"https://example.com/": {"text": "Every day is the test."}}
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    kwargs["unfurls"] = unfurls
    return self.api_call("chat.unfurl", json=kwargs)

def chat_update(self, *, channel: str, ts: str, **kwargs) -> Union[Future, SlackResponse]:
    """Update a message in a channel.

    Args:
        channel (str): The channel containing the message to be updated. e.g. 'C1234567890'
        ts (str): Timestamp of the message to be updated. e.g. '1234567890.123456'
        text (str): The message you'd like to share. e.g. 'Hello world'
            text is not required when presenting blocks.
        blocks (list): A dictionary list of blocks.
            Blocks are required when not presenting text.
            e.g. [{"type": "section", "text": {"type": "plain_text", "text": "Hello world"}}]
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    # Convert any Block/Attachment objects into plain dicts.
    self._parse_web_class_objects(kwargs)
    return self.api_call("chat.update", json=kwargs)

def chat_scheduledMessages_list(self, **kwargs) -> Union[Future, SlackResponse]:
    """List all scheduled messages."""
    return self.api_call("chat.scheduledMessages.list", json=kwargs)
def conversations_archive(
    self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Archive a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.archive", json=kwargs)

def conversations_close(
    self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Close a direct message or multi-person direct message.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.close", json=kwargs)

def conversations_create(self, *, name: str, **kwargs) -> Union[Future, SlackResponse]:
    """Initiate a public or private channel-based conversation.

    Args:
        name (str): The name of the channel. e.g. 'mychannel'
    """
    kwargs["name"] = name
    return self.api_call("conversations.create", json=kwargs)

def conversations_history(
    self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Fetch a conversation's history of messages and events.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.history", http_verb="GET", params=kwargs)

def conversations_info(
    self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Retrieve information about a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.info", http_verb="GET", params=kwargs)

def conversations_invite(
    self, *, channel: str, users: Union[str, List[str]], **kwargs
) -> Union[Future, SlackResponse]:
    """Invite users to a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        users (str or list): An list of user id's to invite. e.g. ['U2345678901', 'U3456789012']
    """
    kwargs["channel"] = channel
    # The API expects a comma-separated string; accept a list for convenience.
    kwargs["users"] = ",".join(users) if isinstance(users, list) else users
    return self.api_call("conversations.invite", json=kwargs)

def conversations_join(
    self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Join an existing conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.join", json=kwargs)

def conversations_kick(
    self, *, channel: str, user: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Remove a user from a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        user (str): The id of the user to kick. e.g. 'U2345678901'
    """
    kwargs["channel"] = channel
    kwargs["user"] = user
    return self.api_call("conversations.kick", json=kwargs)

def conversations_leave(
    self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Leave a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.leave", json=kwargs)

def conversations_list(self, **kwargs) -> Union[Future, SlackResponse]:
    """List all channels in a Slack team."""
    return self.api_call("conversations.list", http_verb="GET", params=kwargs)

def conversations_mark(
    self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Set the read cursor in a channel.

    Args:
        channel (str): Channel or conversation to set the read cursor for e.g. 'C1234567890'
        ts (str): Unique identifier of message to mark as most recently seen in the convo e.g. '1593473566.000200'
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    return self.api_call("conversations.mark", json=kwargs)

def conversations_members(
    self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Retrieve members of a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.members", http_verb="GET", params=kwargs)

def conversations_open(self, **kwargs) -> Union[Future, SlackResponse]:
    """Open or resume a direct message or multi-person direct message."""
    return self.api_call("conversations.open", json=kwargs)

def conversations_rename(
    self, *, channel: str, name: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Rename a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        name (str): The new channel name. e.g. 'newchannel'
    """
    kwargs["channel"] = channel
    kwargs["name"] = name
    return self.api_call("conversations.rename", json=kwargs)

def conversations_replies(
    self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Retrieve a thread of messages posted to a conversation.

    Args:
        channel (str): Conversation ID to fetch thread from. e.g. 'C1234567890'
        ts (str): Unique identifier of a thread's parent message. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    return self.api_call("conversations.replies", http_verb="GET", params=kwargs)

def conversations_setPurpose(
    self, *, channel: str, purpose: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Set the purpose for a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        purpose (str): The new purpose for the channel. e.g. 'My Purpose'
    """
    kwargs["channel"] = channel
    kwargs["purpose"] = purpose
    return self.api_call("conversations.setPurpose", json=kwargs)

def conversations_setTopic(
    self, *, channel: str, topic: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Set the topic for a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        topic (str): The new topic for the channel. e.g. 'My Topic'
    """
    kwargs["channel"] = channel
    kwargs["topic"] = topic
    return self.api_call("conversations.setTopic", json=kwargs)

def conversations_unarchive(
    self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Reverse conversation archival.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.unarchive", json=kwargs)
def dialog_open(
    self, *, dialog: dict, trigger_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Open a dialog with a user.

    Args:
        dialog (dict): A dictionary of dialog arguments.
            {
                "callback_id": "46eh782b0",
                "title": "Request something",
                "submit_label": "Request",
                "state": "Max",
                "elements": [
                    {"type": "text", "label": "Origin", "name": "loc_origin"},
                    {"type": "text", "label": "Destination", "name": "loc_destination"}
                ]
            }
        trigger_id (str): The trigger id of a recent message interaction.
            e.g. '12345.98765.abcd2358fdea'
    """
    kwargs["dialog"] = dialog
    kwargs["trigger_id"] = trigger_id
    return self.api_call("dialog.open", json=kwargs)

def dnd_endDnd(self, **kwargs) -> Union[Future, SlackResponse]:
    """End the current user's Do Not Disturb session immediately."""
    return self.api_call("dnd.endDnd", json=kwargs)

def dnd_endSnooze(self, **kwargs) -> Union[Future, SlackResponse]:
    """End the current user's snooze mode immediately."""
    return self.api_call("dnd.endSnooze", json=kwargs)

def dnd_info(self, **kwargs) -> Union[Future, SlackResponse]:
    """Retrieve a user's current Do Not Disturb status."""
    return self.api_call("dnd.info", http_verb="GET", params=kwargs)

def dnd_setSnooze(
    self, *, num_minutes: int, **kwargs
) -> Union[Future, SlackResponse]:
    """Turn on Do Not Disturb mode for the current user, or change its duration.

    Args:
        num_minutes (int): The snooze duration. e.g. 60
    """
    kwargs["num_minutes"] = num_minutes
    return self.api_call("dnd.setSnooze", http_verb="GET", params=kwargs)

def dnd_teamInfo(
    self, users: Union[str, List[str]], **kwargs
) -> Union[Future, SlackResponse]:
    """Retrieve the Do Not Disturb status for users on a team.

    Args:
        users (str or list): User IDs to fetch information e.g. 'U123,U234' or ["U123", "U234"]
    """
    # The API expects a comma-separated string; accept a list for convenience.
    kwargs["users"] = ",".join(users) if isinstance(users, list) else users
    return self.api_call("dnd.teamInfo", http_verb="GET", params=kwargs)

def emoji_list(self, **kwargs) -> Union[Future, SlackResponse]:
    """List custom emoji for a team."""
    return self.api_call("emoji.list", http_verb="GET", params=kwargs)
def files_comments_delete(
self, *, file: str, id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Deletes an existing comment on a file.
Args:
file (str): The file id. e.g. 'F1234467890'
id (str): The file comment id. e.g. 'Fc1234567890'
"""
kwargs.update({"file": file, "id": id})
return self.api_call("files.comments.delete", json=kwargs)
def files_delete(self, *, file: str, **kwargs) -> Union[Future, SlackResponse]:
"""Deletes a file.
Args:
file (str): The file id. e.g. 'F1234467890'
"""
kwargs.update({"file": file})
return self.api_call("files.delete", json=kwargs)
def files_info(self, *, file: str, **kwargs) -> Union[Future, SlackResponse]:
"""Gets information about a team file.
Args:
file (str): The file id. e.g. 'F1234467890'
"""
kwargs.update({"file": file})
return self.api_call("files.info", http_verb="GET", params=kwargs)
def files_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Lists & filters team files."""
return self.api_call("files.list", http_verb="GET", params=kwargs)
def files_remote_info(self, **kwargs) -> Union[Future, SlackResponse]:
"""Retrieve information about a remote file added to Slack."""
return self.api_call("files.remote.info", http_verb="GET", params=kwargs)
def files_remote_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Retrieve information about a remote file added to Slack."""
return self.api_call("files.remote.list", http_verb="GET", params=kwargs)
def files_remote_add(
self, *, external_id: str, external_url: str, title: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Adds a file from a remote service.
Args:
external_id (str): Creator defined GUID for the file. e.g. '123456'
external_url (str): URL of the remote file. e.g. 'http://example.com/my_cloud_service_file/abc123'
title (str): Title of the file being shared. e.g. 'Danger, High Voltage!'
"""
kwargs.update(
{"external_id": external_id, "external_url": external_url, "title": title}
)
files = None
# preview_image (file): Preview of the document via multipart/form-data.
if "preview_image" in kwargs:
files = {"preview_image": kwargs.pop("preview_image")}
return self.api_call(
# Intentionally using "POST" method over "GET" here
"files.remote.add",
http_verb="POST",
data=kwargs,
files=files,
)
def files_remote_update(self, **kwargs) -> Union[Future, SlackResponse]:
"""Updates an existing remote file."""
return self.api_call("files.remote.update", http_verb="GET", params=kwargs)
def files_remote_remove(self, **kwargs) -> Union[Future, SlackResponse]:
"""Remove a remote file."""
return self.api_call("files.remote.remove", http_verb="GET", params=kwargs)
def files_remote_share(
self, *, channels: Union[str, List[str]], **kwargs
) -> Union[Future, SlackResponse]:
"""Share a remote file into a channel.
Args:
channels (str or list): Comma-separated list of channel IDs where the file will be shared.
e.g. ['C1234567890', 'C2345678901']
"""
if isinstance(channels, list):
kwargs.update({"channels": ",".join(channels)})
else:
kwargs.update({"channels": channels})
return self.api_call("files.remote.share", http_verb="GET", params=kwargs)
def files_revokePublicURL(
self, *, file: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Revokes public/external sharing access for a file
Args:
file (str): The file id. e.g. 'F1234467890'
"""
kwargs.update({"file": file})
return self.api_call("files.revokePublicURL", json=kwargs)
def files_sharedPublicURL(
self, *, file: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Enables a file for public/external sharing.
Args:
file (str): The file id. e.g. 'F1234467890'
"""
kwargs.update({"file": file})
return self.api_call("files.sharedPublicURL", json=kwargs)
def files_upload(
    self, *, file: Union[str, IOBase] = None, content: str = None, **kwargs
) -> Union[Future, SlackResponse]:
    """Uploads or creates a file.

    Args:
        file (str or IOBase): Supply a file path (or an already-open file object)
            when you'd like to upload a specific file. e.g. 'dramacat.gif'
        content (str): Supply content when you'd like to create an
            editable text file containing the specified text. e.g. 'launch plan'
    Raises:
        SlackRequestError: If neither or both the `file` and `content` args are specified.
    """
    if file is None and content is None:
        raise e.SlackRequestError("The file or content argument must be specified.")
    if file is not None and content is not None:
        raise e.SlackRequestError(
            "You cannot specify both the file and the content argument."
        )
    if file:
        # Derive a default filename from the local path when the caller did not
        # supply one. Only a str path carries a usable name; a file-like object
        # has no path to split (the old `file.split(...)` raised AttributeError
        # for IOBase inputs, even though the signature allows them).
        if "filename" not in kwargs and isinstance(file, str):
            kwargs["filename"] = os.path.basename(file)
        return self.api_call("files.upload", files={"file": file}, data=kwargs)
    data = kwargs.copy()
    data.update({"content": content})
    return self.api_call("files.upload", data=data)
def groups_archive(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Archives a private channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("groups.archive", json=kwargs)
def groups_create(self, *, name: str, **kwargs) -> Union[Future, SlackResponse]:
"""Creates a private channel.
Args:
name (str): The name of the private group. e.g. 'mychannel'
"""
kwargs.update({"name": name})
return self.api_call("groups.create", json=kwargs)
def groups_createChild(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Clones and archives a private channel.
Args:
channel (str): The group id. e.g. 'G1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("groups.createChild", http_verb="GET", params=kwargs)
def groups_history(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Fetches history of messages and events from a private channel.
Args:
channel (str): The group id. e.g. 'G1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("groups.history", http_verb="GET", params=kwargs)
def groups_info(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Gets information about a private channel.
Args:
channel (str): The group id. e.g. 'G1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("groups.info", http_verb="GET", params=kwargs)
def groups_invite(
    self, *, channel: str, user: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Invites a user to a private channel.

    Args:
        channel (str): The group id. e.g. 'G1234567890'
        user (str): The user id. e.g. 'U1234567890'
    """
    kwargs["channel"] = channel
    kwargs["user"] = user
    return self.api_call("groups.invite", json=kwargs)
def groups_kick(
self, *, channel: str, user: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Removes a user from a private channel.
Args:
channel (str): The group id. e.g. 'G1234567890'
user (str): The user id. e.g. 'U1234567890'
"""
kwargs.update({"channel": channel, "user": user})
return self.api_call("groups.kick", json=kwargs)
def groups_leave(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Leaves a private channel.
Args:
channel (str): The group id. e.g. 'G1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("groups.leave", json=kwargs)
def groups_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Lists private channels that the calling user has access to."""
return self.api_call("groups.list", http_verb="GET", params=kwargs)
def groups_mark(
self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Sets the read cursor in a private channel.
Args:
channel (str): Private channel to set reading cursor in. e.g. 'C1234567890'
ts (str): Timestamp of the most recently seen message. e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel, "ts": ts})
return self.api_call("groups.mark", json=kwargs)
def groups_open(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Opens a private channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("groups.open", json=kwargs)
def groups_rename(
self, *, channel: str, name: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Renames a private channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
name (str): The new channel name. e.g. 'newchannel'
"""
kwargs.update({"channel": channel, "name": name})
return self.api_call("groups.rename", json=kwargs)
def groups_replies(
self, *, channel: str, thread_ts: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Retrieve a thread of messages posted to a private channel
Args:
channel (str): The channel id. e.g. 'C1234567890'
thread_ts (str): The timestamp of an existing message with 0 or more replies.
e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel, "thread_ts": thread_ts})
return self.api_call("groups.replies", http_verb="GET", params=kwargs)
def groups_setPurpose(
self, *, channel: str, purpose: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Sets the purpose for a private channel.
Args:
channel (str): The channel id. e.g. 'G1234567890'
purpose (str): The new purpose for the channel. e.g. 'My Purpose'
"""
kwargs.update({"channel": channel, "purpose": purpose})
return self.api_call("groups.setPurpose", json=kwargs)
def groups_setTopic(
self, *, channel: str, topic: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Sets the topic for a private channel.
Args:
channel (str): The channel id. e.g. 'G1234567890'
topic (str): The new topic for the channel. e.g. 'My Topic'
"""
kwargs.update({"channel": channel, "topic": topic})
return self.api_call("groups.setTopic", json=kwargs)
def groups_unarchive(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Unarchives a private channel.
Args:
channel (str): The channel id. e.g. 'G1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("groups.unarchive", json=kwargs)
def im_close(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Close a direct message channel.
Args:
channel (str): Direct message channel to close. e.g. 'D1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("im.close", json=kwargs)
def im_history(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Fetches history of messages and events from direct message channel.
Args:
channel (str): Direct message channel to fetch history from. e.g. 'D1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("im.history", http_verb="GET", params=kwargs)
def im_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Lists direct message channels for the calling user."""
return self.api_call("im.list", http_verb="GET", params=kwargs)
def im_mark(
self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Sets the read cursor in a direct message channel.
Args:
channel (str): Direct message channel to set reading cursor in. e.g. 'D1234567890'
ts (str): Timestamp of the most recently seen message. e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel, "ts": ts})
return self.api_call("im.mark", json=kwargs)
def im_open(self, *, user: str, **kwargs) -> Union[Future, SlackResponse]:
"""Opens a direct message channel.
Args:
user (str): The user id to open a DM with. e.g. 'W1234567890'
"""
kwargs.update({"user": user})
return self.api_call("im.open", json=kwargs)
def im_replies(
self, *, channel: str, thread_ts: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Retrieve a thread of messages posted to a direct message conversation
Args:
channel (str): Direct message channel to fetch thread from. e.g. 'C1234567890'
thread_ts (str): The timestamp of an existing message with 0 or more replies.
e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel, "thread_ts": thread_ts})
return self.api_call("im.replies", http_verb="GET", params=kwargs)
def migration_exchange(
    self, *, users: Union[str, List[str]], **kwargs
) -> Union[Future, SlackResponse]:
    """For Enterprise Grid workspaces, map local user IDs to global user IDs

    Args:
        users (str or list): A list of user ids, up to 400 per request.
            e.g. ['W1234567890', 'U2345678901', 'U3456789012']
    """
    # Accept either a pre-joined string or a list of IDs.
    user_csv = ",".join(users) if isinstance(users, list) else users
    kwargs.update({"users": user_csv})
    return self.api_call("migration.exchange", http_verb="GET", params=kwargs)
def mpim_close(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Closes a multiparty direct message channel.
Args:
channel (str): Multiparty Direct message channel to close. e.g. 'G1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("mpim.close", json=kwargs)
def mpim_history(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Fetches history of messages and events from a multiparty direct message.
Args:
channel (str): Multiparty direct message to fetch history for. e.g. 'G1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("mpim.history", http_verb="GET", params=kwargs)
def mpim_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Lists multiparty direct message channels for the calling user."""
return self.api_call("mpim.list", http_verb="GET", params=kwargs)
def mpim_mark(
self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Sets the read cursor in a multiparty direct message channel.
Args:
channel (str): Multiparty direct message channel to set reading cursor in.
e.g. 'G1234567890'
ts (str): Timestamp of the most recently seen message.
e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel, "ts": ts})
return self.api_call("mpim.mark", json=kwargs)
def mpim_open(
    self, *, users: Union[str, List[str]], **kwargs
) -> Union[Future, SlackResponse]:
    """This method opens a multiparty direct message.

    Args:
        users (str or list): A lists of user ids. The ordering of the users
            is preserved whenever a MPIM group is returned.
            e.g. ['W1234567890', 'U2345678901', 'U3456789012']
    """
    # Join preserves the caller's ordering, which the API keeps for MPIM groups.
    user_csv = ",".join(users) if isinstance(users, list) else users
    kwargs.update({"users": user_csv})
    return self.api_call("mpim.open", json=kwargs)
def mpim_replies(
self, *, channel: str, thread_ts: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Retrieve a thread of messages posted to a direct message conversation from a
multiparty direct message.
Args:
channel (str): Multiparty direct message channel to fetch thread from.
e.g. 'G1234567890'
thread_ts (str): Unique identifier of a thread's parent message.
e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel, "thread_ts": thread_ts})
return self.api_call("mpim.replies", http_verb="GET", params=kwargs)
def oauth_v2_access(
    self,
    *,
    client_id: str,
    client_secret: str,
    code: str,
    redirect_uri: Optional[str] = None,
    **kwargs
) -> Union[Future, SlackResponse]:
    """Exchanges a temporary OAuth verifier code for an access token.

    Args:
        client_id (str): Issued when you created your application. e.g. '4b39e9-752c4'
        client_secret (str): Issued when you created your application. e.g. '33fea0113f5b1'
        code (str): The code param returned via the OAuth callback. e.g. 'ccdaa72ad'
        redirect_uri (optional str): Must match the originally submitted URI
            (if one was sent). e.g. 'https://example.com'
    """
    kwargs["code"] = code
    # Only forward redirect_uri when the caller actually supplied one.
    if redirect_uri is not None:
        kwargs["redirect_uri"] = redirect_uri
    # client credentials go in the HTTP auth header, not the form body.
    return self.api_call(
        "oauth.v2.access",
        data=kwargs,
        auth={"client_id": client_id, "client_secret": client_secret},
    )
def oauth_access(
    self,
    *,
    client_id: str,
    client_secret: str,
    code: str,
    redirect_uri: Optional[str] = None,
    **kwargs
) -> Union[Future, SlackResponse]:
    """Exchanges a temporary OAuth verifier code for an access token.

    Args:
        client_id (str): Issued when you created your application. e.g. '4b39e9-752c4'
        client_secret (str): Issued when you created your application. e.g. '33fea0113f5b1'
        code (str): The code param returned via the OAuth callback. e.g. 'ccdaa72ad'
        redirect_uri (optional str): Must match the originally submitted URI
            (if one was sent). e.g. 'https://example.com'
    """
    kwargs["code"] = code
    # Only forward redirect_uri when the caller actually supplied one.
    if redirect_uri is not None:
        kwargs["redirect_uri"] = redirect_uri
    # client credentials go in the HTTP auth header, not the form body.
    return self.api_call(
        "oauth.access",
        data=kwargs,
        auth={"client_id": client_id, "client_secret": client_secret},
    )
def pins_add(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Pins an item to a channel.
Args:
channel (str): Channel to pin the item in. e.g. 'C1234567890'
file (str): File id to pin. e.g. 'F1234567890'
file_comment (str): File comment to pin. e.g. 'Fc1234567890'
timestamp (str): Timestamp of message to pin. e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel})
return self.api_call("pins.add", json=kwargs)
def pins_list(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Lists items pinned to a channel.
Args:
channel (str): Channel to get pinned items for. e.g. 'C1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("pins.list", http_verb="GET", params=kwargs)
def pins_remove(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Un-pins an item from a channel.
Args:
channel (str): Channel to pin the item in. e.g. 'C1234567890'
file (str): File id to pin. e.g. 'F1234567890'
file_comment (str): File comment to pin. e.g. 'Fc1234567890'
timestamp (str): Timestamp of message to pin. e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel})
return self.api_call("pins.remove", json=kwargs)
def reactions_add(self, *, name: str, **kwargs) -> Union[Future, SlackResponse]:
"""Adds a reaction to an item.
Args:
name (str): Reaction (emoji) name. e.g. 'thumbsup'
channel (str): Channel where the message to add reaction to was posted.
e.g. 'C1234567890'
timestamp (str): Timestamp of the message to add reaction to. e.g. '1234567890.123456'
"""
kwargs.update({"name": name})
return self.api_call("reactions.add", json=kwargs)
def reactions_get(self, **kwargs) -> Union[Future, SlackResponse]:
"""Gets reactions for an item."""
return self.api_call("reactions.get", http_verb="GET", params=kwargs)
def reactions_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Lists reactions made by a user."""
return self.api_call("reactions.list", http_verb="GET", params=kwargs)
def reactions_remove(self, *, name: str, **kwargs) -> Union[Future, SlackResponse]:
"""Removes a reaction from an item.
Args:
name (str): Reaction (emoji) name. e.g. 'thumbsup'
"""
kwargs.update({"name": name})
return self.api_call("reactions.remove", json=kwargs)
def reminders_add(
    self, *, text: str, time: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Creates a reminder.

    Args:
        text (str): The content of the reminder. e.g. 'eat a banana'
        time (str): When this reminder should happen:
            the Unix timestamp (up to five years from now e.g. '1602288000'),
            the number of seconds until the reminder (if within 24 hours),
            or a natural language description (Ex. 'in 15 minutes' or 'every Thursday')
    """
    kwargs["text"] = text
    kwargs["time"] = time
    return self.api_call("reminders.add", json=kwargs)
def reminders_complete(
self, *, reminder: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Marks a reminder as complete.
Args:
reminder (str): The ID of the reminder to be marked as complete.
e.g. 'Rm12345678'
"""
kwargs.update({"reminder": reminder})
return self.api_call("reminders.complete", json=kwargs)
def reminders_delete(
self, *, reminder: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Deletes a reminder.
Args:
reminder (str): The ID of the reminder. e.g. 'Rm12345678'
"""
kwargs.update({"reminder": reminder})
return self.api_call("reminders.delete", json=kwargs)
def reminders_info(
self, *, reminder: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Gets information about a reminder.
Args:
reminder (str): The ID of the reminder. e.g. 'Rm12345678'
"""
kwargs.update({"reminder": reminder})
return self.api_call("reminders.info", http_verb="GET", params=kwargs)
def reminders_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Lists all reminders created by or for a given user."""
return self.api_call("reminders.list", http_verb="GET", params=kwargs)
def rtm_connect(self, **kwargs) -> Union[Future, SlackResponse]:
"""Starts a Real Time Messaging session."""
return self.api_call("rtm.connect", http_verb="GET", params=kwargs)
def rtm_start(self, **kwargs) -> Union[Future, SlackResponse]:
"""Starts a Real Time Messaging session."""
return self.api_call("rtm.start", http_verb="GET", params=kwargs)
def search_all(self, *, query: str, **kwargs) -> Union[Future, SlackResponse]:
    """Searches for messages and files matching a query.

    Args:
        query (str): Search query. May contain booleans, etc.
            e.g. 'pickleface'
    """
    kwargs.update({"query": query})
    return self.api_call("search.all", http_verb="GET", params=kwargs)
def search_files(self, *, query: str, **kwargs) -> Union[Future, SlackResponse]:
    """Searches for files matching a query.

    Args:
        query (str): Search query. May contain booleans, etc.
            e.g. 'pickleface'
    """
    kwargs.update({"query": query})
    return self.api_call("search.files", http_verb="GET", params=kwargs)
def search_messages(self, *, query: str, **kwargs) -> Union[Future, SlackResponse]:
    """Searches for messages matching a query.

    Args:
        query (str): Search query. May contain booleans, etc.
            e.g. 'pickleface'
    """
    kwargs.update({"query": query})
    return self.api_call("search.messages", http_verb="GET", params=kwargs)
def stars_add(self, **kwargs) -> Union[Future, SlackResponse]:
"""Adds a star to an item.
Args:
channel (str): Channel to add star to, or channel where the message to add
star to was posted (used with timestamp). e.g. 'C1234567890'
file (str): File to add star to. e.g. 'F1234567890'
file_comment (str): File comment to add star to. e.g. 'Fc1234567890'
timestamp (str): Timestamp of the message to add star to. e.g. '1234567890.123456'
"""
return self.api_call("stars.add", json=kwargs)
def stars_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Lists stars for a user."""
return self.api_call("stars.list", http_verb="GET", params=kwargs)
def stars_remove(self, **kwargs) -> Union[Future, SlackResponse]:
"""Removes a star from an item.
Args:
channel (str): Channel to remove star from, or channel where
the message to remove star from was posted (used with timestamp). e.g. 'C1234567890'
file (str): File to remove star from. e.g. 'F1234567890'
file_comment (str): File comment to remove star from. e.g. 'Fc1234567890'
timestamp (str): Timestamp of the message to remove star from. e.g. '1234567890.123456'
"""
return self.api_call("stars.remove", json=kwargs)
def team_accessLogs(self, **kwargs) -> Union[Future, SlackResponse]:
"""Gets the access logs for the current team."""
return self.api_call("team.accessLogs", http_verb="GET", params=kwargs)
def team_billableInfo(self, **kwargs) -> Union[Future, SlackResponse]:
"""Gets billable users information for the current team."""
return self.api_call("team.billableInfo", http_verb="GET", params=kwargs)
def team_info(self, **kwargs) -> Union[Future, SlackResponse]:
"""Gets information about the current team."""
return self.api_call("team.info", http_verb="GET", params=kwargs)
def team_integrationLogs(self, **kwargs) -> Union[Future, SlackResponse]:
"""Gets the integration logs for the current team."""
return self.api_call("team.integrationLogs", http_verb="GET", params=kwargs)
def team_profile_get(self, **kwargs) -> Union[Future, SlackResponse]:
"""Retrieve a team's profile."""
return self.api_call("team.profile.get", http_verb="GET", params=kwargs)
def usergroups_create(self, *, name: str, **kwargs) -> Union[Future, SlackResponse]:
"""Create a User Group
Args:
name (str): A name for the User Group. Must be unique among User Groups.
e.g. 'My Test Team'
"""
kwargs.update({"name": name})
return self.api_call("usergroups.create", json=kwargs)
def usergroups_disable(
self, *, usergroup: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Disable an existing User Group
Args:
usergroup (str): The encoded ID of the User Group to disable.
e.g. 'S0604QSJC'
"""
kwargs.update({"usergroup": usergroup})
return self.api_call("usergroups.disable", json=kwargs)
def usergroups_enable(
self, *, usergroup: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Enable a User Group
Args:
usergroup (str): The encoded ID of the User Group to enable.
e.g. 'S0604QSJC'
"""
kwargs.update({"usergroup": usergroup})
return self.api_call("usergroups.enable", json=kwargs)
def usergroups_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""List all User Groups for a team"""
return self.api_call("usergroups.list", http_verb="GET", params=kwargs)
def usergroups_update(
self, *, usergroup: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Update an existing User Group
Args:
usergroup (str): The encoded ID of the User Group to update.
e.g. 'S0604QSJC'
"""
kwargs.update({"usergroup": usergroup})
return self.api_call("usergroups.update", json=kwargs)
def usergroups_users_list(
self, *, usergroup: str, **kwargs
) -> Union[Future, SlackResponse]:
"""List all users in a User Group
Args:
usergroup (str): The encoded ID of the User Group to update.
e.g. 'S0604QSJC'
"""
kwargs.update({"usergroup": usergroup})
return self.api_call("usergroups.users.list", http_verb="GET", params=kwargs)
def usergroups_users_update(
    self, *, usergroup: str, users: Union[str, List[str]], **kwargs
) -> Union[Future, SlackResponse]:
    """Update the list of users for a User Group

    Args:
        usergroup (str): The encoded ID of the User Group to update.
            e.g. 'S0604QSJC'
        users (str or list): A list user IDs that represent the entire list of
            users for the User Group. e.g. ['U060R4BJ4', 'U060RNRCZ']
    """
    kwargs["usergroup"] = usergroup
    # Collapse a list of user IDs into the comma-separated string the API expects.
    kwargs["users"] = ",".join(users) if isinstance(users, list) else users
    return self.api_call("usergroups.users.update", json=kwargs)
def users_conversations(self, **kwargs) -> Union[Future, SlackResponse]:
"""List conversations the calling user may access."""
return self.api_call("users.conversations", http_verb="GET", params=kwargs)
def users_deletePhoto(self, **kwargs) -> Union[Future, SlackResponse]:
"""Delete the user profile photo"""
return self.api_call("users.deletePhoto", http_verb="GET", params=kwargs)
def users_getPresence(self, *, user: str, **kwargs) -> Union[Future, SlackResponse]:
"""Gets user presence information.
Args:
user (str): User to get presence info on. Defaults to the authed user.
e.g. 'W1234567890'
"""
kwargs.update({"user": user})
return self.api_call("users.getPresence", http_verb="GET", params=kwargs)
def users_identity(self, **kwargs) -> Union[Future, SlackResponse]:
"""Get a user's identity."""
return self.api_call("users.identity", http_verb="GET", params=kwargs)
def users_info(self, *, user: str, **kwargs) -> Union[Future, SlackResponse]:
"""Gets information about a user.
Args:
user (str): User to get info on.
e.g. 'W1234567890'
"""
kwargs.update({"user": user})
return self.api_call("users.info", http_verb="GET", params=kwargs)
def users_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Lists all users in a Slack team."""
return self.api_call("users.list", http_verb="GET", params=kwargs)
def users_lookupByEmail(
self, *, email: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Find a user with an email address.
Args:
email (str): An email address belonging to a user in the workspace.
e.g. 'spengler@ghostbusters.example.com'
"""
kwargs.update({"email": email})
return self.api_call("users.lookupByEmail", http_verb="GET", params=kwargs)
def users_setPhoto(
self, *, image: Union[str, IOBase], **kwargs
) -> Union[Future, SlackResponse]:
"""Set the user profile photo
Args:
image (str): Supply the path of the image you'd like to upload.
e.g. 'myimage.png'
"""
return self.api_call("users.setPhoto", files={"image": image}, data=kwargs)
def users_setPresence(
self, *, presence: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Manually sets user presence.
Args:
presence (str): Either 'auto' or 'away'.
"""
kwargs.update({"presence": presence})
return self.api_call("users.setPresence", json=kwargs)
def users_profile_get(self, **kwargs) -> Union[Future, SlackResponse]:
"""Retrieves a user's profile information."""
return self.api_call("users.profile.get", http_verb="GET", params=kwargs)
def users_profile_set(self, **kwargs) -> Union[Future, SlackResponse]:
"""Set the profile information for a user."""
return self.api_call("users.profile.set", json=kwargs)
def views_open(
    self, *, trigger_id: str, view: Union[dict, View], **kwargs
) -> Union[Future, SlackResponse]:
    """Open a view for a user.

    Open a modal with a user by exchanging a trigger_id received
    from another interaction.
    See the modals (https://api.slack.com/block-kit/surfaces/modals)
    documentation to learn how to obtain triggers from interactive components.

    Args:
        trigger_id (str): Exchange a trigger to post to the user.
            e.g. '12345.98765.abcd2358fdea'
        view (dict or View): The view payload.
    """
    # A View object is serialized to its dict form; a plain dict passes through.
    payload = view.to_dict() if isinstance(view, View) else view
    kwargs.update({"trigger_id": trigger_id, "view": payload})
    return self.api_call("views.open", json=kwargs)
def views_push(
    self, *, trigger_id: str, view: Union[dict, View], **kwargs
) -> Union[Future, SlackResponse]:
    """Push a view onto the stack of a root view.

    Push a new view onto the existing view stack by passing a view
    payload and a valid trigger_id generated from an interaction
    within the existing modal.
    Read the modals documentation (https://api.slack.com/block-kit/surfaces/modals)
    to learn more about the lifecycle and intricacies of views.

    Args:
        trigger_id (str): Exchange a trigger to post to the user.
            e.g. '12345.98765.abcd2358fdea'
        view (dict or View): The view payload.
    """
    kwargs.update({"trigger_id": trigger_id})
    # Accept a View object like views_open does, serializing it for the API.
    if isinstance(view, View):
        kwargs.update({"view": view.to_dict()})
    else:
        kwargs.update({"view": view})
    return self.api_call("views.push", json=kwargs)
def views_update(
    self,
    *,
    view: Union[dict, View],
    external_id: str = None,
    view_id: str = None,
    **kwargs
) -> Union[Future, SlackResponse]:
    """Update an existing view.

    Update a view by passing a new view definition along with the
    view_id returned in views.open or the external_id.
    See the modals documentation (https://api.slack.com/block-kit/surfaces/modals#updating_views)
    to learn more about updating views and avoiding race conditions with the hash argument.

    Args:
        view (dict or View): The view payload.
        external_id (str): A unique identifier of the view set by the developer.
            e.g. 'bmarley_view2'
        view_id (str): A unique identifier of the view to be updated.
            e.g. 'VMM512F2U'
    Raises:
        SlackRequestError: Either view_id or external_id is required.
    """
    # Accept a View object like views_open does, serializing it for the API.
    if isinstance(view, View):
        kwargs.update({"view": view.to_dict()})
    else:
        kwargs.update({"view": view})
    # external_id takes precedence when both identifiers are supplied.
    if external_id:
        kwargs.update({"external_id": external_id})
    elif view_id:
        kwargs.update({"view_id": view_id})
    else:
        raise e.SlackRequestError("Either view_id or external_id is required.")
    return self.api_call("views.update", json=kwargs)
def views_publish(
    self, *, user_id: str, view: Union[dict, View], **kwargs
) -> Union[Future, SlackResponse]:
    """Publish a static view for a User.

    Create or update the view that comprises an
    app's Home tab (https://api.slack.com/surfaces/tabs)
    for a specific user.

    Args:
        user_id (str): id of the user you want publish a view to.
            e.g. 'U0BPQUNTA'
        view (dict or View): The view payload.
    """
    kwargs.update({"user_id": user_id})
    # Accept a View object like views_open does, serializing it for the API.
    if isinstance(view, View):
        kwargs.update({"view": view.to_dict()})
    else:
        kwargs.update({"view": view})
    return self.api_call("views.publish", json=kwargs)
| 39.768151 | 120 | 0.60082 | import os
from asyncio import Future
from io import IOBase
from typing import Union, List, Optional, Dict
import slack.errors as e
from slack.web.base_client import BaseClient, SlackResponse
from slack.web.classes.views import View
class WebClient(BaseClient):
def admin_apps_approve(
    self, *, app_id: str = None, request_id: str = None, **kwargs
) -> Union[Future, SlackResponse]:
    """Approve an app request via ``admin.apps.approve``.

    Exactly one of ``app_id`` or ``request_id`` must be supplied;
    ``app_id`` takes precedence when both are given.

    Raises:
        SlackRequestError: If neither app_id nor request_id is specified.
    """
    if app_id:
        kwargs["app_id"] = app_id
    elif request_id:
        kwargs["request_id"] = request_id
    else:
        raise e.SlackRequestError(
            "The app_id or request_id argument must be specified."
        )
    return self.api_call("admin.apps.approve", json=kwargs)
def admin_apps_approved_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("admin.apps.approved.list", http_verb="GET", params=kwargs)
def admin_apps_requests_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("admin.apps.requests.list", http_verb="GET", params=kwargs)
def admin_apps_restrict(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("admin.apps.restrict", json=kwargs)
def admin_apps_restricted_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call(
"admin.apps.restricted.list", http_verb="GET", params=kwargs
)
def admin_conversations_restrictAccess_addGroup(
self, **kwargs
) -> Union[Future, SlackResponse]:
return self.api_call(
"admin.conversations.restrictAccess.addGroup",
http_verb="GET",
params=kwargs,
)
def admin_conversations_restrictAccess_listGroups(
self, **kwargs
) -> Union[Future, SlackResponse]:
return self.api_call(
"admin.conversations.restrictAccess.listGroups",
http_verb="GET",
params=kwargs,
)
def admin_conversations_restrictAccess_removeGroup(
self, **kwargs
) -> Union[Future, SlackResponse]:
return self.api_call(
"admin.conversations.restrictAccess.removeGroup",
http_verb="GET",
params=kwargs,
)
def admin_conversations_setTeams(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("admin.conversations.setTeams", json=kwargs)
def admin_emoji_add(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("admin.emoji.add", http_verb="GET", params=kwargs)
def admin_emoji_addAlias(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("admin.emoji.addAlias", http_verb="GET", params=kwargs)
def admin_emoji_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("admin.emoji.list", http_verb="GET", params=kwargs)
def admin_emoji_remove(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("admin.emoji.remove", http_verb="GET", params=kwargs)
def admin_emoji_rename(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("admin.emoji.rename", http_verb="GET", params=kwargs)
def admin_users_session_reset(
    self, *, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Reset sessions for a user via the ``admin.users.session.reset`` Web API method.

    Args:
        user_id (str): The ID of the user whose sessions are reset.
    """
    kwargs.update({"user_id": user_id})
    return self.api_call("admin.users.session.reset", json=kwargs)
def admin_inviteRequests_approve(
self, *, invite_request_id: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"invite_request_id": invite_request_id})
return self.api_call("admin.inviteRequests.approve", json=kwargs)
def admin_inviteRequests_approved_list(
self, **kwargs
) -> Union[Future, SlackResponse]:
return self.api_call("admin.inviteRequests.approved.list", json=kwargs)
def admin_inviteRequests_denied_list(
self, **kwargs
) -> Union[Future, SlackResponse]:
return self.api_call("admin.inviteRequests.denied.list", json=kwargs)
def admin_inviteRequests_deny(
self, *, invite_request_id: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"invite_request_id": invite_request_id})
return self.api_call("admin.inviteRequests.deny", json=kwargs)
def admin_inviteRequests_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("admin.inviteRequests.list", json=kwargs)
def admin_teams_admins_list(
self, *, team_id: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"team_id": team_id})
return self.api_call("admin.teams.admins.list", http_verb="GET", params=kwargs)
def admin_teams_create(
    self, *, team_domain: str, team_name: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Create a team via the ``admin.teams.create`` Web API method.

    Args:
        team_domain (str): Domain for the new workspace.
        team_name (str): Display name for the new workspace.
    """
    kwargs.update({"team_domain": team_domain, "team_name": team_name})
    return self.api_call("admin.teams.create", json=kwargs)
def admin_teams_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("admin.teams.list", json=kwargs)
def admin_teams_owners_list(
self, *, team_id: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"team_id": team_id})
return self.api_call("admin.teams.owners.list", http_verb="GET", params=kwargs)
def admin_teams_settings_info(
self, team_id: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"team_id": team_id})
return self.api_call("admin.teams.settings.info", json=kwargs)
def admin_teams_settings_setDefaultChannels(
    self, *, team_id: str, channel_ids: Union[str, List[str]], **kwargs
) -> Union[Future, SlackResponse]:
    """Set the default channels of a workspace.

    ``channel_ids`` may be a comma-separated string or a list of channel IDs;
    a list is joined into the comma-separated form the API expects.
    """
    ids = ",".join(channel_ids) if isinstance(channel_ids, list) else channel_ids
    kwargs.update({"team_id": team_id, "channel_ids": ids})
    return self.api_call(
        "admin.teams.settings.setDefaultChannels", http_verb="GET", params=kwargs
    )
def admin_teams_settings_setDescription(
self, *, team_id: str, description: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"team_id": team_id, "description": description})
return self.api_call("admin.teams.settings.setDescription", json=kwargs)
def admin_teams_settings_setDiscoverability(
self, *, team_id: str, discoverability: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"team_id": team_id, "discoverability": discoverability})
return self.api_call("admin.teams.settings.setDiscoverability", json=kwargs)
def admin_teams_settings_setIcon(
self, *, team_id: str, image_url: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"team_id": team_id, "image_url": image_url})
return self.api_call(
"admin.teams.settings.setIcon", http_verb="GET", params=kwargs
)
def admin_teams_settings_setName(
self, *, team_id: str, name: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"team_id": team_id, "name": name})
return self.api_call("admin.teams.settings.setName", json=kwargs)
def admin_usergroups_addChannels(
self,
*,
team_id: str,
usergroup_id: str,
channel_ids: Union[str, List[str]],
**kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"team_id": team_id, "usergroup_id": usergroup_id})
if isinstance(channel_ids, list):
kwargs.update({"channel_ids": ",".join(channel_ids)})
else:
kwargs.update({"channel_ids": channel_ids})
return self.api_call("admin.usergroups.addChannels", json=kwargs)
def admin_usergroups_listChannels(
self, *, usergroup_id: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"usergroup_id": usergroup_id})
return self.api_call("admin.usergroups.listChannels", json=kwargs)
def admin_usergroups_removeChannels(
self, *, usergroup_id: str, channel_ids: Union[str, List[str]], **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"usergroup_id": usergroup_id})
if isinstance(channel_ids, list):
kwargs.update({"channel_ids": ",".join(channel_ids)})
else:
kwargs.update({"channel_ids": channel_ids})
return self.api_call("admin.usergroups.removeChannels", json=kwargs)
def admin_users_assign(
self, *, team_id: str, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"team_id": team_id, "user_id": user_id})
return self.api_call("admin.users.assign", json=kwargs)
def admin_users_invite(
    self, *, team_id: str, email: str, channel_ids: Union[str, List[str]], **kwargs
) -> Union[Future, SlackResponse]:
    """Invite a user to a workspace and add them to the given channels.

    ``channel_ids`` may be a comma-separated string or a list of channel IDs.
    """
    ids = ",".join(channel_ids) if isinstance(channel_ids, list) else channel_ids
    kwargs.update({"team_id": team_id, "email": email, "channel_ids": ids})
    return self.api_call("admin.users.invite", json=kwargs)
def admin_users_list(
self, *, team_id: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"team_id": team_id})
return self.api_call("admin.users.list", json=kwargs)
def admin_users_remove(
self, *, team_id: str, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"team_id": team_id, "user_id": user_id})
return self.api_call("admin.users.remove", json=kwargs)
def admin_users_setAdmin(
self, *, team_id: str, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"team_id": team_id, "user_id": user_id})
return self.api_call("admin.users.setAdmin", json=kwargs)
def admin_users_setExpiration(
self, *, expiration_ts: int, team_id: str, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update(
{"expiration_ts": expiration_ts, "team_id": team_id, "user_id": user_id}
)
return self.api_call("admin.users.setExpiration", json=kwargs)
def admin_users_setOwner(
self, *, team_id: str, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"team_id": team_id, "user_id": user_id})
return self.api_call("admin.users.setOwner", json=kwargs)
def admin_users_setRegular(
self, *, team_id: str, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"team_id": team_id, "user_id": user_id})
return self.api_call("admin.users.setRegular", json=kwargs)
def api_test(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("api.test", json=kwargs)
def auth_revoke(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("auth.revoke", http_verb="GET", params=kwargs)
def auth_test(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("auth.test", json=kwargs)
def bots_info(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("bots.info", http_verb="GET", params=kwargs)
def calls_add(
    self, *, external_unique_id: str, join_url: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Registers a new Call with the given external ID and join URL.

    NOTE(review): an optional ``users`` kwarg is passed through
    ``_update_call_participants`` (defined elsewhere on the class) before the
    request is made — presumably to serialize participant objects; confirm.
    """
    kwargs.update({"external_unique_id": external_unique_id, "join_url": join_url})
    self._update_call_participants(kwargs, kwargs.get("users", None))
    return self.api_call("calls.add", http_verb="POST", params=kwargs)
def calls_end(self, *, id: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"id": id})
return self.api_call("calls.end", http_verb="POST", params=kwargs)
def calls_info(self, *, id: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"id": id})
return self.api_call("calls.info", http_verb="POST", params=kwargs)
def calls_participants_add(
self, *, id: str, users: Union[str, List[Dict[str, str]]], **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"id": id})
self._update_call_participants(kwargs, users)
return self.api_call("calls.participants.add", http_verb="POST", params=kwargs)
def calls_update(self, *, id: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"id": id})
return self.api_call("calls.update", http_verb="POST", params=kwargs)
def channels_archive(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("channels.archive", json=kwargs)
def channels_create(self, *, name: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"name": name})
return self.api_call("channels.create", json=kwargs)
def channels_history(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("channels.history", http_verb="GET", params=kwargs)
def channels_info(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("channels.info", http_verb="GET", params=kwargs)
def channels_invite(
self, *, channel: str, user: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "user": user})
return self.api_call("channels.invite", json=kwargs)
def channels_join(self, *, name: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"name": name})
return self.api_call("channels.join", json=kwargs)
def channels_kick(
self, *, channel: str, user: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "user": user})
return self.api_call("channels.kick", json=kwargs)
def channels_leave(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("channels.leave", json=kwargs)
def channels_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("channels.list", http_verb="GET", params=kwargs)
def channels_mark(
self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "ts": ts})
return self.api_call("channels.mark", json=kwargs)
def channels_rename(
self, *, channel: str, name: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "name": name})
return self.api_call("channels.rename", json=kwargs)
def channels_replies(
self, *, channel: str, thread_ts: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "thread_ts": thread_ts})
return self.api_call("channels.replies", http_verb="GET", params=kwargs)
def channels_setPurpose(
self, *, channel: str, purpose: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "purpose": purpose})
return self.api_call("channels.setPurpose", json=kwargs)
def channels_setTopic(
self, *, channel: str, topic: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "topic": topic})
return self.api_call("channels.setTopic", json=kwargs)
def channels_unarchive(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("channels.unarchive", json=kwargs)
def chat_delete(
self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "ts": ts})
return self.api_call("chat.delete", json=kwargs)
def chat_deleteScheduledMessage(
self, *, channel: str, scheduled_message_id: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update(
{"channel": channel, "scheduled_message_id": scheduled_message_id}
)
return self.api_call("chat.deleteScheduledMessage", json=kwargs)
def chat_getPermalink(
self, *, channel: str, message_ts: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "message_ts": message_ts})
return self.api_call("chat.getPermalink", http_verb="GET", params=kwargs)
def chat_meMessage(
self, *, channel: str, text: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "text": text})
return self.api_call("chat.meMessage", json=kwargs)
def chat_postEphemeral(
self, *, channel: str, user: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "user": user})
self._parse_web_class_objects(kwargs)
return self.api_call("chat.postEphemeral", json=kwargs)
def chat_postMessage(
    self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Sends a message to a channel.

    Message payload fields (text, blocks, attachments, etc.) are passed via
    kwargs; ``_parse_web_class_objects`` converts any rich objects in kwargs
    before the request is serialized as JSON.
    """
    kwargs.update({"channel": channel})
    self._parse_web_class_objects(kwargs)
    return self.api_call("chat.postMessage", json=kwargs)
def chat_scheduleMessage(
self, *, channel: str, post_at: str, text: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "post_at": post_at, "text": text})
self._parse_web_class_objects(kwargs)
return self.api_call("chat.scheduleMessage", json=kwargs)
def chat_unfurl(
self, *, channel: str, ts: str, unfurls: dict, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "ts": ts, "unfurls": unfurls})
return self.api_call("chat.unfurl", json=kwargs)
def chat_update(
self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "ts": ts})
self._parse_web_class_objects(kwargs)
return self.api_call("chat.update", json=kwargs)
def chat_scheduledMessages_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("chat.scheduledMessages.list", json=kwargs)
def conversations_archive(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("conversations.archive", json=kwargs)
def conversations_close(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("conversations.close", json=kwargs)
def conversations_create(
self, *, name: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"name": name})
return self.api_call("conversations.create", json=kwargs)
def conversations_history(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("conversations.history", http_verb="GET", params=kwargs)
def conversations_info(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("conversations.info", http_verb="GET", params=kwargs)
def conversations_invite(
    self, *, channel: str, users: Union[str, List[str]], **kwargs
) -> Union[Future, SlackResponse]:
    """Invites users to a channel.

    ``users`` may be a comma-separated string of user IDs or a list of IDs.
    """
    user_csv = ",".join(users) if isinstance(users, list) else users
    kwargs.update({"channel": channel, "users": user_csv})
    return self.api_call("conversations.invite", json=kwargs)
def conversations_join(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("conversations.join", json=kwargs)
def conversations_kick(
self, *, channel: str, user: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "user": user})
return self.api_call("conversations.kick", json=kwargs)
def conversations_leave(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("conversations.leave", json=kwargs)
def conversations_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("conversations.list", http_verb="GET", params=kwargs)
def conversations_mark(
self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "ts": ts})
return self.api_call("conversations.mark", json=kwargs)
def conversations_members(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("conversations.members", http_verb="GET", params=kwargs)
def conversations_open(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("conversations.open", json=kwargs)
def conversations_rename(
self, *, channel: str, name: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "name": name})
return self.api_call("conversations.rename", json=kwargs)
def conversations_replies(
self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "ts": ts})
return self.api_call("conversations.replies", http_verb="GET", params=kwargs)
def conversations_setPurpose(
self, *, channel: str, purpose: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "purpose": purpose})
return self.api_call("conversations.setPurpose", json=kwargs)
def conversations_setTopic(
self, *, channel: str, topic: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "topic": topic})
return self.api_call("conversations.setTopic", json=kwargs)
def conversations_unarchive(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("conversations.unarchive", json=kwargs)
def dialog_open(
self, *, dialog: dict, trigger_id: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"dialog": dialog, "trigger_id": trigger_id})
return self.api_call("dialog.open", json=kwargs)
def dnd_endDnd(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("dnd.endDnd", json=kwargs)
def dnd_endSnooze(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("dnd.endSnooze", json=kwargs)
def dnd_info(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("dnd.info", http_verb="GET", params=kwargs)
def dnd_setSnooze(
self, *, num_minutes: int, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"num_minutes": num_minutes})
return self.api_call("dnd.setSnooze", http_verb="GET", params=kwargs)
def dnd_teamInfo(
    self, users: Union[str, List[str]], **kwargs
) -> Union[Future, SlackResponse]:
    """Retrieves the Do Not Disturb status for a set of users.

    ``users`` may be a comma-separated string of user IDs or a list of IDs.
    """
    kwargs.update(
        {"users": ",".join(users) if isinstance(users, list) else users}
    )
    return self.api_call("dnd.teamInfo", http_verb="GET", params=kwargs)
def emoji_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("emoji.list", http_verb="GET", params=kwargs)
def files_comments_delete(
self, *, file: str, id: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"file": file, "id": id})
return self.api_call("files.comments.delete", json=kwargs)
def files_delete(self, *, file: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"file": file})
return self.api_call("files.delete", json=kwargs)
def files_info(self, *, file: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"file": file})
return self.api_call("files.info", http_verb="GET", params=kwargs)
def files_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("files.list", http_verb="GET", params=kwargs)
def files_remote_info(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("files.remote.info", http_verb="GET", params=kwargs)
def files_remote_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("files.remote.list", http_verb="GET", params=kwargs)
def files_remote_add(
self, *, external_id: str, external_url: str, title: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update(
{"external_id": external_id, "external_url": external_url, "title": title}
)
files = None
if "preview_image" in kwargs:
files = {"preview_image": kwargs.pop("preview_image")}
return self.api_call(
"files.remote.add",
http_verb="POST",
data=kwargs,
files=files,
)
def files_remote_update(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("files.remote.update", http_verb="GET", params=kwargs)
def files_remote_remove(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("files.remote.remove", http_verb="GET", params=kwargs)
def files_remote_share(
self, *, channels: Union[str, List[str]], **kwargs
) -> Union[Future, SlackResponse]:
if isinstance(channels, list):
kwargs.update({"channels": ",".join(channels)})
else:
kwargs.update({"channels": channels})
return self.api_call("files.remote.share", http_verb="GET", params=kwargs)
def files_revokePublicURL(
self, *, file: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"file": file})
return self.api_call("files.revokePublicURL", json=kwargs)
def files_sharedPublicURL(
self, *, file: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"file": file})
return self.api_call("files.sharedPublicURL", json=kwargs)
def files_upload(
    self, *, file: Union[str, IOBase] = None, content: str = None, **kwargs
) -> Union[Future, SlackResponse]:
    """Uploads or creates a file.

    Args:
        file: A path to a file (str) or a file-like object to upload.
        content: The file contents as a string. Exactly one of ``file`` or
            ``content`` must be provided.

    Raises:
        SlackRequestError: If neither or both of ``file`` and ``content``
            are specified.
    """
    if file is None and content is None:
        raise e.SlackRequestError("The file or content argument must be specified.")
    if file is not None and content is not None:
        raise e.SlackRequestError(
            "You cannot specify both the file and the content argument."
        )
    if file:
        # Only a path string can yield a default filename; a file-like object
        # has no path to split (the unguarded split raised AttributeError).
        if "filename" not in kwargs and isinstance(file, str):
            kwargs["filename"] = file.split(os.path.sep)[-1]
        return self.api_call("files.upload", files={"file": file}, data=kwargs)
    data = kwargs.copy()
    data.update({"content": content})
    return self.api_call("files.upload", data=data)
def groups_archive(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("groups.archive", json=kwargs)
def groups_create(self, *, name: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"name": name})
return self.api_call("groups.create", json=kwargs)
def groups_createChild(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("groups.createChild", http_verb="GET", params=kwargs)
def groups_history(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("groups.history", http_verb="GET", params=kwargs)
def groups_info(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("groups.info", http_verb="GET", params=kwargs)
def groups_invite(
self, *, channel: str, user: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "user": user})
return self.api_call("groups.invite", json=kwargs)
def groups_kick(
self, *, channel: str, user: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "user": user})
return self.api_call("groups.kick", json=kwargs)
def groups_leave(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("groups.leave", json=kwargs)
def groups_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("groups.list", http_verb="GET", params=kwargs)
def groups_mark(
self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "ts": ts})
return self.api_call("groups.mark", json=kwargs)
def groups_open(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("groups.open", json=kwargs)
def groups_rename(
self, *, channel: str, name: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "name": name})
return self.api_call("groups.rename", json=kwargs)
def groups_replies(
self, *, channel: str, thread_ts: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "thread_ts": thread_ts})
return self.api_call("groups.replies", http_verb="GET", params=kwargs)
def groups_setPurpose(
self, *, channel: str, purpose: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "purpose": purpose})
return self.api_call("groups.setPurpose", json=kwargs)
def groups_setTopic(
self, *, channel: str, topic: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "topic": topic})
return self.api_call("groups.setTopic", json=kwargs)
def groups_unarchive(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("groups.unarchive", json=kwargs)
def im_close(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("im.close", json=kwargs)
def im_history(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("im.history", http_verb="GET", params=kwargs)
def im_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("im.list", http_verb="GET", params=kwargs)
def im_mark(
self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "ts": ts})
return self.api_call("im.mark", json=kwargs)
def im_open(self, *, user: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"user": user})
return self.api_call("im.open", json=kwargs)
def im_replies(
self, *, channel: str, thread_ts: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "thread_ts": thread_ts})
return self.api_call("im.replies", http_verb="GET", params=kwargs)
def migration_exchange(
self, *, users: Union[str, List[str]], **kwargs
) -> Union[Future, SlackResponse]:
if isinstance(users, list):
kwargs.update({"users": ",".join(users)})
else:
kwargs.update({"users": users})
return self.api_call("migration.exchange", http_verb="GET", params=kwargs)
def mpim_close(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("mpim.close", json=kwargs)
def mpim_history(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("mpim.history", http_verb="GET", params=kwargs)
def mpim_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("mpim.list", http_verb="GET", params=kwargs)
def mpim_mark(
self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "ts": ts})
return self.api_call("mpim.mark", json=kwargs)
def mpim_open(
    self, *, users: Union[str, List[str]], **kwargs
) -> Union[Future, SlackResponse]:
    """Opens a multiparty direct message channel with the given users.

    ``users`` may be a comma-separated string of user IDs or a list of IDs.
    """
    kwargs.update(
        {"users": ",".join(users) if isinstance(users, list) else users}
    )
    return self.api_call("mpim.open", json=kwargs)
def mpim_replies(
self, *, channel: str, thread_ts: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel, "thread_ts": thread_ts})
return self.api_call("mpim.replies", http_verb="GET", params=kwargs)
def oauth_v2_access(
    self,
    *,
    client_id: str,
    client_secret: str,
    code: str,
    redirect_uri: Optional[str] = None,
    **kwargs
) -> Union[Future, SlackResponse]:
    """Exchanges a temporary OAuth verifier ``code`` for an access token (OAuth v2).

    Authenticates with the app's client credentials (HTTP Basic auth via the
    ``auth`` argument) rather than a bearer token; ``redirect_uri`` is only
    sent when explicitly provided.
    """
    if redirect_uri is not None:
        kwargs.update({"redirect_uri": redirect_uri})
    kwargs.update({"code": code})
    return self.api_call(
        "oauth.v2.access",
        data=kwargs,
        auth={"client_id": client_id, "client_secret": client_secret},
    )
def oauth_access(
self,
*,
client_id: str,
client_secret: str,
code: str,
redirect_uri: Optional[str] = None,
**kwargs
) -> Union[Future, SlackResponse]:
if redirect_uri is not None:
kwargs.update({"redirect_uri": redirect_uri})
kwargs.update({"code": code})
return self.api_call(
"oauth.access",
data=kwargs,
auth={"client_id": client_id, "client_secret": client_secret},
)
def pins_add(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("pins.add", json=kwargs)
def pins_list(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("pins.list", http_verb="GET", params=kwargs)
def pins_remove(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"channel": channel})
return self.api_call("pins.remove", json=kwargs)
def reactions_add(self, *, name: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"name": name})
return self.api_call("reactions.add", json=kwargs)
def reactions_get(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("reactions.get", http_verb="GET", params=kwargs)
def reactions_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("reactions.list", http_verb="GET", params=kwargs)
def reactions_remove(self, *, name: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"name": name})
return self.api_call("reactions.remove", json=kwargs)
def reminders_add(
self, *, text: str, time: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"text": text, "time": time})
return self.api_call("reminders.add", json=kwargs)
def reminders_complete(
self, *, reminder: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"reminder": reminder})
return self.api_call("reminders.complete", json=kwargs)
def reminders_delete(
self, *, reminder: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"reminder": reminder})
return self.api_call("reminders.delete", json=kwargs)
def reminders_info(
self, *, reminder: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"reminder": reminder})
return self.api_call("reminders.info", http_verb="GET", params=kwargs)
def reminders_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("reminders.list", http_verb="GET", params=kwargs)
def rtm_connect(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("rtm.connect", http_verb="GET", params=kwargs)
def rtm_start(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("rtm.start", http_verb="GET", params=kwargs)
def search_all(self, *, query: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"query": query})
return self.api_call("search.all", http_verb="GET", params=kwargs)
def search_files(self, *, query: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"query": query})
return self.api_call("search.files", http_verb="GET", params=kwargs)
def search_messages(self, *, query: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"query": query})
return self.api_call("search.messages", http_verb="GET", params=kwargs)
def stars_add(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("stars.add", json=kwargs)
def stars_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("stars.list", http_verb="GET", params=kwargs)
def stars_remove(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("stars.remove", json=kwargs)
def team_accessLogs(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("team.accessLogs", http_verb="GET", params=kwargs)
def team_billableInfo(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("team.billableInfo", http_verb="GET", params=kwargs)
def team_info(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("team.info", http_verb="GET", params=kwargs)
def team_integrationLogs(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("team.integrationLogs", http_verb="GET", params=kwargs)
def team_profile_get(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("team.profile.get", http_verb="GET", params=kwargs)
def usergroups_create(self, *, name: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"name": name})
return self.api_call("usergroups.create", json=kwargs)
def usergroups_disable(
self, *, usergroup: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"usergroup": usergroup})
return self.api_call("usergroups.disable", json=kwargs)
def usergroups_enable(
self, *, usergroup: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"usergroup": usergroup})
return self.api_call("usergroups.enable", json=kwargs)
def usergroups_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("usergroups.list", http_verb="GET", params=kwargs)
def usergroups_update(
self, *, usergroup: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"usergroup": usergroup})
return self.api_call("usergroups.update", json=kwargs)
def usergroups_users_list(
self, *, usergroup: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"usergroup": usergroup})
return self.api_call("usergroups.users.list", http_verb="GET", params=kwargs)
def usergroups_users_update(
    self, *, usergroup: str, users: Union[str, List[str]], **kwargs
) -> Union[Future, SlackResponse]:
    """Updates the list of users that belong to a user group.

    ``users`` may be a comma-separated string of user IDs or a list of IDs.
    """
    user_csv = ",".join(users) if isinstance(users, list) else users
    kwargs.update({"usergroup": usergroup, "users": user_csv})
    return self.api_call("usergroups.users.update", json=kwargs)
def users_conversations(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("users.conversations", http_verb="GET", params=kwargs)
def users_deletePhoto(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("users.deletePhoto", http_verb="GET", params=kwargs)
def users_getPresence(self, *, user: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"user": user})
return self.api_call("users.getPresence", http_verb="GET", params=kwargs)
def users_identity(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("users.identity", http_verb="GET", params=kwargs)
def users_info(self, *, user: str, **kwargs) -> Union[Future, SlackResponse]:
kwargs.update({"user": user})
return self.api_call("users.info", http_verb="GET", params=kwargs)
def users_list(self, **kwargs) -> Union[Future, SlackResponse]:
return self.api_call("users.list", http_verb="GET", params=kwargs)
def users_lookupByEmail(
self, *, email: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"email": email})
return self.api_call("users.lookupByEmail", http_verb="GET", params=kwargs)
def users_setPhoto(
self, *, image: Union[str, IOBase], **kwargs
) -> Union[Future, SlackResponse]:
return self.api_call("users.setPhoto", files={"image": image}, data=kwargs)
def users_setPresence(
self, *, presence: str, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"presence": presence})
return self.api_call("users.setPresence", json=kwargs)
    def users_profile_get(self, **kwargs) -> Union[Future, SlackResponse]:
        """Call the ``users.profile.get`` Web API method via HTTP GET.

        Keyword arguments are forwarded as query parameters.
        """
        return self.api_call("users.profile.get", http_verb="GET", params=kwargs)
    def users_profile_set(self, **kwargs) -> Union[Future, SlackResponse]:
        """Call the ``users.profile.set`` Web API method.

        Keyword arguments are sent as the JSON request body.
        """
        return self.api_call("users.profile.set", json=kwargs)
def views_open(
self, *, trigger_id: str, view: Union[dict, View], **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"trigger_id": trigger_id})
if isinstance(view, View):
kwargs.update({"view": view.to_dict()})
else:
kwargs.update({"view": view})
return self.api_call("views.open", json=kwargs)
def views_push(
self, *, trigger_id: str, view: dict, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"trigger_id": trigger_id, "view": view})
return self.api_call("views.push", json=kwargs)
def views_update(
self, *, view: dict, external_id: str = None, view_id: str = None, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"view": view})
if external_id:
kwargs.update({"external_id": external_id})
elif view_id:
kwargs.update({"view_id": view_id})
else:
raise e.SlackRequestError("Either view_id or external_id is required.")
return self.api_call("views.update", json=kwargs)
def views_publish(
self, *, user_id: str, view: dict, **kwargs
) -> Union[Future, SlackResponse]:
kwargs.update({"user_id": user_id, "view": view})
return self.api_call("views.publish", json=kwargs)
| true | true |
1c3397cdc024cd5f0e653607da67ecfb3686301f | 916 | py | Python | aalh_iit_churches_002/populate-subject-column.py | johndewees/iitmigration | 4dadfbecda719d6e7d60af076a231aedec3c862f | [
"Unlicense"
] | null | null | null | aalh_iit_churches_002/populate-subject-column.py | johndewees/iitmigration | 4dadfbecda719d6e7d60af076a231aedec3c862f | [
"Unlicense"
] | null | null | null | aalh_iit_churches_002/populate-subject-column.py | johndewees/iitmigration | 4dadfbecda719d6e7d60af076a231aedec3c862f | [
"Unlicense"
] | null | null | null | from openpyxl import load_workbook
filename = 'aalh_iit_churches_002.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']
# Worksheet layout: item descriptions live in column 8 (rows 7-43); the
# derived subject heading is written into column 9 of the same row.
minimumcol = 8
maximumcol = 8
minimumrow = 7
maximumrow = 43
iterationrow = 7
descol = 8
subcol = 9
for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
    testvar = ws.cell(row=iterationrow, column=descol).value
    # The iterated range covers a single column, so each `row` holds exactly
    # one cell and the classification runs once per worksheet row.  The
    # original code tested 'dwelling' and 'Dwelling' in two separate branches
    # that assigned the same value; collapse them into one check.
    if 'dwelling' in testvar or 'Dwelling' in testvar:
        ws.cell(row=iterationrow, column=subcol).value = 'Dwellings. Photographs.'
    else:
        ws.cell(row=iterationrow, column=subcol).value = 'Buildings. Photographs.'
    iterationrow = iterationrow + 1
print('*****COMPLETED*****')
# NOTE(review): no wb.save() call runs in this script (a commented-out save
# follows below), so the computed subjects are never persisted to disk --
# presumably a deliberate dry run; confirm before enabling.
#wb.save('aalh_iit_churches_002.xlsx') | 33.925926 | 105 | 0.673581 | from openpyxl import load_workbook
filename = 'aalh_iit_churches_002.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']
minimumcol = 8
maximumcol = 8
minimumrow = 7
maximumrow = 43
iterationrow = 7
descol = 8
subcol = 9
for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
testvar = ws.cell(row=iterationrow, column=descol).value
for cell in row:
if testvar.find('dwelling') != -1:
ws.cell(row=iterationrow, column=subcol).value = 'Dwellings. Photographs.'
elif testvar.find('Dwelling') != -1:
ws.cell(row=iterationrow, column=subcol).value = 'Dwellings. Photographs.'
else:
ws.cell(row=iterationrow, column=subcol).value = 'Buildings. Photographs.'
iterationrow = iterationrow + 1
print('*****COMPLETED*****')
| true | true |
1c3397e48e30fc936f6d18f9d7a3a6b56c6b794d | 2,551 | py | Python | readchip.py | mpratt14/FT232H-flash | bc542460b2bba966db910d21100a3b351c877919 | [
"MIT"
] | 3 | 2021-02-11T15:26:02.000Z | 2021-07-24T22:03:39.000Z | readchip.py | mpratt14/FT232H-flash | bc542460b2bba966db910d21100a3b351c877919 | [
"MIT"
] | null | null | null | readchip.py | mpratt14/FT232H-flash | bc542460b2bba966db910d21100a3b351c877919 | [
"MIT"
] | null | null | null | import board as FTDI
import busio as Serial
import digitalio as GPIO
#import time
# Variables
# block_start/numblocks/sector_start select which region of the flash chip to
# dump over SPI via an FT232H; uncomment `numsectors` to read a partial block.
block_start = 0
numblocks = 256
sector_start = 0
#numsectors = 16
# Configuration
CS_pin = FTDI.D7
baudrate = 40000000
# Constants
# 0x03 is the SPI command byte sent before the 3-byte address below;
# presumably the standard "read data" opcode of the target chip -- confirm
# against its datasheet.
read = 0x03
sector_length = 0x1000
block_sectors = 0x10
sector_pages = 0x10
page_start = 0x00
cell = 0x00
# instantiation, lock board for use, configure frequency, set CS pin
FT232H = Serial.SPI(FTDI.SCLK, FTDI.MOSI, FTDI.MISO)
while not FT232H.try_lock(): pass
FT232H.configure(baudrate)
CS = GPIO.DigitalInOut(CS_pin)
CS.direction = GPIO.Direction.OUTPUT
response = [0] * sector_length
payload = bytearray()
numreads = 0
# main
# handle variable range, set relative variables
# Normalize (block_start, sector_start) so sector_start is always within one
# block; the *_print copies preserve the normalized values for the summary.
block_start += sector_start // block_sectors
block_start_print = block_start
sector_start_print = sector_start = sector_start % block_sectors
if 'numsectors' not in locals():
    numsectors = block_sectors
if numsectors != block_sectors:
    numblocks = (sector_start + numsectors) // block_sectors + 1
# check existance
# Refuse to run if response.bin already exists, to avoid appending to an old
# dump (the file is opened in append mode below).  NOTE(review): the bare
# except also swallows unrelated errors, not just "file not found".
try:
    check = open('response.bin', 'r+b')
    check.close()
    print('Careful! response.bin already exists, rename it!! \n\n')
    quit()
except:
    pass
# open for read
dump = open('response.bin', 'a+b')
# read cycle
for blocknum in range(numblocks):
    # `&` is bitwise but the operands are bools here, so it acts as `and`.
    # NOTE(review): `sector_end` is only bound inside the inner loop's
    # wrap-around branch; if this condition fires before that branch ran,
    # this line would raise NameError -- verify the intended ranges.
    if (numblocks > 1) & (numblocks - 1 == blocknum) & (numsectors != block_sectors) & (sector_start != 0):
        numsectors = (sector_end + numsectors) % block_sectors
    for sectornum in range(numsectors):
        block = block_start + blocknum
        sector = sector_start + sectornum
        page = page_start + (sector * sector_pages)
        # Wrap into the next block: remember where reading started, then
        # restart subsequent blocks at sector 0.
        if sector == block_sectors:
            if blocknum == 0:
                sector_end = sector_start
            sector_start = 0
            break
        # One transaction: command byte + 3 address bytes, then clock in a
        # full sector while chip-select (active low) is held.
        instruction = [read, block, page, cell]
        print('reading block', block, 'sector', sector)
        numreads += 1
        CS.value = False
        FT232H.write(instruction)
        FT232H.readinto(response)
        CS.value = True
        #time.sleep(0.1)
        # write payload to file
        for i in range(sector_length):
            payload.extend(bytes([response[i]]))
        dump.write(payload)
        dump.flush()
        # clear buffers
        response = [0] * sector_length
        payload = bytearray()
        #time.sleep(0.1)
# Close
dump.close()
FT232H.unlock()
print('DONE')
print('read', numreads, 'sectors starting at block', block_start_print, 'sector', sector_start_print) | 17.472603 | 107 | 0.653469 | import board as FTDI
import busio as Serial
import digitalio as GPIO
block_start = 0
numblocks = 256
sector_start = 0
CS_pin = FTDI.D7
baudrate = 40000000
read = 0x03
sector_length = 0x1000
block_sectors = 0x10
sector_pages = 0x10
page_start = 0x00
cell = 0x00
FT232H = Serial.SPI(FTDI.SCLK, FTDI.MOSI, FTDI.MISO)
while not FT232H.try_lock(): pass
FT232H.configure(baudrate)
CS = GPIO.DigitalInOut(CS_pin)
CS.direction = GPIO.Direction.OUTPUT
response = [0] * sector_length
payload = bytearray()
numreads = 0
block_start += sector_start // block_sectors
block_start_print = block_start
sector_start_print = sector_start = sector_start % block_sectors
if 'numsectors' not in locals():
numsectors = block_sectors
if numsectors != block_sectors:
numblocks = (sector_start + numsectors) // block_sectors + 1
try:
check = open('response.bin', 'r+b')
check.close()
print('Careful! response.bin already exists, rename it!! \n\n')
quit()
except:
pass
dump = open('response.bin', 'a+b')
for blocknum in range(numblocks):
if (numblocks > 1) & (numblocks - 1 == blocknum) & (numsectors != block_sectors) & (sector_start != 0):
numsectors = (sector_end + numsectors) % block_sectors
for sectornum in range(numsectors):
block = block_start + blocknum
sector = sector_start + sectornum
page = page_start + (sector * sector_pages)
if sector == block_sectors:
if blocknum == 0:
sector_end = sector_start
sector_start = 0
break
instruction = [read, block, page, cell]
print('reading block', block, 'sector', sector)
numreads += 1
CS.value = False
FT232H.write(instruction)
FT232H.readinto(response)
CS.value = True
for i in range(sector_length):
payload.extend(bytes([response[i]]))
dump.write(payload)
dump.flush()
response = [0] * sector_length
payload = bytearray()
dump.close()
FT232H.unlock()
print('DONE')
print('read', numreads, 'sectors starting at block', block_start_print, 'sector', sector_start_print) | true | true |
1c3398935e4f1c08d4d352959efa3e27f3ac89e5 | 1,080 | py | Python | filereader.py | lotusronin/EmuMan | decc8e4e3299ed5c52cb699ccdf3d8b1c6113adb | [
"MIT"
] | null | null | null | filereader.py | lotusronin/EmuMan | decc8e4e3299ed5c52cb699ccdf3d8b1c6113adb | [
"MIT"
] | null | null | null | filereader.py | lotusronin/EmuMan | decc8e4e3299ed5c52cb699ccdf3d8b1c6113adb | [
"MIT"
] | null | null | null | # File: filereader.py
import configparser
import os
file_name = os.path.dirname(os.path.abspath(__file__)) + "/config.txt"
print(file_name)
class FileReader:
    """Reads emulator/console definitions for EmuMan from the config file.

    The parsed ``configparser.ConfigParser`` is stored on ``self.config``;
    each section describes one console via the keys ``emupath``,
    ``rompath``, ``console`` and ``dbfile``.
    """

    def read_config(self):
        """Parse the config file, creating a stub file if it is missing."""
        try:
            self.config = configparser.ConfigParser()
            # read_file() replaces readfp(), which was deprecated and removed
            # in Python 3.12; the `with` blocks also close the handles the
            # original code leaked.
            with open(file_name) as fh:
                self.config.read_file(fh)
        except IOError:
            print("Config file not found.\nMaking new file")
            with open(file_name, 'w') as fh:
                fh.write("# This is a config file for EmuMan")

    def ret_path(self, n):
        """Return the emulator path of the n-th configured console."""
        return self.config.get(self.config.sections()[n], "emupath")

    def ret_rom(self, n):
        """Return the ROM directory of the n-th configured console."""
        return self.config.get(self.config.sections()[n], 'rompath')

    def get_num_consoles(self):
        """Return how many consoles (config sections) are defined."""
        # len() replaces the original O(n) counting loop.
        return len(self.config.sections())

    def get_console(self, n):
        """Return the display name of the n-th configured console."""
        return self.config.get(self.config.sections()[n], 'console')

    def get_db(self, n):
        """Return the database file of the n-th configured console."""
        return self.config.get(self.config.sections()[n], 'dbfile')
| 20.769231 | 70 | 0.67963 |
import configparser
import os
file_name = os.path.dirname(os.path.abspath(__file__)) + "/config.txt"
print(file_name)
class FileReader:
def read_config(self):
try :
self.config = configparser.ConfigParser()
self.config.readfp(open(file_name))
except IOError:
print("Config file not found.\nMaking new file")
f = open(file_name, 'w')
f.write("# This is a config file for EmuMan")
f.close()
def ret_path(self, n) :
section_list = self.config.sections()
return self.config.get(section_list[n], "emupath")
def ret_rom(self, n) :
section_list = self.config.sections()
return self.config.get(section_list[n], 'rompath')
def get_num_consoles(self) :
section_list = self.config.sections()
x = 0
for item in section_list :
x += 1
return x
def get_console(self, n) :
section_list = self.config.sections()
return self.config.get(section_list[n], 'console')
def get_db(self, n) :
section_list = self.config.sections()
return self.config.get(section_list[n], 'dbfile')
| true | true |
1c33991366428f5d52c673c2fea88cb836034b3a | 21,097 | py | Python | sympy/geometry/tests/test_ellipse.py | CameronKing/sympy | 3295b02c617a10ea8db0a070356cc0ba5a3b5121 | [
"BSD-3-Clause"
] | 2 | 2019-06-12T16:15:39.000Z | 2019-10-06T10:40:59.000Z | sympy/geometry/tests/test_ellipse.py | CameronKing/sympy | 3295b02c617a10ea8db0a070356cc0ba5a3b5121 | [
"BSD-3-Clause"
] | 2 | 2017-06-29T14:11:05.000Z | 2022-01-24T09:28:04.000Z | sympy/geometry/tests/test_ellipse.py | CameronKing/sympy | 3295b02c617a10ea8db0a070356cc0ba5a3b5121 | [
"BSD-3-Clause"
] | 1 | 2015-09-18T17:27:16.000Z | 2015-09-18T17:27:16.000Z | from sympy import Rational, S, Symbol, symbols, pi, sqrt, oo, Point2D, Segment2D, I
from sympy.core.compatibility import range
from sympy.geometry import (Circle, Ellipse, GeometryError, Line, Point, Polygon, Ray, RegularPolygon, Segment,
Triangle, intersection)
from sympy.utilities.pytest import raises, slow
from sympy import integrate
from sympy.functions.special.elliptic_integrals import elliptic_e
from sympy.functions.elementary.miscellaneous import Max
def test_ellipse_equation_using_slope():
    """Check Ellipse.equation with the ``_slope`` keyword (rotated axes)."""
    from sympy.abc import x, y

    e1 = Ellipse(Point(1, 0), 3, 2)
    assert str(e1.equation(_slope=1)) == str((-x + y + 1)**2/8 + (x + y - 1)**2/18 - 1)

    e2 = Ellipse(Point(0, 0), 4, 1)
    assert str(e2.equation(_slope=1)) == str((-x + y)**2/2 + (x + y)**2/32 - 1)

    e3 = Ellipse(Point(1, 5), 6, 2)
    assert str(e3.equation(_slope=2)) == str((-2*x + y - 3)**2/20 + (x + 2*y - 11)**2/180 - 1)
def test_object_from_equation():
    """Construct Circle objects directly from implicit equations and check
    that degenerate or impossible equations raise the expected errors."""
    from sympy.abc import x, y, a, b
    assert Circle(x**2 + y**2 + 3*x + 4*y - 8) == Circle(Point2D(S(-3) / 2, -2),
                                                         sqrt(57) / 2)
    assert Circle(x**2 + y**2 + 6*x + 8*y + 25) == Circle(Point2D(-3, -4), 0)
    assert Circle(a**2 + b**2 + 6*a + 8*b + 25, x='a', y='b') == Circle(Point2D(-3, -4), 0)
    assert Circle(x**2 + y**2 - 25) == Circle(Point2D(0, 0), 5)
    assert Circle(x**2 + y**2) == Circle(Point2D(0, 0), 0)
    assert Circle(a**2 + b**2, x='a', y='b') == Circle(Point2D(0, 0), 0)
    assert Circle(x**2 + y**2 + 6*x + 8) == Circle(Point2D(-3, 0), 1)
    assert Circle(x**2 + y**2 + 6*y + 8) == Circle(Point2D(0, -3), 1)
    assert Circle(6*(x**2) + 6*(y**2) + 6*x + 8*y - 25) == Circle(Point2D(-S(1)/2, -S(2)/3), 5*sqrt(37)/6)
    # negative squared radius, missing variables, or non-circular conics
    # cannot produce a Circle
    raises(GeometryError, lambda: Circle(x**2 + y**2 + 3*x + 4*y + 26))
    raises(GeometryError, lambda: Circle(x**2 + y**2 + 25))
    raises(GeometryError, lambda: Circle(a**2 + b**2 + 25, x='a', y='b'))
    raises(GeometryError, lambda: Circle(x**2 + 6*y + 8))
    raises(GeometryError, lambda: Circle(6*(x ** 2) + 4*(y**2) + 6*x + 8*y + 25))
    raises(ValueError, lambda: Circle(a**2 + b**2 + 3*a + 4*b - 8))
@slow
def test_ellipse_geom():
    """Broad integration test of Ellipse/Circle geometry: construction,
    basic properties, containment, tangents, normals, foci, intersections,
    encloses_point, scaling and rotation."""
    x = Symbol('x', real=True)
    y = Symbol('y', real=True)
    t = Symbol('t', real=True)
    y1 = Symbol('y1', real=True)
    half = Rational(1, 2)
    p1 = Point(0, 0)
    p2 = Point(1, 1)
    p4 = Point(0, 1)

    e1 = Ellipse(p1, 1, 1)
    e2 = Ellipse(p2, half, 1)
    e3 = Ellipse(p1, y1, y1)
    c1 = Circle(p1, 1)
    c2 = Circle(p2, 1)
    c3 = Circle(Point(sqrt(2), sqrt(2)), 1)
    l1 = Line(p1, p2)

    # Test creation with three points
    cen, rad = Point(3*half, 2), 5*half
    assert Circle(Point(0, 0), Point(3, 0), Point(0, 4)) == Circle(cen, rad)
    assert Circle(Point(0, 0), Point(1, 1), Point(2, 2)) == Segment2D(Point2D(0, 0), Point2D(2, 2))

    raises(ValueError, lambda: Ellipse(None, None, None, 1))
    raises(GeometryError, lambda: Circle(Point(0, 0)))

    # Basic Stuff
    assert Ellipse(None, 1, 1).center == Point(0, 0)
    assert e1 == c1
    assert e1 != e2
    assert e1 != l1
    assert p4 in e1
    assert p2 not in e2
    assert e1.area == pi
    assert e2.area == pi/2
    assert e3.area == pi*y1*abs(y1)
    assert c1.area == e1.area
    assert c1.circumference == e1.circumference
    assert e3.circumference == 2*pi*y1
    assert e1.plot_interval() == e2.plot_interval() == [t, -pi, pi]
    assert e1.plot_interval(x) == e2.plot_interval(x) == [x, -pi, pi]

    assert c1.minor == 1
    assert c1.major == 1
    assert c1.hradius == 1
    assert c1.vradius == 1

    # degenerate ellipses collapse to a Point or a Segment
    assert Ellipse((1, 1), 0, 0) == Point(1, 1)
    assert Ellipse((1, 1), 1, 0) == Segment(Point(0, 1), Point(2, 1))
    assert Ellipse((1, 1), 0, 1) == Segment(Point(1, 0), Point(1, 2))

    # Private Functions
    assert hash(c1) == hash(Circle(Point(1, 0), Point(0, 1), Point(0, -1)))
    assert c1 in e1
    assert (Line(p1, p2) in e1) is False
    assert e1.__cmp__(e1) == 0
    assert e1.__cmp__(Point(0, 0)) > 0

    # Encloses
    assert e1.encloses(Segment(Point(-0.5, -0.5), Point(0.5, 0.5))) is True
    assert e1.encloses(Line(p1, p2)) is False
    assert e1.encloses(Ray(p1, p2)) is False
    assert e1.encloses(e1) is False
    assert e1.encloses(
        Polygon(Point(-0.5, -0.5), Point(-0.5, 0.5), Point(0.5, 0.5))) is True
    assert e1.encloses(RegularPolygon(p1, 0.5, 3)) is True
    assert e1.encloses(RegularPolygon(p1, 5, 3)) is False
    assert e1.encloses(RegularPolygon(p2, 5, 3)) is False

    assert e2.arbitrary_point() in e2

    # Foci
    f1, f2 = Point(sqrt(12), 0), Point(-sqrt(12), 0)
    ef = Ellipse(Point(0, 0), 4, 2)
    assert ef.foci in [(f1, f2), (f2, f1)]

    # Tangents
    v = sqrt(2) / 2  # simplifies to 1/sqrt(2)
    p1_1 = Point(v, v)  # 45 degree angle
    p1_2 = p2 + Point(half, 0)
    p1_3 = p2 + Point(0, 1)
    assert e1.tangent_lines(p4) == c1.tangent_lines(p4)
    assert e2.tangent_lines(p1_2) == [Line(Point(S(3)/2, 1), Point(S(3)/2, S(1)/2))]
    assert e2.tangent_lines(p1_3) == [Line(Point(1, 2), Point(S(5)/4, 2))]
    assert c1.tangent_lines(p1_1) != [Line(p1_1, Point(0, sqrt(2)))]
    assert c1.tangent_lines(p1) == []
    assert e2.is_tangent(Line(p1_2, p2 + Point(half, 1)))
    assert e2.is_tangent(Line(p1_3, p2 + Point(half, 1)))
    assert c1.is_tangent(Line(p1_1, Point(0, sqrt(2))))
    assert e1.is_tangent(Line(Point(0, 0), Point(1, 1))) is False
    assert c1.is_tangent(e1) is True
    assert c1.is_tangent(Ellipse(Point(2, 0), 1, 1)) is True
    assert c1.is_tangent(
        Polygon(Point(1, 1), Point(1, -1), Point(2, 0))) is True
    assert c1.is_tangent(
        Polygon(Point(1, 1), Point(1, 0), Point(2, 0))) is False
    assert Circle(Point(5, 5), 3).is_tangent(Circle(Point(0, 5), 1)) is False

    assert Ellipse(Point(5, 5), 2, 1).tangent_lines(Point(0, 0)) == \
        [Line(Point(0, 0), Point(S(77)/25, S(132)/25)),
         Line(Point(0, 0), Point(S(33)/5, S(22)/5))]
    assert Ellipse(Point(5, 5), 2, 1).tangent_lines(Point(3, 4)) == \
        [Line(Point(3, 4), Point(4, 4)), Line(Point(3, 4), Point(3, 5))]
    assert Circle(Point(5, 5), 2).tangent_lines(Point(3, 3)) == \
        [Line(Point(3, 3), Point(4, 3)), Line(Point(3, 3), Point(3, 4))]
    assert Circle(Point(5, 5), 2).tangent_lines(Point(5 - 2*sqrt(2), 5)) == \
        [Line(Point(5 - 2*sqrt(2), 5), Point(5 - sqrt(2), 5 - sqrt(2))),
         Line(Point(5 - 2*sqrt(2), 5), Point(5 - sqrt(2), 5 + sqrt(2))), ]

    # for numerical calculations, we shouldn't demand exact equality,
    # so only test up to the desired precision
    def lines_close(l1, l2, prec):
        """ tests whether l1 and 12 are within 10**(-prec)
        of each other """
        return abs(l1.p1 - l2.p1) < 10**(-prec) and abs(l1.p2 - l2.p2) < 10**(-prec)
    def line_list_close(ll1, ll2, prec):
        return all(lines_close(l1, l2, prec) for l1, l2 in zip(ll1, ll2))

    e = Ellipse(Point(0, 0), 2, 1)
    assert e.normal_lines(Point(0, 0)) == \
        [Line(Point(0, 0), Point(0, 1)), Line(Point(0, 0), Point(1, 0))]
    assert e.normal_lines(Point(1, 0)) == \
        [Line(Point(0, 0), Point(1, 0))]
    assert e.normal_lines((0, 1)) == \
        [Line(Point(0, 0), Point(0, 1))]
    assert line_list_close(e.normal_lines(Point(1, 1), 2), [
        Line(Point(-S(51)/26, -S(1)/5), Point(-S(25)/26, S(17)/83)),
        Line(Point(S(28)/29, -S(7)/8), Point(S(57)/29, -S(9)/2))], 2)
    # test the failure of Poly.intervals and checks a point on the boundary
    p = Point(sqrt(3), S.Half)
    assert p in e
    assert line_list_close(e.normal_lines(p, 2), [
        Line(Point(-S(341)/171, -S(1)/13), Point(-S(170)/171, S(5)/64)),
        Line(Point(S(26)/15, -S(1)/2), Point(S(41)/15, -S(43)/26))], 2)
    # be sure to use the slope that isn't undefined on boundary
    e = Ellipse((0, 0), 2, 2*sqrt(3)/3)
    assert line_list_close(e.normal_lines((1, 1), 2), [
        Line(Point(-S(64)/33, -S(20)/71), Point(-S(31)/33, S(2)/13)),
        Line(Point(1, -1), Point(2, -4))], 2)
    # general ellipse fails except under certain conditions
    e = Ellipse((0, 0), x, 1)
    assert e.normal_lines((x + 1, 0)) == [Line(Point(0, 0), Point(1, 0))]
    raises(NotImplementedError, lambda: e.normal_lines((x + 1, 1)))

    # Properties
    major = 3
    minor = 1
    e4 = Ellipse(p2, minor, major)
    assert e4.focus_distance == sqrt(major**2 - minor**2)
    ecc = e4.focus_distance / major
    assert e4.eccentricity == ecc
    assert e4.periapsis == major*(1 - ecc)
    assert e4.apoapsis == major*(1 + ecc)
    assert e4.semilatus_rectum == major*(1 - ecc ** 2)
    # independent of orientation
    e4 = Ellipse(p2, major, minor)
    assert e4.focus_distance == sqrt(major**2 - minor**2)
    ecc = e4.focus_distance / major
    assert e4.eccentricity == ecc
    assert e4.periapsis == major*(1 - ecc)
    assert e4.apoapsis == major*(1 + ecc)

    # Intersection
    l1 = Line(Point(1, -5), Point(1, 5))
    l2 = Line(Point(-5, -1), Point(5, -1))
    l3 = Line(Point(-1, -1), Point(1, 1))
    l4 = Line(Point(-10, 0), Point(0, 10))
    pts_c1_l3 = [Point(sqrt(2)/2, sqrt(2)/2), Point(-sqrt(2)/2, -sqrt(2)/2)]

    assert intersection(e2, l4) == []
    assert intersection(c1, Point(1, 0)) == [Point(1, 0)]
    assert intersection(c1, l1) == [Point(1, 0)]
    assert intersection(c1, l2) == [Point(0, -1)]
    assert intersection(c1, l3) in [pts_c1_l3, [pts_c1_l3[1], pts_c1_l3[0]]]
    assert intersection(c1, c2) == [Point(0, 1), Point(1, 0)]
    assert intersection(c1, c3) == [Point(sqrt(2)/2, sqrt(2)/2)]
    assert e1.intersection(l1) == [Point(1, 0)]
    assert e2.intersection(l4) == []
    assert e1.intersection(Circle(Point(0, 2), 1)) == [Point(0, 1)]
    assert e1.intersection(Circle(Point(5, 0), 1)) == []
    assert e1.intersection(Ellipse(Point(2, 0), 1, 1)) == [Point(1, 0)]
    assert e1.intersection(Ellipse(Point(5, 0), 1, 1)) == []
    assert e1.intersection(Point(2, 0)) == []
    assert e1.intersection(e1) == e1
    assert intersection(Ellipse(Point(0, 0), 2, 1), Ellipse(Point(3, 0), 1, 2)) == [Point(2, 0)]
    assert intersection(Circle(Point(0, 0), 2), Circle(Point(3, 0), 1)) == [Point(2, 0)]
    assert intersection(Circle(Point(0, 0), 2), Circle(Point(7, 0), 1)) == []
    assert intersection(Ellipse(Point(0, 0), 5, 17), Ellipse(Point(4, 0), 1, 0.2)) == [Point(5, 0)]
    assert intersection(Ellipse(Point(0, 0), 5, 17), Ellipse(Point(4, 0), 0.999, 0.2)) == []
    assert Circle((0, 0), S(1)/2).intersection(
        Triangle((-1, 0), (1, 0), (0, 1))) == [
        Point(-S(1)/2, 0), Point(S(1)/2, 0)]
    raises(TypeError, lambda: intersection(e2, Line((0, 0, 0), (0, 0, 1))))
    raises(TypeError, lambda: intersection(e2, Rational(12)))

    # some special case intersections
    csmall = Circle(p1, 3)
    cbig = Circle(p1, 5)
    cout = Circle(Point(5, 5), 1)
    # one circle inside of another
    assert csmall.intersection(cbig) == []
    # separate circles
    assert csmall.intersection(cout) == []
    # coincident circles
    assert csmall.intersection(csmall) == csmall

    v = sqrt(2)
    t1 = Triangle(Point(0, v), Point(0, -v), Point(v, 0))
    points = intersection(t1, c1)
    assert len(points) == 4
    assert Point(0, 1) in points
    assert Point(0, -1) in points
    assert Point(v/2, v/2) in points
    assert Point(v/2, -v/2) in points

    circ = Circle(Point(0, 0), 5)
    elip = Ellipse(Point(0, 0), 5, 20)
    assert intersection(circ, elip) in \
        [[Point(5, 0), Point(-5, 0)], [Point(-5, 0), Point(5, 0)]]
    assert elip.tangent_lines(Point(0, 0)) == []
    elip = Ellipse(Point(0, 0), 3, 2)
    assert elip.tangent_lines(Point(3, 0)) == \
        [Line(Point(3, 0), Point(3, -12))]

    e1 = Ellipse(Point(0, 0), 5, 10)
    e2 = Ellipse(Point(2, 1), 4, 8)
    a = S(53)/17
    c = 2*sqrt(3991)/17
    ans = [Point(a - c/8, a/2 + c), Point(a + c/8, a/2 - c)]
    assert e1.intersection(e2) == ans
    e2 = Ellipse(Point(x, y), 4, 8)
    c = sqrt(3991)
    ans = [Point(-c/68 + a, 2*c/17 + a/2), Point(c/68 + a, -2*c/17 + a/2)]
    assert [p.subs({x: 2, y:1}) for p in e1.intersection(e2)] == ans

    # Combinations of above
    assert e3.is_tangent(e3.tangent_lines(p1 + Point(y1, 0))[0])

    e = Ellipse((1, 2), 3, 2)
    assert e.tangent_lines(Point(10, 0)) == \
        [Line(Point(10, 0), Point(1, 0)),
         Line(Point(10, 0), Point(S(14)/5, S(18)/5))]

    # encloses_point
    e = Ellipse((0, 0), 1, 2)
    assert e.encloses_point(e.center)
    assert e.encloses_point(e.center + Point(0, e.vradius - Rational(1, 10)))
    assert e.encloses_point(e.center + Point(e.hradius - Rational(1, 10), 0))
    assert e.encloses_point(e.center + Point(e.hradius, 0)) is False
    assert e.encloses_point(
        e.center + Point(e.hradius + Rational(1, 10), 0)) is False
    e = Ellipse((0, 0), 2, 1)
    assert e.encloses_point(e.center)
    assert e.encloses_point(e.center + Point(0, e.vradius - Rational(1, 10)))
    assert e.encloses_point(e.center + Point(e.hradius - Rational(1, 10), 0))
    assert e.encloses_point(e.center + Point(e.hradius, 0)) is False
    assert e.encloses_point(
        e.center + Point(e.hradius + Rational(1, 10), 0)) is False
    assert c1.encloses_point(Point(1, 0)) is False
    assert c1.encloses_point(Point(0.3, 0.4)) is True

    assert e.scale(2, 3) == Ellipse((0, 0), 4, 3)
    assert e.scale(3, 6) == Ellipse((0, 0), 6, 6)
    assert e.rotate(pi) == e
    assert e.rotate(pi, (1, 2)) == Ellipse(Point(2, 4), 2, 1)
    raises(NotImplementedError, lambda: e.rotate(pi/3))

    # Circle rotation tests (Issue #11743)
    # Link - https://github.com/sympy/sympy/issues/11743
    cir = Circle(Point(1, 0), 1)
    assert cir.rotate(pi/2) == Circle(Point(0, 1), 1)
    assert cir.rotate(pi/3) == Circle(Point(S(1)/2, sqrt(3)/2), 1)
    assert cir.rotate(pi/3, Point(1, 0)) == Circle(Point(1, 0), 1)
    assert cir.rotate(pi/3, Point(0, 1)) == Circle(Point(S(1)/2 + sqrt(3)/2, S(1)/2 + sqrt(3)/2), 1)
def test_construction():
    """Any two of hradius/vradius/eccentricity determine the third."""
    e1 = Ellipse(hradius=2, vradius=1, eccentricity=None)
    assert e1.eccentricity == sqrt(3)/2

    e2 = Ellipse(hradius=2, vradius=None, eccentricity=sqrt(3)/2)
    assert e2.vradius == 1

    e3 = Ellipse(hradius=None, vradius=1, eccentricity=sqrt(3)/2)
    assert e3.hradius == 2

    # filter(None, iterator) filters out anything falsey, including 0
    # eccentricity would be filtered out in this case and the constructor would throw an error
    e4 = Ellipse(Point(0, 0), hradius=1, eccentricity=0)
    assert e4.vradius == 1
def test_ellipse_random_point():
    """Random points generated by an ellipse satisfy its equation."""
    y1 = Symbol('y1', real=True)
    e3 = Ellipse(Point(0, 0), y1, y1)
    rx, ry = Symbol('rx'), Symbol('ry')
    for ind in range(0, 5):
        r = e3.random_point()
        # substitution should give zero*y1**2
        assert e3.equation(rx, ry).subs(zip((rx, ry), r.args)).equals(0)
def test_repr():
    """Circle repr is round-trippable constructor syntax."""
    assert repr(Circle((0, 1), 2)) == 'Circle(Point2D(0, 1), 2)'
def test_transform():
    """scale/translate on circles; non-uniform scaling yields an Ellipse."""
    c = Circle((1, 1), 2)
    assert c.scale(-1) == Circle((-1, 1), 2)
    assert c.scale(y=-1) == Circle((1, -1), 2)
    assert c.scale(2) == Ellipse((2, 1), 4, 2)

    # scaling about an explicit point
    assert Ellipse((0, 0), 2, 3).scale(2, 3, (4, 5)) == \
        Ellipse(Point(-4, -10), 4, 9)
    assert Circle((0, 0), 2).scale(2, 3, (4, 5)) == \
        Ellipse(Point(-4, -10), 4, 6)
    assert Ellipse((0, 0), 2, 3).scale(3, 3, (4, 5)) == \
        Ellipse(Point(-8, -10), 6, 9)
    assert Circle((0, 0), 2).scale(3, 3, (4, 5)) == \
        Circle(Point(-8, -10), 6)
    assert Circle(Point(-8, -10), 6).scale(S(1)/3, S(1)/3, (4, 5)) == \
        Circle((0, 0), 2)
    assert Circle((0, 0), 2).translate(4, 5) == \
        Circle((4, 5), 2)
    assert Circle((0, 0), 2).scale(3, 3) == \
        Circle((0, 0), 6)
def test_bounds():
    """bounds returns the (xmin, ymin, xmax, ymax) bounding rectangle."""
    e1 = Ellipse(Point(0, 0), 3, 5)
    e2 = Ellipse(Point(2, -2), 7, 7)
    c1 = Circle(Point(2, -2), 7)
    c2 = Circle(Point(-2, 0), Point(0, 2), Point(2, 0))
    assert e1.bounds == (-3, -5, 3, 5)
    assert e2.bounds == (-5, -9, 9, 5)
    assert c1.bounds == (-5, -9, 9, 5)
    assert c2.bounds == (-2, -2, 2, 2)
def test_reflect():
    """Reflection flips orientation (signed area); reflecting an Ellipse
    across a line of symbolic slope is not implemented."""
    b = Symbol('b')
    m = Symbol('m')
    l = Line((0, b), slope=m)
    t1 = Triangle((0, 0), (1, 0), (2, 3))
    assert t1.area == -t1.reflect(l).area
    e = Ellipse((1, 0), 1, 2)
    assert e.area == -e.reflect(Line((1, 0), slope=0)).area
    assert e.area == -e.reflect(Line((1, 0), slope=oo)).area
    raises(NotImplementedError, lambda: e.reflect(Line((1, 0), slope=m)))
def test_is_tangent():
    """is_tangent against points, ellipses, circles, rays, segments, lines
    and polygons, including non-tangent and type-error cases."""
    e1 = Ellipse(Point(0, 0), 3, 5)
    c1 = Circle(Point(2, -2), 7)
    assert e1.is_tangent(Point(0, 0)) is False
    assert e1.is_tangent(Point(3, 0)) is False
    assert e1.is_tangent(e1) is True
    assert e1.is_tangent(Ellipse((0, 0), 1, 2)) is False
    assert e1.is_tangent(Ellipse((0, 0), 3, 2)) is True
    assert c1.is_tangent(Ellipse((2, -2), 7, 1)) is True
    assert c1.is_tangent(Circle((11, -2), 2)) is True
    assert c1.is_tangent(Circle((7, -2), 2)) is True
    assert c1.is_tangent(Ray((-5, -2), (-15, -20))) is False
    assert c1.is_tangent(Ray((-3, -2), (-15, -20))) is False
    assert c1.is_tangent(Ray((-3, -22), (15, 20))) is False
    assert c1.is_tangent(Ray((9, 20), (9, -20))) is True
    assert e1.is_tangent(Segment((2, 2), (-7, 7))) is False
    assert e1.is_tangent(Segment((0, 0), (1, 2))) is False
    assert c1.is_tangent(Segment((0, 0), (-5, -2))) is False
    assert e1.is_tangent(Segment((3, 0), (12, 12))) is False
    assert e1.is_tangent(Segment((12, 12), (3, 0))) is False
    assert e1.is_tangent(Segment((-3, 0), (3, 0))) is False
    assert e1.is_tangent(Segment((-3, 5), (3, 5))) is True
    assert e1.is_tangent(Line((0, 0), (1, 1))) is False
    assert e1.is_tangent(Line((-3, 0), (-2.99, -0.001))) is False
    assert e1.is_tangent(Line((-3, 0), (-3, 1))) is True
    assert e1.is_tangent(Polygon((0, 0), (5, 5), (5, -5))) is False
    assert e1.is_tangent(Polygon((-100, -50), (-40, -334), (-70, -52))) is False
    assert e1.is_tangent(Polygon((-3, 0), (3, 0), (0, 1))) is False
    assert e1.is_tangent(Polygon((-3, 0), (3, 0), (0, 5))) is False
    assert e1.is_tangent(Polygon((-3, 0), (0, -5), (3, 0), (0, 5))) is False
    assert e1.is_tangent(Polygon((-3, -5), (-3, 5), (3, 5), (3, -5))) is True
    assert c1.is_tangent(Polygon((-3, -5), (-3, 5), (3, 5), (3, -5))) is False
    assert e1.is_tangent(Polygon((0, 0), (3, 0), (7, 7), (0, 5))) is False
    assert e1.is_tangent(Polygon((3, 12), (3, -12), (6, 5))) is True
    assert e1.is_tangent(Polygon((3, 12), (3, -12), (0, -5), (0, 5))) is False
    assert e1.is_tangent(Polygon((3, 0), (5, 7), (6, -5))) is False
    raises(TypeError, lambda: e1.is_tangent(Point(0, 0, 0)))
    raises(TypeError, lambda: e1.is_tangent(Rational(5)))
def test_parameter_value():
    """parameter_value maps an on-ellipse point to its parameter and
    raises ValueError for points not on the ellipse."""
    t = Symbol('t')
    e = Ellipse(Point(0, 0), 3, 5)
    assert e.parameter_value((3, 0), t) == {t: 0}
    raises(ValueError, lambda: e.parameter_value((4, 0), t))
@slow
def test_second_moment_of_area():
    """Compare second_moment_of_area (I_xx, I_yy, I_xy) against direct
    integration over the ellipse region."""
    x, y = symbols('x, y')
    e = Ellipse(Point(0, 0), 5, 4)
    I_yy = 2*4*integrate(sqrt(25 - x**2)*x**2, (x, -5, 5))/5
    I_xx = 2*5*integrate(sqrt(16 - y**2)*y**2, (y, -4, 4))/4
    Y = 3*sqrt(1 - x**2/5**2)
    I_xy = integrate(integrate(y, (y, -Y, Y))*x, (x, -5, 5))
    assert I_yy == e.second_moment_of_area()[1]
    assert I_xx == e.second_moment_of_area()[0]
    assert I_xy == e.second_moment_of_area()[2]
def test_circumference():
    """Circumference via the complete elliptic integral, including the
    degenerate (segment) and circular special cases, and a numeric check."""
    M = Symbol('M')
    m = Symbol('m')
    assert Ellipse(Point(0, 0), M, m).circumference == 4 * M * elliptic_e((M ** 2 - m ** 2) / M**2)

    assert Ellipse(Point(0, 0), 5, 4).circumference == 20 * elliptic_e(S(9) / 25)

    # degenerate ellipse
    assert Ellipse(None, 1, None, 1).length == 2

    # circle
    assert Ellipse(None, 1, None, 0).circumference == 2*pi

    # test numerically
    assert abs(Ellipse(None, hradius=5, vradius=3).circumference.evalf(16) - 25.52699886339813) < 1e-10
def test_issue_15259():
    """A zero-radius Circle degenerates to its center Point (issue 15259)."""
    assert Circle((1, 2), 0) == Point(1, 2)
def test_issue_15797_equals():
    """Float inputs are rationalized by default; evaluate=False keeps them
    as Floats (issue 15797)."""
    Ri = 0.024127189424130748
    Ci = (0.0864931002830291, 0.0819863295239654)
    A = Point(0, 0.0578591400998346)
    c = Circle(Ci, Ri)  # evaluated
    assert c.is_tangent(c.tangent_lines(A)[0]) == True
    assert c.center.x.is_Rational
    assert c.center.y.is_Rational
    assert c.radius.is_Rational
    u = Circle(Ci, Ri, evaluate=False)  # unevaluated
    assert u.center.x.is_Float
    assert u.center.y.is_Float
    assert u.radius.is_Float
def test_auxiliary_circle():
    """Auxiliary circle: same center, radius equal to the semi-major axis."""
    x, y, a, b = symbols('x y a b')
    e = Ellipse((x, y), a, b)
    # the general result
    assert e.auxiliary_circle() == Circle((x, y), Max(a, b))
    # a special case where Ellipse is a Circle
    assert Circle((3, 4), 8).auxiliary_circle() == Circle((3, 4), 8)
def test_director_circle():
    """Director circle: same center, radius sqrt(a**2 + b**2)."""
    x, y, a, b = symbols('x y a b')
    e = Ellipse((x, y), a, b)
    # the general result
    assert e.director_circle() == Circle((x, y), sqrt(a**2 + b**2))
    # a special case where Ellipse is a Circle
    assert Circle((3, 4), 8).director_circle() == Circle((3, 4), 8*sqrt(2))
| 41.693676 | 111 | 0.582026 | from sympy import Rational, S, Symbol, symbols, pi, sqrt, oo, Point2D, Segment2D, I
from sympy.core.compatibility import range
from sympy.geometry import (Circle, Ellipse, GeometryError, Line, Point, Polygon, Ray, RegularPolygon, Segment,
Triangle, intersection)
from sympy.utilities.pytest import raises, slow
from sympy import integrate
from sympy.functions.special.elliptic_integrals import elliptic_e
from sympy.functions.elementary.miscellaneous import Max
def test_ellipse_equation_using_slope():
from sympy.abc import x, y
e1 = Ellipse(Point(1, 0), 3, 2)
assert str(e1.equation(_slope=1)) == str((-x + y + 1)**2/8 + (x + y - 1)**2/18 - 1)
e2 = Ellipse(Point(0, 0), 4, 1)
assert str(e2.equation(_slope=1)) == str((-x + y)**2/2 + (x + y)**2/32 - 1)
e3 = Ellipse(Point(1, 5), 6, 2)
assert str(e3.equation(_slope=2)) == str((-2*x + y - 3)**2/20 + (x + 2*y - 11)**2/180 - 1)
def test_object_from_equation():
from sympy.abc import x, y, a, b
assert Circle(x**2 + y**2 + 3*x + 4*y - 8) == Circle(Point2D(S(-3) / 2, -2),
sqrt(57) / 2)
assert Circle(x**2 + y**2 + 6*x + 8*y + 25) == Circle(Point2D(-3, -4), 0)
assert Circle(a**2 + b**2 + 6*a + 8*b + 25, x='a', y='b') == Circle(Point2D(-3, -4), 0)
assert Circle(x**2 + y**2 - 25) == Circle(Point2D(0, 0), 5)
assert Circle(x**2 + y**2) == Circle(Point2D(0, 0), 0)
assert Circle(a**2 + b**2, x='a', y='b') == Circle(Point2D(0, 0), 0)
assert Circle(x**2 + y**2 + 6*x + 8) == Circle(Point2D(-3, 0), 1)
assert Circle(x**2 + y**2 + 6*y + 8) == Circle(Point2D(0, -3), 1)
assert Circle(6*(x**2) + 6*(y**2) + 6*x + 8*y - 25) == Circle(Point2D(-S(1)/2, -S(2)/3), 5*sqrt(37)/6)
raises(GeometryError, lambda: Circle(x**2 + y**2 + 3*x + 4*y + 26))
raises(GeometryError, lambda: Circle(x**2 + y**2 + 25))
raises(GeometryError, lambda: Circle(a**2 + b**2 + 25, x='a', y='b'))
raises(GeometryError, lambda: Circle(x**2 + 6*y + 8))
raises(GeometryError, lambda: Circle(6*(x ** 2) + 4*(y**2) + 6*x + 8*y + 25))
raises(ValueError, lambda: Circle(a**2 + b**2 + 3*a + 4*b - 8))
@slow
def test_ellipse_geom():
x = Symbol('x', real=True)
y = Symbol('y', real=True)
t = Symbol('t', real=True)
y1 = Symbol('y1', real=True)
half = Rational(1, 2)
p1 = Point(0, 0)
p2 = Point(1, 1)
p4 = Point(0, 1)
e1 = Ellipse(p1, 1, 1)
e2 = Ellipse(p2, half, 1)
e3 = Ellipse(p1, y1, y1)
c1 = Circle(p1, 1)
c2 = Circle(p2, 1)
c3 = Circle(Point(sqrt(2), sqrt(2)), 1)
l1 = Line(p1, p2)
cen, rad = Point(3*half, 2), 5*half
assert Circle(Point(0, 0), Point(3, 0), Point(0, 4)) == Circle(cen, rad)
assert Circle(Point(0, 0), Point(1, 1), Point(2, 2)) == Segment2D(Point2D(0, 0), Point2D(2, 2))
raises(ValueError, lambda: Ellipse(None, None, None, 1))
raises(GeometryError, lambda: Circle(Point(0, 0)))
assert Ellipse(None, 1, 1).center == Point(0, 0)
assert e1 == c1
assert e1 != e2
assert e1 != l1
assert p4 in e1
assert p2 not in e2
assert e1.area == pi
assert e2.area == pi/2
assert e3.area == pi*y1*abs(y1)
assert c1.area == e1.area
assert c1.circumference == e1.circumference
assert e3.circumference == 2*pi*y1
assert e1.plot_interval() == e2.plot_interval() == [t, -pi, pi]
assert e1.plot_interval(x) == e2.plot_interval(x) == [x, -pi, pi]
assert c1.minor == 1
assert c1.major == 1
assert c1.hradius == 1
assert c1.vradius == 1
assert Ellipse((1, 1), 0, 0) == Point(1, 1)
assert Ellipse((1, 1), 1, 0) == Segment(Point(0, 1), Point(2, 1))
assert Ellipse((1, 1), 0, 1) == Segment(Point(1, 0), Point(1, 2))
assert hash(c1) == hash(Circle(Point(1, 0), Point(0, 1), Point(0, -1)))
assert c1 in e1
assert (Line(p1, p2) in e1) is False
assert e1.__cmp__(e1) == 0
assert e1.__cmp__(Point(0, 0)) > 0
assert e1.encloses(Segment(Point(-0.5, -0.5), Point(0.5, 0.5))) is True
assert e1.encloses(Line(p1, p2)) is False
assert e1.encloses(Ray(p1, p2)) is False
assert e1.encloses(e1) is False
assert e1.encloses(
Polygon(Point(-0.5, -0.5), Point(-0.5, 0.5), Point(0.5, 0.5))) is True
assert e1.encloses(RegularPolygon(p1, 0.5, 3)) is True
assert e1.encloses(RegularPolygon(p1, 5, 3)) is False
assert e1.encloses(RegularPolygon(p2, 5, 3)) is False
assert e2.arbitrary_point() in e2
f1, f2 = Point(sqrt(12), 0), Point(-sqrt(12), 0)
ef = Ellipse(Point(0, 0), 4, 2)
assert ef.foci in [(f1, f2), (f2, f1)]
v = sqrt(2) / 2
p1_1 = Point(v, v)
p1_2 = p2 + Point(half, 0)
p1_3 = p2 + Point(0, 1)
assert e1.tangent_lines(p4) == c1.tangent_lines(p4)
assert e2.tangent_lines(p1_2) == [Line(Point(S(3)/2, 1), Point(S(3)/2, S(1)/2))]
assert e2.tangent_lines(p1_3) == [Line(Point(1, 2), Point(S(5)/4, 2))]
assert c1.tangent_lines(p1_1) != [Line(p1_1, Point(0, sqrt(2)))]
assert c1.tangent_lines(p1) == []
assert e2.is_tangent(Line(p1_2, p2 + Point(half, 1)))
assert e2.is_tangent(Line(p1_3, p2 + Point(half, 1)))
assert c1.is_tangent(Line(p1_1, Point(0, sqrt(2))))
assert e1.is_tangent(Line(Point(0, 0), Point(1, 1))) is False
assert c1.is_tangent(e1) is True
assert c1.is_tangent(Ellipse(Point(2, 0), 1, 1)) is True
assert c1.is_tangent(
Polygon(Point(1, 1), Point(1, -1), Point(2, 0))) is True
assert c1.is_tangent(
Polygon(Point(1, 1), Point(1, 0), Point(2, 0))) is False
assert Circle(Point(5, 5), 3).is_tangent(Circle(Point(0, 5), 1)) is False
assert Ellipse(Point(5, 5), 2, 1).tangent_lines(Point(0, 0)) == \
[Line(Point(0, 0), Point(S(77)/25, S(132)/25)),
Line(Point(0, 0), Point(S(33)/5, S(22)/5))]
assert Ellipse(Point(5, 5), 2, 1).tangent_lines(Point(3, 4)) == \
[Line(Point(3, 4), Point(4, 4)), Line(Point(3, 4), Point(3, 5))]
assert Circle(Point(5, 5), 2).tangent_lines(Point(3, 3)) == \
[Line(Point(3, 3), Point(4, 3)), Line(Point(3, 3), Point(3, 4))]
assert Circle(Point(5, 5), 2).tangent_lines(Point(5 - 2*sqrt(2), 5)) == \
[Line(Point(5 - 2*sqrt(2), 5), Point(5 - sqrt(2), 5 - sqrt(2))),
Line(Point(5 - 2*sqrt(2), 5), Point(5 - sqrt(2), 5 + sqrt(2))), ]
# so only test up to the desired precision
def lines_close(l1, l2, prec):
return abs(l1.p1 - l2.p1) < 10**(-prec) and abs(l1.p2 - l2.p2) < 10**(-prec)
def line_list_close(ll1, ll2, prec):
return all(lines_close(l1, l2, prec) for l1, l2 in zip(ll1, ll2))
e = Ellipse(Point(0, 0), 2, 1)
assert e.normal_lines(Point(0, 0)) == \
[Line(Point(0, 0), Point(0, 1)), Line(Point(0, 0), Point(1, 0))]
assert e.normal_lines(Point(1, 0)) == \
[Line(Point(0, 0), Point(1, 0))]
assert e.normal_lines((0, 1)) == \
[Line(Point(0, 0), Point(0, 1))]
assert line_list_close(e.normal_lines(Point(1, 1), 2), [
Line(Point(-S(51)/26, -S(1)/5), Point(-S(25)/26, S(17)/83)),
Line(Point(S(28)/29, -S(7)/8), Point(S(57)/29, -S(9)/2))], 2)
# test the failure of Poly.intervals and checks a point on the boundary
p = Point(sqrt(3), S.Half)
assert p in e
assert line_list_close(e.normal_lines(p, 2), [
Line(Point(-S(341)/171, -S(1)/13), Point(-S(170)/171, S(5)/64)),
Line(Point(S(26)/15, -S(1)/2), Point(S(41)/15, -S(43)/26))], 2)
# be sure to use the slope that isn't undefined on boundary
e = Ellipse((0, 0), 2, 2*sqrt(3)/3)
assert line_list_close(e.normal_lines((1, 1), 2), [
Line(Point(-S(64)/33, -S(20)/71), Point(-S(31)/33, S(2)/13)),
Line(Point(1, -1), Point(2, -4))], 2)
e = Ellipse((0, 0), x, 1)
assert e.normal_lines((x + 1, 0)) == [Line(Point(0, 0), Point(1, 0))]
raises(NotImplementedError, lambda: e.normal_lines((x + 1, 1)))
major = 3
minor = 1
e4 = Ellipse(p2, minor, major)
assert e4.focus_distance == sqrt(major**2 - minor**2)
ecc = e4.focus_distance / major
assert e4.eccentricity == ecc
assert e4.periapsis == major*(1 - ecc)
assert e4.apoapsis == major*(1 + ecc)
assert e4.semilatus_rectum == major*(1 - ecc ** 2)
e4 = Ellipse(p2, major, minor)
assert e4.focus_distance == sqrt(major**2 - minor**2)
ecc = e4.focus_distance / major
assert e4.eccentricity == ecc
assert e4.periapsis == major*(1 - ecc)
assert e4.apoapsis == major*(1 + ecc)
l1 = Line(Point(1, -5), Point(1, 5))
l2 = Line(Point(-5, -1), Point(5, -1))
l3 = Line(Point(-1, -1), Point(1, 1))
l4 = Line(Point(-10, 0), Point(0, 10))
pts_c1_l3 = [Point(sqrt(2)/2, sqrt(2)/2), Point(-sqrt(2)/2, -sqrt(2)/2)]
assert intersection(e2, l4) == []
assert intersection(c1, Point(1, 0)) == [Point(1, 0)]
assert intersection(c1, l1) == [Point(1, 0)]
assert intersection(c1, l2) == [Point(0, -1)]
assert intersection(c1, l3) in [pts_c1_l3, [pts_c1_l3[1], pts_c1_l3[0]]]
assert intersection(c1, c2) == [Point(0, 1), Point(1, 0)]
assert intersection(c1, c3) == [Point(sqrt(2)/2, sqrt(2)/2)]
assert e1.intersection(l1) == [Point(1, 0)]
assert e2.intersection(l4) == []
assert e1.intersection(Circle(Point(0, 2), 1)) == [Point(0, 1)]
assert e1.intersection(Circle(Point(5, 0), 1)) == []
assert e1.intersection(Ellipse(Point(2, 0), 1, 1)) == [Point(1, 0)]
assert e1.intersection(Ellipse(Point(5, 0), 1, 1)) == []
assert e1.intersection(Point(2, 0)) == []
assert e1.intersection(e1) == e1
assert intersection(Ellipse(Point(0, 0), 2, 1), Ellipse(Point(3, 0), 1, 2)) == [Point(2, 0)]
assert intersection(Circle(Point(0, 0), 2), Circle(Point(3, 0), 1)) == [Point(2, 0)]
assert intersection(Circle(Point(0, 0), 2), Circle(Point(7, 0), 1)) == []
assert intersection(Ellipse(Point(0, 0), 5, 17), Ellipse(Point(4, 0), 1, 0.2)) == [Point(5, 0)]
assert intersection(Ellipse(Point(0, 0), 5, 17), Ellipse(Point(4, 0), 0.999, 0.2)) == []
assert Circle((0, 0), S(1)/2).intersection(
Triangle((-1, 0), (1, 0), (0, 1))) == [
Point(-S(1)/2, 0), Point(S(1)/2, 0)]
raises(TypeError, lambda: intersection(e2, Line((0, 0, 0), (0, 0, 1))))
raises(TypeError, lambda: intersection(e2, Rational(12)))
csmall = Circle(p1, 3)
cbig = Circle(p1, 5)
cout = Circle(Point(5, 5), 1)
assert csmall.intersection(cbig) == []
assert csmall.intersection(cout) == []
assert csmall.intersection(csmall) == csmall
v = sqrt(2)
t1 = Triangle(Point(0, v), Point(0, -v), Point(v, 0))
points = intersection(t1, c1)
assert len(points) == 4
assert Point(0, 1) in points
assert Point(0, -1) in points
assert Point(v/2, v/2) in points
assert Point(v/2, -v/2) in points
circ = Circle(Point(0, 0), 5)
elip = Ellipse(Point(0, 0), 5, 20)
assert intersection(circ, elip) in \
[[Point(5, 0), Point(-5, 0)], [Point(-5, 0), Point(5, 0)]]
assert elip.tangent_lines(Point(0, 0)) == []
elip = Ellipse(Point(0, 0), 3, 2)
assert elip.tangent_lines(Point(3, 0)) == \
[Line(Point(3, 0), Point(3, -12))]
e1 = Ellipse(Point(0, 0), 5, 10)
e2 = Ellipse(Point(2, 1), 4, 8)
a = S(53)/17
c = 2*sqrt(3991)/17
ans = [Point(a - c/8, a/2 + c), Point(a + c/8, a/2 - c)]
assert e1.intersection(e2) == ans
e2 = Ellipse(Point(x, y), 4, 8)
c = sqrt(3991)
ans = [Point(-c/68 + a, 2*c/17 + a/2), Point(c/68 + a, -2*c/17 + a/2)]
assert [p.subs({x: 2, y:1}) for p in e1.intersection(e2)] == ans
assert e3.is_tangent(e3.tangent_lines(p1 + Point(y1, 0))[0])
e = Ellipse((1, 2), 3, 2)
assert e.tangent_lines(Point(10, 0)) == \
[Line(Point(10, 0), Point(1, 0)),
Line(Point(10, 0), Point(S(14)/5, S(18)/5))]
e = Ellipse((0, 0), 1, 2)
assert e.encloses_point(e.center)
assert e.encloses_point(e.center + Point(0, e.vradius - Rational(1, 10)))
assert e.encloses_point(e.center + Point(e.hradius - Rational(1, 10), 0))
assert e.encloses_point(e.center + Point(e.hradius, 0)) is False
assert e.encloses_point(
e.center + Point(e.hradius + Rational(1, 10), 0)) is False
e = Ellipse((0, 0), 2, 1)
assert e.encloses_point(e.center)
assert e.encloses_point(e.center + Point(0, e.vradius - Rational(1, 10)))
assert e.encloses_point(e.center + Point(e.hradius - Rational(1, 10), 0))
assert e.encloses_point(e.center + Point(e.hradius, 0)) is False
assert e.encloses_point(
e.center + Point(e.hradius + Rational(1, 10), 0)) is False
assert c1.encloses_point(Point(1, 0)) is False
assert c1.encloses_point(Point(0.3, 0.4)) is True
assert e.scale(2, 3) == Ellipse((0, 0), 4, 3)
assert e.scale(3, 6) == Ellipse((0, 0), 6, 6)
assert e.rotate(pi) == e
assert e.rotate(pi, (1, 2)) == Ellipse(Point(2, 4), 2, 1)
raises(NotImplementedError, lambda: e.rotate(pi/3))
cir = Circle(Point(1, 0), 1)
assert cir.rotate(pi/2) == Circle(Point(0, 1), 1)
assert cir.rotate(pi/3) == Circle(Point(S(1)/2, sqrt(3)/2), 1)
assert cir.rotate(pi/3, Point(1, 0)) == Circle(Point(1, 0), 1)
assert cir.rotate(pi/3, Point(0, 1)) == Circle(Point(S(1)/2 + sqrt(3)/2, S(1)/2 + sqrt(3)/2), 1)
def test_construction():
e1 = Ellipse(hradius=2, vradius=1, eccentricity=None)
assert e1.eccentricity == sqrt(3)/2
e2 = Ellipse(hradius=2, vradius=None, eccentricity=sqrt(3)/2)
assert e2.vradius == 1
e3 = Ellipse(hradius=None, vradius=1, eccentricity=sqrt(3)/2)
assert e3.hradius == 2
e4 = Ellipse(Point(0, 0), hradius=1, eccentricity=0)
assert e4.vradius == 1
def test_ellipse_random_point():
y1 = Symbol('y1', real=True)
e3 = Ellipse(Point(0, 0), y1, y1)
rx, ry = Symbol('rx'), Symbol('ry')
for ind in range(0, 5):
r = e3.random_point()
assert e3.equation(rx, ry).subs(zip((rx, ry), r.args)).equals(0)
def test_repr():
assert repr(Circle((0, 1), 2)) == 'Circle(Point2D(0, 1), 2)'
def test_transform():
c = Circle((1, 1), 2)
assert c.scale(-1) == Circle((-1, 1), 2)
assert c.scale(y=-1) == Circle((1, -1), 2)
assert c.scale(2) == Ellipse((2, 1), 4, 2)
assert Ellipse((0, 0), 2, 3).scale(2, 3, (4, 5)) == \
Ellipse(Point(-4, -10), 4, 9)
assert Circle((0, 0), 2).scale(2, 3, (4, 5)) == \
Ellipse(Point(-4, -10), 4, 6)
assert Ellipse((0, 0), 2, 3).scale(3, 3, (4, 5)) == \
Ellipse(Point(-8, -10), 6, 9)
assert Circle((0, 0), 2).scale(3, 3, (4, 5)) == \
Circle(Point(-8, -10), 6)
assert Circle(Point(-8, -10), 6).scale(S(1)/3, S(1)/3, (4, 5)) == \
Circle((0, 0), 2)
assert Circle((0, 0), 2).translate(4, 5) == \
Circle((4, 5), 2)
assert Circle((0, 0), 2).scale(3, 3) == \
Circle((0, 0), 6)
def test_bounds():
e1 = Ellipse(Point(0, 0), 3, 5)
e2 = Ellipse(Point(2, -2), 7, 7)
c1 = Circle(Point(2, -2), 7)
c2 = Circle(Point(-2, 0), Point(0, 2), Point(2, 0))
assert e1.bounds == (-3, -5, 3, 5)
assert e2.bounds == (-5, -9, 9, 5)
assert c1.bounds == (-5, -9, 9, 5)
assert c2.bounds == (-2, -2, 2, 2)
def test_reflect():
b = Symbol('b')
m = Symbol('m')
l = Line((0, b), slope=m)
t1 = Triangle((0, 0), (1, 0), (2, 3))
assert t1.area == -t1.reflect(l).area
e = Ellipse((1, 0), 1, 2)
assert e.area == -e.reflect(Line((1, 0), slope=0)).area
assert e.area == -e.reflect(Line((1, 0), slope=oo)).area
raises(NotImplementedError, lambda: e.reflect(Line((1, 0), slope=m)))
def test_is_tangent():
e1 = Ellipse(Point(0, 0), 3, 5)
c1 = Circle(Point(2, -2), 7)
assert e1.is_tangent(Point(0, 0)) is False
assert e1.is_tangent(Point(3, 0)) is False
assert e1.is_tangent(e1) is True
assert e1.is_tangent(Ellipse((0, 0), 1, 2)) is False
assert e1.is_tangent(Ellipse((0, 0), 3, 2)) is True
assert c1.is_tangent(Ellipse((2, -2), 7, 1)) is True
assert c1.is_tangent(Circle((11, -2), 2)) is True
assert c1.is_tangent(Circle((7, -2), 2)) is True
assert c1.is_tangent(Ray((-5, -2), (-15, -20))) is False
assert c1.is_tangent(Ray((-3, -2), (-15, -20))) is False
assert c1.is_tangent(Ray((-3, -22), (15, 20))) is False
assert c1.is_tangent(Ray((9, 20), (9, -20))) is True
assert e1.is_tangent(Segment((2, 2), (-7, 7))) is False
assert e1.is_tangent(Segment((0, 0), (1, 2))) is False
assert c1.is_tangent(Segment((0, 0), (-5, -2))) is False
assert e1.is_tangent(Segment((3, 0), (12, 12))) is False
assert e1.is_tangent(Segment((12, 12), (3, 0))) is False
assert e1.is_tangent(Segment((-3, 0), (3, 0))) is False
assert e1.is_tangent(Segment((-3, 5), (3, 5))) is True
assert e1.is_tangent(Line((0, 0), (1, 1))) is False
assert e1.is_tangent(Line((-3, 0), (-2.99, -0.001))) is False
assert e1.is_tangent(Line((-3, 0), (-3, 1))) is True
assert e1.is_tangent(Polygon((0, 0), (5, 5), (5, -5))) is False
assert e1.is_tangent(Polygon((-100, -50), (-40, -334), (-70, -52))) is False
assert e1.is_tangent(Polygon((-3, 0), (3, 0), (0, 1))) is False
assert e1.is_tangent(Polygon((-3, 0), (3, 0), (0, 5))) is False
assert e1.is_tangent(Polygon((-3, 0), (0, -5), (3, 0), (0, 5))) is False
assert e1.is_tangent(Polygon((-3, -5), (-3, 5), (3, 5), (3, -5))) is True
assert c1.is_tangent(Polygon((-3, -5), (-3, 5), (3, 5), (3, -5))) is False
assert e1.is_tangent(Polygon((0, 0), (3, 0), (7, 7), (0, 5))) is False
assert e1.is_tangent(Polygon((3, 12), (3, -12), (6, 5))) is True
assert e1.is_tangent(Polygon((3, 12), (3, -12), (0, -5), (0, 5))) is False
assert e1.is_tangent(Polygon((3, 0), (5, 7), (6, -5))) is False
raises(TypeError, lambda: e1.is_tangent(Point(0, 0, 0)))
raises(TypeError, lambda: e1.is_tangent(Rational(5)))
def test_parameter_value():
t = Symbol('t')
e = Ellipse(Point(0, 0), 3, 5)
assert e.parameter_value((3, 0), t) == {t: 0}
raises(ValueError, lambda: e.parameter_value((4, 0), t))
@slow
def test_second_moment_of_area():
x, y = symbols('x, y')
e = Ellipse(Point(0, 0), 5, 4)
I_yy = 2*4*integrate(sqrt(25 - x**2)*x**2, (x, -5, 5))/5
I_xx = 2*5*integrate(sqrt(16 - y**2)*y**2, (y, -4, 4))/4
Y = 3*sqrt(1 - x**2/5**2)
I_xy = integrate(integrate(y, (y, -Y, Y))*x, (x, -5, 5))
assert I_yy == e.second_moment_of_area()[1]
assert I_xx == e.second_moment_of_area()[0]
assert I_xy == e.second_moment_of_area()[2]
def test_circumference():
M = Symbol('M')
m = Symbol('m')
assert Ellipse(Point(0, 0), M, m).circumference == 4 * M * elliptic_e((M ** 2 - m ** 2) / M**2)
assert Ellipse(Point(0, 0), 5, 4).circumference == 20 * elliptic_e(S(9) / 25)
assert Ellipse(None, 1, None, 1).length == 2
assert Ellipse(None, 1, None, 0).circumference == 2*pi
assert abs(Ellipse(None, hradius=5, vradius=3).circumference.evalf(16) - 25.52699886339813) < 1e-10
def test_issue_15259():
assert Circle((1, 2), 0) == Point(1, 2)
def test_issue_15797_equals():
Ri = 0.024127189424130748
Ci = (0.0864931002830291, 0.0819863295239654)
A = Point(0, 0.0578591400998346)
c = Circle(Ci, Ri)
assert c.is_tangent(c.tangent_lines(A)[0]) == True
assert c.center.x.is_Rational
assert c.center.y.is_Rational
assert c.radius.is_Rational
u = Circle(Ci, Ri, evaluate=False)
assert u.center.x.is_Float
assert u.center.y.is_Float
assert u.radius.is_Float
def test_auxiliary_circle():
x, y, a, b = symbols('x y a b')
e = Ellipse((x, y), a, b)
assert e.auxiliary_circle() == Circle((x, y), Max(a, b))
assert Circle((3, 4), 8).auxiliary_circle() == Circle((3, 4), 8)
def test_director_circle():
x, y, a, b = symbols('x y a b')
e = Ellipse((x, y), a, b)
assert e.director_circle() == Circle((x, y), sqrt(a**2 + b**2))
assert Circle((3, 4), 8).director_circle() == Circle((3, 4), 8*sqrt(2))
| true | true |
1c33996306856d8ab8db44dd118d4cfa6add1638 | 8,846 | py | Python | tools/sqlmap/plugins/dbms/maxdb/enumeration.py | glaudsonml/kurgan-ai | c0ad4450f9fb2004f35b8a0201bfe894e01adc8f | [
"Apache-2.0"
] | 35 | 2017-05-22T14:42:01.000Z | 2020-09-07T21:24:41.000Z | tools/sqlmap/plugins/dbms/maxdb/enumeration.py | tmaxter/kurgan-ai | c0ad4450f9fb2004f35b8a0201bfe894e01adc8f | [
"Apache-2.0"
] | null | null | null | tools/sqlmap/plugins/dbms/maxdb/enumeration.py | tmaxter/kurgan-ai | c0ad4450f9fb2004f35b8a0201bfe894e01adc8f | [
"Apache-2.0"
] | 5 | 2017-12-19T03:36:54.000Z | 2021-04-14T18:05:08.000Z | #!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.common import Backend
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.common import safeSQLIdentificatorNaming
from lib.core.common import unsafeSQLIdentificatorNaming
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import paths
from lib.core.data import queries
from lib.core.exception import SqlmapMissingMandatoryOptionException
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapUserQuitException
from lib.core.settings import CURRENT_DB
from lib.utils.pivotdumptable import pivotDumpTable
from lib.techniques.brute.use import columnExists
from plugins.generic.enumeration import Enumeration as GenericEnumeration
class Enumeration(GenericEnumeration):
def __init__(self):
GenericEnumeration.__init__(self)
kb.data.processChar = lambda x: x.replace('_', ' ') if x else x
def getPasswordHashes(self):
warnMsg = "on SAP MaxDB it is not possible to enumerate the user password hashes"
logger.warn(warnMsg)
return {}
def getDbs(self):
if len(kb.data.cachedDbs) > 0:
return kb.data.cachedDbs
infoMsg = "fetching database names"
logger.info(infoMsg)
rootQuery = queries[Backend.getIdentifiedDbms()].dbs
randStr = randomStr()
query = rootQuery.inband.query
retVal = pivotDumpTable("(%s) AS %s" % (query, randStr), ['%s.schemaname' % randStr], blind=True)
if retVal:
kb.data.cachedDbs = retVal[0].values()[0]
if kb.data.cachedDbs:
kb.data.cachedDbs.sort()
return kb.data.cachedDbs
def getTables(self, bruteForce=None):
if len(kb.data.cachedTables) > 0:
return kb.data.cachedTables
self.forceDbmsEnum()
if conf.db == CURRENT_DB:
conf.db = self.getCurrentDb()
if conf.db:
dbs = conf.db.split(",")
else:
dbs = self.getDbs()
for db in filter(None, dbs):
dbs[dbs.index(db)] = safeSQLIdentificatorNaming(db)
infoMsg = "fetching tables for database"
infoMsg += "%s: %s" % ("s" if len(dbs) > 1 else "", ", ".join(db if isinstance(db, basestring) else db[0] for db in sorted(dbs)))
logger.info(infoMsg)
rootQuery = queries[Backend.getIdentifiedDbms()].tables
for db in dbs:
randStr = randomStr()
query = rootQuery.inband.query % (("'%s'" % db) if db != "USER" else 'USER')
retVal = pivotDumpTable("(%s) AS %s" % (query, randStr), ['%s.tablename' % randStr], blind=True)
if retVal:
for table in retVal[0].values()[0]:
if db not in kb.data.cachedTables:
kb.data.cachedTables[db] = [table]
else:
kb.data.cachedTables[db].append(table)
for db, tables in kb.data.cachedTables.items():
kb.data.cachedTables[db] = sorted(tables) if tables else tables
return kb.data.cachedTables
def getColumns(self, onlyColNames=False, colTuple=None, bruteForce=None, dumpMode=False):
self.forceDbmsEnum()
if conf.db is None or conf.db == CURRENT_DB:
if conf.db is None:
warnMsg = "missing database parameter. sqlmap is going "
warnMsg += "to use the current database to enumerate "
warnMsg += "table(s) columns"
logger.warn(warnMsg)
conf.db = self.getCurrentDb()
elif conf.db is not None:
if ',' in conf.db:
errMsg = "only one database name is allowed when enumerating "
errMsg += "the tables' columns"
raise SqlmapMissingMandatoryOptionException(errMsg)
conf.db = safeSQLIdentificatorNaming(conf.db)
if conf.col:
colList = conf.col.split(",")
else:
colList = []
if conf.excludeCol:
colList = [_ for _ in colList if _ not in conf.excludeCol.split(',')]
for col in colList:
colList[colList.index(col)] = safeSQLIdentificatorNaming(col)
if conf.tbl:
tblList = conf.tbl.split(",")
else:
self.getTables()
if len(kb.data.cachedTables) > 0:
tblList = kb.data.cachedTables.values()
if isinstance(tblList[0], (set, tuple, list)):
tblList = tblList[0]
else:
errMsg = "unable to retrieve the tables "
errMsg += "on database '%s'" % unsafeSQLIdentificatorNaming(conf.db)
raise SqlmapNoneDataException(errMsg)
for tbl in tblList:
tblList[tblList.index(tbl)] = safeSQLIdentificatorNaming(tbl, True)
if bruteForce:
resumeAvailable = False
for tbl in tblList:
for db, table, colName, colType in kb.brute.columns:
if db == conf.db and table == tbl:
resumeAvailable = True
break
if resumeAvailable and not conf.freshQueries or colList:
columns = {}
for column in colList:
columns[column] = None
for tbl in tblList:
for db, table, colName, colType in kb.brute.columns:
if db == conf.db and table == tbl:
columns[colName] = colType
if conf.db in kb.data.cachedColumns:
kb.data.cachedColumns[safeSQLIdentificatorNaming(conf.db)][safeSQLIdentificatorNaming(tbl, True)] = columns
else:
kb.data.cachedColumns[safeSQLIdentificatorNaming(conf.db)] = {safeSQLIdentificatorNaming(tbl, True): columns}
return kb.data.cachedColumns
message = "do you want to use common column existence check? [y/N/q] "
test = readInput(message, default="Y" if "Y" in message else "N")
if test[0] in ("n", "N"):
return
elif test[0] in ("q", "Q"):
raise SqlmapUserQuitException
else:
return columnExists(paths.COMMON_COLUMNS)
rootQuery = queries[Backend.getIdentifiedDbms()].columns
for tbl in tblList:
if conf.db is not None and len(kb.data.cachedColumns) > 0 \
and conf.db in kb.data.cachedColumns and tbl in \
kb.data.cachedColumns[conf.db]:
infoMsg = "fetched tables' columns on "
infoMsg += "database '%s'" % unsafeSQLIdentificatorNaming(conf.db)
logger.info(infoMsg)
return {conf.db: kb.data.cachedColumns[conf.db]}
if dumpMode and colList:
table = {}
table[safeSQLIdentificatorNaming(tbl)] = dict((_, None) for _ in colList)
kb.data.cachedColumns[safeSQLIdentificatorNaming(conf.db)] = table
continue
infoMsg = "fetching columns "
infoMsg += "for table '%s' " % unsafeSQLIdentificatorNaming(tbl)
infoMsg += "on database '%s'" % unsafeSQLIdentificatorNaming(conf.db)
logger.info(infoMsg)
randStr = randomStr()
query = rootQuery.inband.query % (unsafeSQLIdentificatorNaming(tbl), ("'%s'" % unsafeSQLIdentificatorNaming(conf.db)) if unsafeSQLIdentificatorNaming(conf.db) != "USER" else 'USER')
retVal = pivotDumpTable("(%s) AS %s" % (query, randStr), ['%s.columnname' % randStr, '%s.datatype' % randStr, '%s.len' % randStr], blind=True)
if retVal:
table = {}
columns = {}
for columnname, datatype, length in zip(retVal[0]["%s.columnname" % randStr], retVal[0]["%s.datatype" % randStr], retVal[0]["%s.len" % randStr]):
columns[safeSQLIdentificatorNaming(columnname)] = "%s(%s)" % (datatype, length)
table[tbl] = columns
kb.data.cachedColumns[conf.db] = table
return kb.data.cachedColumns
def getPrivileges(self, *args):
warnMsg = "on SAP MaxDB it is not possible to enumerate the user privileges"
logger.warn(warnMsg)
return {}
def searchDb(self):
warnMsg = "on SAP MaxDB it is not possible to search databases"
logger.warn(warnMsg)
return []
def getHostname(self):
warnMsg = "on SAP MaxDB it is not possible to enumerate the hostname"
logger.warn(warnMsg)
| 37.168067 | 193 | 0.587723 |
from lib.core.common import Backend
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.common import safeSQLIdentificatorNaming
from lib.core.common import unsafeSQLIdentificatorNaming
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import paths
from lib.core.data import queries
from lib.core.exception import SqlmapMissingMandatoryOptionException
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapUserQuitException
from lib.core.settings import CURRENT_DB
from lib.utils.pivotdumptable import pivotDumpTable
from lib.techniques.brute.use import columnExists
from plugins.generic.enumeration import Enumeration as GenericEnumeration
class Enumeration(GenericEnumeration):
def __init__(self):
GenericEnumeration.__init__(self)
kb.data.processChar = lambda x: x.replace('_', ' ') if x else x
def getPasswordHashes(self):
warnMsg = "on SAP MaxDB it is not possible to enumerate the user password hashes"
logger.warn(warnMsg)
return {}
def getDbs(self):
if len(kb.data.cachedDbs) > 0:
return kb.data.cachedDbs
infoMsg = "fetching database names"
logger.info(infoMsg)
rootQuery = queries[Backend.getIdentifiedDbms()].dbs
randStr = randomStr()
query = rootQuery.inband.query
retVal = pivotDumpTable("(%s) AS %s" % (query, randStr), ['%s.schemaname' % randStr], blind=True)
if retVal:
kb.data.cachedDbs = retVal[0].values()[0]
if kb.data.cachedDbs:
kb.data.cachedDbs.sort()
return kb.data.cachedDbs
def getTables(self, bruteForce=None):
if len(kb.data.cachedTables) > 0:
return kb.data.cachedTables
self.forceDbmsEnum()
if conf.db == CURRENT_DB:
conf.db = self.getCurrentDb()
if conf.db:
dbs = conf.db.split(",")
else:
dbs = self.getDbs()
for db in filter(None, dbs):
dbs[dbs.index(db)] = safeSQLIdentificatorNaming(db)
infoMsg = "fetching tables for database"
infoMsg += "%s: %s" % ("s" if len(dbs) > 1 else "", ", ".join(db if isinstance(db, basestring) else db[0] for db in sorted(dbs)))
logger.info(infoMsg)
rootQuery = queries[Backend.getIdentifiedDbms()].tables
for db in dbs:
randStr = randomStr()
query = rootQuery.inband.query % (("'%s'" % db) if db != "USER" else 'USER')
retVal = pivotDumpTable("(%s) AS %s" % (query, randStr), ['%s.tablename' % randStr], blind=True)
if retVal:
for table in retVal[0].values()[0]:
if db not in kb.data.cachedTables:
kb.data.cachedTables[db] = [table]
else:
kb.data.cachedTables[db].append(table)
for db, tables in kb.data.cachedTables.items():
kb.data.cachedTables[db] = sorted(tables) if tables else tables
return kb.data.cachedTables
def getColumns(self, onlyColNames=False, colTuple=None, bruteForce=None, dumpMode=False):
self.forceDbmsEnum()
if conf.db is None or conf.db == CURRENT_DB:
if conf.db is None:
warnMsg = "missing database parameter. sqlmap is going "
warnMsg += "to use the current database to enumerate "
warnMsg += "table(s) columns"
logger.warn(warnMsg)
conf.db = self.getCurrentDb()
elif conf.db is not None:
if ',' in conf.db:
errMsg = "only one database name is allowed when enumerating "
errMsg += "the tables' columns"
raise SqlmapMissingMandatoryOptionException(errMsg)
conf.db = safeSQLIdentificatorNaming(conf.db)
if conf.col:
colList = conf.col.split(",")
else:
colList = []
if conf.excludeCol:
colList = [_ for _ in colList if _ not in conf.excludeCol.split(',')]
for col in colList:
colList[colList.index(col)] = safeSQLIdentificatorNaming(col)
if conf.tbl:
tblList = conf.tbl.split(",")
else:
self.getTables()
if len(kb.data.cachedTables) > 0:
tblList = kb.data.cachedTables.values()
if isinstance(tblList[0], (set, tuple, list)):
tblList = tblList[0]
else:
errMsg = "unable to retrieve the tables "
errMsg += "on database '%s'" % unsafeSQLIdentificatorNaming(conf.db)
raise SqlmapNoneDataException(errMsg)
for tbl in tblList:
tblList[tblList.index(tbl)] = safeSQLIdentificatorNaming(tbl, True)
if bruteForce:
resumeAvailable = False
for tbl in tblList:
for db, table, colName, colType in kb.brute.columns:
if db == conf.db and table == tbl:
resumeAvailable = True
break
if resumeAvailable and not conf.freshQueries or colList:
columns = {}
for column in colList:
columns[column] = None
for tbl in tblList:
for db, table, colName, colType in kb.brute.columns:
if db == conf.db and table == tbl:
columns[colName] = colType
if conf.db in kb.data.cachedColumns:
kb.data.cachedColumns[safeSQLIdentificatorNaming(conf.db)][safeSQLIdentificatorNaming(tbl, True)] = columns
else:
kb.data.cachedColumns[safeSQLIdentificatorNaming(conf.db)] = {safeSQLIdentificatorNaming(tbl, True): columns}
return kb.data.cachedColumns
message = "do you want to use common column existence check? [y/N/q] "
test = readInput(message, default="Y" if "Y" in message else "N")
if test[0] in ("n", "N"):
return
elif test[0] in ("q", "Q"):
raise SqlmapUserQuitException
else:
return columnExists(paths.COMMON_COLUMNS)
rootQuery = queries[Backend.getIdentifiedDbms()].columns
for tbl in tblList:
if conf.db is not None and len(kb.data.cachedColumns) > 0 \
and conf.db in kb.data.cachedColumns and tbl in \
kb.data.cachedColumns[conf.db]:
infoMsg = "fetched tables' columns on "
infoMsg += "database '%s'" % unsafeSQLIdentificatorNaming(conf.db)
logger.info(infoMsg)
return {conf.db: kb.data.cachedColumns[conf.db]}
if dumpMode and colList:
table = {}
table[safeSQLIdentificatorNaming(tbl)] = dict((_, None) for _ in colList)
kb.data.cachedColumns[safeSQLIdentificatorNaming(conf.db)] = table
continue
infoMsg = "fetching columns "
infoMsg += "for table '%s' " % unsafeSQLIdentificatorNaming(tbl)
infoMsg += "on database '%s'" % unsafeSQLIdentificatorNaming(conf.db)
logger.info(infoMsg)
randStr = randomStr()
query = rootQuery.inband.query % (unsafeSQLIdentificatorNaming(tbl), ("'%s'" % unsafeSQLIdentificatorNaming(conf.db)) if unsafeSQLIdentificatorNaming(conf.db) != "USER" else 'USER')
retVal = pivotDumpTable("(%s) AS %s" % (query, randStr), ['%s.columnname' % randStr, '%s.datatype' % randStr, '%s.len' % randStr], blind=True)
if retVal:
table = {}
columns = {}
for columnname, datatype, length in zip(retVal[0]["%s.columnname" % randStr], retVal[0]["%s.datatype" % randStr], retVal[0]["%s.len" % randStr]):
columns[safeSQLIdentificatorNaming(columnname)] = "%s(%s)" % (datatype, length)
table[tbl] = columns
kb.data.cachedColumns[conf.db] = table
return kb.data.cachedColumns
def getPrivileges(self, *args):
warnMsg = "on SAP MaxDB it is not possible to enumerate the user privileges"
logger.warn(warnMsg)
return {}
def searchDb(self):
warnMsg = "on SAP MaxDB it is not possible to search databases"
logger.warn(warnMsg)
return []
def getHostname(self):
warnMsg = "on SAP MaxDB it is not possible to enumerate the hostname"
logger.warn(warnMsg)
| true | true |
1c339b096f870231440fe4df89c70a59a71d8ee8 | 2,741 | py | Python | PSO.py | plenoi/EvoloPy | 7c943925b9a73ad671735493ce281b67d178dc7c | [
"Apache-2.0"
] | null | null | null | PSO.py | plenoi/EvoloPy | 7c943925b9a73ad671735493ce281b67d178dc7c | [
"Apache-2.0"
] | null | null | null | PSO.py | plenoi/EvoloPy | 7c943925b9a73ad671735493ce281b67d178dc7c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun May 15 22:37:00 2016
@author: Hossam Faris
"""
import random
import numpy
from colorama import Fore, Back, Style
from solution import solution
import time
def PSO(objf,lb,ub,dim,PopSize,iters):
# PSO parameters
# dim=30
# iters=200
Vmax=6
# PopSize=50 #population size
wMax=0.9
wMin=0.2
c1=2
c2=2
# lb=-10
# ub=10
#
s=solution()
if not isinstance(lb, list):
lb = [lb] * dim
if not isinstance(ub, list):
ub = [ub] * dim
######################## Initializations
vel=numpy.zeros((PopSize,dim))
pBestScore=numpy.zeros(PopSize)
pBestScore.fill(float("inf"))
pBest=numpy.zeros((PopSize,dim))
gBest=numpy.zeros(dim)
gBestScore=float("inf")
pos = numpy.zeros((PopSize, dim))
for i in range(dim):
pos[:, i] = numpy.random.uniform(0,1, PopSize) * (ub[i] - lb[i]) + lb[i]
convergence_curve=numpy.zeros(iters)
############################################
print("PSO is optimizing \""+objf.__name__+"\"")
timerStart=time.time()
s.startTime=time.strftime("%Y-%m-%d-%H-%M-%S")
for l in range(0,iters):
for i in range(0,PopSize):
#pos[i,:]=checkBounds(pos[i,:],lb,ub)
for j in range(dim):
pos[i, j] = numpy.clip(pos[i,j], lb[j], ub[j])
#Calculate objective function for each particle
fitness=objf(pos[i,:])
if(pBestScore[i]>fitness):
pBestScore[i]=fitness
pBest[i,:]=pos[i,:].copy()
if(gBestScore>fitness):
gBestScore=fitness
gBest=pos[i,:].copy()
#Update the W of PSO
w=wMax-l*((wMax-wMin)/iters);
for i in range(0,PopSize):
for j in range (0,dim):
r1=random.random()
r2=random.random()
vel[i,j]=w*vel[i,j]+c1*r1*(pBest[i,j]-pos[i,j])+c2*r2*(gBest[j]-pos[i,j])
if(vel[i,j]>Vmax):
vel[i,j]=Vmax
if(vel[i,j]<-Vmax):
vel[i,j]=-Vmax
pos[i,j]=pos[i,j]+vel[i,j]
convergence_curve[l]=gBestScore
if (l%1==0):
print(['At iteration '+ str(l+1)+ ' the best fitness is '+ str(gBestScore)]);
timerEnd=time.time()
s.endTime=time.strftime("%Y-%m-%d-%H-%M-%S")
s.executionTime=timerEnd-timerStart
s.convergence=convergence_curve
s.optimizer="PSO"
s.objfname=objf.__name__
return s
| 24.256637 | 92 | 0.484495 |
import random
import numpy
from colorama import Fore, Back, Style
from solution import solution
import time
def PSO(objf,lb,ub,dim,PopSize,iters):
Vmax=6
wMin=0.2
c1=2
c2=2
s=solution()
if not isinstance(lb, list):
lb = [lb] * dim
if not isinstance(ub, list):
ub = [ub] * dim
convergence_curve[l]=gBestScore
if (l%1==0):
print(['At iteration '+ str(l+1)+ ' the best fitness is '+ str(gBestScore)]);
timerEnd=time.time()
s.endTime=time.strftime("%Y-%m-%d-%H-%M-%S")
s.executionTime=timerEnd-timerStart
s.convergence=convergence_curve
s.optimizer="PSO"
s.objfname=objf.__name__
return s
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.