hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3c82edf683517fc13c8a9280a1bccfd34ca9bf0c | 6,552 | py | Python | test/functional/p2p_tx_download.py | DevVoluntarism/bitcoin-abc | b37e310aa64c5e87f11e92549a5897fc51abd142 | [
"MIT"
] | null | null | null | test/functional/p2p_tx_download.py | DevVoluntarism/bitcoin-abc | b37e310aa64c5e87f11e92549a5897fc51abd142 | [
"MIT"
] | null | null | null | test/functional/p2p_tx_download.py | DevVoluntarism/bitcoin-abc | b37e310aa64c5e87f11e92549a5897fc51abd142 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test transaction download behavior
"""
from test_framework.address import ADDRESS_BCHREG_UNSPENDABLE
from test_framework.messages import (
CInv,
CTransaction,
FromHex,
MSG_TX,
MSG_TYPE_MASK,
msg_inv,
msg_notfound,
)
from test_framework.mininode import (
P2PInterface,
mininode_lock,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
wait_until,
)
import time
class TestP2PConn(P2PInterface):
    """P2P connection stub that tallies how many tx getdata requests it receives."""

    def __init__(self):
        super().__init__()
        # Running total of transaction invs the node has requested from us.
        self.tx_getdata_count = 0

    def on_getdata(self, message):
        """Add every MSG_TX entry of an incoming getdata to the tally."""
        tx_invs = [inv for inv in message.inv if inv.type & MSG_TYPE_MASK == MSG_TX]
        self.tx_getdata_count += len(tx_invs)
# Constants from net_processing
# NOTE(review): these mirror the C++ node's tx-request timers; they must be
# kept in sync with net_processing or the timing assumptions below break.
GETDATA_TX_INTERVAL = 60  # seconds
MAX_GETDATA_RANDOM_DELAY = 2  # seconds
INBOUND_PEER_TX_DELAY = 2  # seconds
MAX_GETDATA_IN_FLIGHT = 100
TX_EXPIRY_INTERVAL = GETDATA_TX_INTERVAL * 10

# Python test constants
NUM_INBOUND = 10
# Worst-case wait before an inbound peer is asked for an announced tx:
# base interval plus the random delay plus the inbound-peer penalty.
MAX_GETDATA_INBOUND_WAIT = GETDATA_TX_INTERVAL + \
    MAX_GETDATA_RANDOM_DELAY + INBOUND_PEER_TX_DELAY
class TxDownloadTest(BitcoinTestFramework):
    """Functional test of the node's transaction download / getdata scheduling."""

    def set_test_params(self):
        self.setup_clean_chain = False
        self.num_nodes = 2

    def test_tx_requests(self):
        """Check that an announced txid is eventually requested from every peer."""
        self.log.info(
            "Test that we request transactions from all our peers, eventually")

        # A fake txid is enough: we only care about getdata requests, never delivery.
        txid = 0xdeadbeef

        self.log.info("Announce the txid from each incoming peer to node 0")
        msg = msg_inv([CInv(t=1, h=txid)])
        for p in self.nodes[0].p2ps:
            p.send_and_ping(msg)

        outstanding_peer_index = [i for i in range(len(self.nodes[0].p2ps))]

        def getdata_found(peer_index):
            # True once this peer's most recent getdata asks for our txid.
            p = self.nodes[0].p2ps[peer_index]
            with mininode_lock:
                return p.last_message.get(
                    "getdata") and p.last_message["getdata"].inv[-1].hash == txid

        # Advance mocktime past the worst-case request delay until every
        # peer has been asked at least once.
        node_0_mocktime = int(time.time())
        while outstanding_peer_index:
            node_0_mocktime += MAX_GETDATA_INBOUND_WAIT
            self.nodes[0].setmocktime(node_0_mocktime)
            wait_until(lambda: any(getdata_found(i)
                                   for i in outstanding_peer_index))
            # NOTE(review): removing from the list while iterating it can skip
            # entries, but the enclosing while loop retries until it drains.
            for i in outstanding_peer_index:
                if getdata_found(i):
                    outstanding_peer_index.remove(i)

        self.nodes[0].setmocktime(0)
        self.log.info("All outstanding peers received a getdata")

    def test_inv_block(self):
        """Announce a real tx from unresponsive peers; verify the honest
        outbound connection still delivers it to node 1 within the timeout."""
        self.log.info("Generate a transaction on node 0")
        tx = self.nodes[0].createrawtransaction(
            inputs=[{
                # coinbase
                "txid": self.nodes[0].getblock(self.nodes[0].getblockhash(1))['tx'][0],
                "vout": 0
            }],
            outputs={ADDRESS_BCHREG_UNSPENDABLE: 50 - 0.00025},
        )
        tx = self.nodes[0].signrawtransactionwithkey(
            hexstring=tx,
            privkeys=[self.nodes[0].get_deterministic_priv_key().key],
        )['hex']
        ctx = FromHex(CTransaction(), tx)
        txid = int(ctx.rehash(), 16)

        self.log.info(
            "Announce the transaction to all nodes from all {} incoming peers, but never send it".format(NUM_INBOUND))
        msg = msg_inv([CInv(t=1, h=txid)])
        for p in self.peers:
            p.send_and_ping(msg)

        self.log.info("Put the tx in node 0's mempool")
        self.nodes[0].sendrawtransaction(tx)

        # Since node 1 is connected outbound to an honest peer (node 0), it
        # should get the tx within a timeout. (Assuming that node 0
        # announced the tx within the timeout)
        # The timeout is the sum of
        # * the worst case until the tx is first requested from an inbound
        #   peer, plus
        # * the first time it is re-requested from the outbound peer, plus
        # * 2 seconds to avoid races
        assert self.nodes[1].getpeerinfo()[0]['inbound'] is False
        timeout = 2 + (MAX_GETDATA_RANDOM_DELAY + INBOUND_PEER_TX_DELAY) + (
            GETDATA_TX_INTERVAL + MAX_GETDATA_RANDOM_DELAY)
        self.log.info(
            "Tx should be received at node 1 after {} seconds".format(timeout))
        self.sync_mempools(timeout=timeout)

    def test_in_flight_max(self):
        """Check the MAX_GETDATA_IN_FLIGHT per-peer cap and NOTFOUND / expiry
        handling that frees up request slots."""
        self.log.info("Test that we don't request more than {} transactions from any peer, every {} minutes".format(
            MAX_GETDATA_IN_FLIGHT, TX_EXPIRY_INTERVAL / 60))
        txids = [i for i in range(MAX_GETDATA_IN_FLIGHT + 2)]

        p = self.nodes[0].p2ps[0]

        with mininode_lock:
            p.tx_getdata_count = 0

        # Announce two more txids than the cap; only the cap should be requested.
        p.send_message(msg_inv([CInv(t=1, h=i) for i in txids]))
        wait_until(lambda: p.tx_getdata_count >=
                   MAX_GETDATA_IN_FLIGHT, lock=mininode_lock)
        with mininode_lock:
            assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT)

        self.log.info(
            "Now check that if we send a NOTFOUND for a transaction, we'll get one more request")
        p.send_message(msg_notfound(vec=[CInv(t=1, h=txids[0])]))
        wait_until(
            lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT + 1,
            timeout=10,
            lock=mininode_lock)
        with mininode_lock:
            assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT + 1)

        # Jump mocktime far enough that in-flight requests expire and the
        # final announced txid gets requested too.
        WAIT_TIME = TX_EXPIRY_INTERVAL // 2 + TX_EXPIRY_INTERVAL
        self.log.info(
            "if we wait about {} minutes, we should eventually get more requests".format(
                WAIT_TIME / 60))
        self.nodes[0].setmocktime(int(time.time() + WAIT_TIME))
        wait_until(lambda: p.tx_getdata_count == MAX_GETDATA_IN_FLIGHT + 2)
        self.nodes[0].setmocktime(0)

    def run_test(self):
        # Setup the p2p connections
        self.peers = []
        for node in self.nodes:
            for i in range(NUM_INBOUND):
                self.peers.append(node.add_p2p_connection(TestP2PConn()))

        self.log.info(
            "Nodes are setup with {} incoming connections each".format(NUM_INBOUND))

        # Test the in-flight max first, because we want no transactions in
        # flight ahead of this test.
        self.test_in_flight_max()
        self.test_inv_block()
        self.test_tx_requests()
# Script entry point: run the functional test through the framework harness.
if __name__ == '__main__':
    TxDownloadTest().main()
| 34.666667 | 118 | 0.634768 |
220cf001d6038206a7f1cf8f6c5e435736fb0700 | 21,067 | py | Python | references/classification/train.py | futurelife2016/vision | bbd9ff8fb936846aa0412996abab19b563677e5b | [
"BSD-3-Clause"
] | 1 | 2022-01-06T01:58:01.000Z | 2022-01-06T01:58:01.000Z | references/classification/train.py | futurelife2016/vision | bbd9ff8fb936846aa0412996abab19b563677e5b | [
"BSD-3-Clause"
] | null | null | null | references/classification/train.py | futurelife2016/vision | bbd9ff8fb936846aa0412996abab19b563677e5b | [
"BSD-3-Clause"
] | null | null | null | import datetime
import os
import time
import warnings
import presets
import torch
import torch.utils.data
import torchvision
import transforms
import utils
from torch import nn
from torch.utils.data.dataloader import default_collate
from torchvision.transforms.functional import InterpolationMode
try:
from torchvision.prototype import models as PM
except ImportError:
PM = None
def train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, args, model_ema=None, scaler=None):
    """Run one training epoch.

    Args:
        model: network in train mode (set here).
        criterion: loss function applied to (output, target).
        optimizer: optimizer stepped once per batch.
        data_loader: yields (image, target) batches.
        device: device the batches are moved to.
        epoch: current epoch index (used for logging and EMA warmup).
        args: parsed CLI namespace (print_freq, amp, clip_grad_norm,
            model_ema_steps, lr_warmup_epochs are read here).
        model_ema: optional EMA wrapper updated every ``args.model_ema_steps`` steps.
        scaler: ``torch.cuda.amp.GradScaler`` instance; required when ``args.amp``.
    """
    model.train()
    metric_logger = utils.MetricLogger(delimiter=" ")
    metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value}"))
    metric_logger.add_meter("img/s", utils.SmoothedValue(window_size=10, fmt="{value}"))

    header = f"Epoch: [{epoch}]"
    for i, (image, target) in enumerate(metric_logger.log_every(data_loader, args.print_freq, header)):
        start_time = time.time()
        image, target = image.to(device), target.to(device)
        output = model(image)

        optimizer.zero_grad()
        if args.amp:
            with torch.cuda.amp.autocast():
                loss = criterion(output, target)
            scaler.scale(loss).backward()
            if args.clip_grad_norm is not None:
                # BUGFIX: clipping was previously skipped entirely under AMP.
                # Gradients must be unscaled before clipping so the threshold
                # applies to the true (unscaled) gradient norms.
                scaler.unscale_(optimizer)
                nn.utils.clip_grad_norm_(utils.get_optimizer_params(optimizer), args.clip_grad_norm)
            scaler.step(optimizer)
            scaler.update()
        else:
            loss = criterion(output, target)
            loss.backward()
            if args.clip_grad_norm is not None:
                nn.utils.clip_grad_norm_(utils.get_optimizer_params(optimizer), args.clip_grad_norm)
            optimizer.step()

        if model_ema and i % args.model_ema_steps == 0:
            model_ema.update_parameters(model)
            if epoch < args.lr_warmup_epochs:
                # Reset ema buffer to keep copying weights during warmup period
                model_ema.n_averaged.fill_(0)

        acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
        batch_size = image.shape[0]
        metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"])
        metric_logger.meters["acc1"].update(acc1.item(), n=batch_size)
        metric_logger.meters["acc5"].update(acc5.item(), n=batch_size)
        metric_logger.meters["img/s"].update(batch_size / (time.time() - start_time))
def evaluate(model, criterion, data_loader, device, print_freq=100, log_suffix=""):
    """Evaluate *model* on *data_loader* and return the global top-1 accuracy.

    Runs under ``torch.inference_mode`` and reduces acc1/acc5/loss across all
    distributed processes before reporting. *log_suffix* distinguishes runs
    in the log (e.g. "EMA").
    """
    model.eval()
    metric_logger = utils.MetricLogger(delimiter=" ")
    header = f"Test: {log_suffix}"

    num_processed_samples = 0
    with torch.inference_mode():
        for image, target in metric_logger.log_every(data_loader, print_freq, header):
            image = image.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            output = model(image)
            loss = criterion(output, target)

            acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
            # FIXME need to take into account that the datasets
            # could have been padded in distributed setup
            batch_size = image.shape[0]
            metric_logger.update(loss=loss.item())
            metric_logger.meters["acc1"].update(acc1.item(), n=batch_size)
            metric_logger.meters["acc5"].update(acc5.item(), n=batch_size)
            num_processed_samples += batch_size
    # gather the stats from all processes
    num_processed_samples = utils.reduce_across_processes(num_processed_samples)
    if (
        hasattr(data_loader.dataset, "__len__")
        and len(data_loader.dataset) != num_processed_samples
        and torch.distributed.get_rank() == 0
    ):
        # See FIXME above
        warnings.warn(
            f"It looks like the dataset has {len(data_loader.dataset)} samples, but {num_processed_samples} "
            "samples were used for the validation, which might bias the results. "
            "Try adjusting the batch size and / or the world size. "
            "Setting the world size to 1 is always a safe bet."
        )

    metric_logger.synchronize_between_processes()

    print(f"{header} Acc@1 {metric_logger.acc1.global_avg:.3f} Acc@5 {metric_logger.acc5.global_avg:.3f}")
    return metric_logger.acc1.global_avg
def _get_cache_path(filepath):
import hashlib
h = hashlib.sha1(filepath.encode()).hexdigest()
cache_path = os.path.join("~", ".torch", "vision", "datasets", "imagefolder", h[:10] + ".pt")
cache_path = os.path.expanduser(cache_path)
return cache_path
def load_data(traindir, valdir, args):
    """Build train/val ImageFolder datasets, optionally from an on-disk cache.

    Returns ``(dataset, dataset_test, train_sampler, test_sampler)``.
    Distributed samplers are used when ``args.distributed`` is set.
    """
    # Data loading code
    print("Loading data")
    val_resize_size, val_crop_size, train_crop_size = args.val_resize_size, args.val_crop_size, args.train_crop_size
    interpolation = InterpolationMode(args.interpolation)

    print("Loading training data")
    st = time.time()
    cache_path = _get_cache_path(traindir)
    if args.cache_dataset and os.path.exists(cache_path):
        # Attention, as the transforms are also cached!
        print(f"Loading dataset_train from {cache_path}")
        dataset, _ = torch.load(cache_path)
    else:
        auto_augment_policy = getattr(args, "auto_augment", None)
        random_erase_prob = getattr(args, "random_erase", 0.0)
        dataset = torchvision.datasets.ImageFolder(
            traindir,
            presets.ClassificationPresetTrain(
                crop_size=train_crop_size,
                interpolation=interpolation,
                auto_augment_policy=auto_augment_policy,
                random_erase_prob=random_erase_prob,
            ),
        )
        if args.cache_dataset:
            print(f"Saving dataset_train to {cache_path}")
            utils.mkdir(os.path.dirname(cache_path))
            # Cache the (dataset, path) pair; only rank 0 writes.
            utils.save_on_master((dataset, traindir), cache_path)
    print("Took", time.time() - st)

    print("Loading validation data")
    cache_path = _get_cache_path(valdir)
    if args.cache_dataset and os.path.exists(cache_path):
        # Attention, as the transforms are also cached!
        print(f"Loading dataset_test from {cache_path}")
        dataset_test, _ = torch.load(cache_path)
    else:
        if not args.weights:
            preprocessing = presets.ClassificationPresetEval(
                crop_size=val_crop_size, resize_size=val_resize_size, interpolation=interpolation
            )
        else:
            # Prototype weights carry their own eval transforms.
            fn = PM.quantization.__dict__[args.model] if hasattr(args, "backend") else PM.__dict__[args.model]
            weights = PM._api.get_weight(fn, args.weights)
            preprocessing = weights.transforms()

        dataset_test = torchvision.datasets.ImageFolder(
            valdir,
            preprocessing,
        )
        if args.cache_dataset:
            print(f"Saving dataset_test to {cache_path}")
            utils.mkdir(os.path.dirname(cache_path))
            utils.save_on_master((dataset_test, valdir), cache_path)

    print("Creating data loaders")
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
        test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test, shuffle=False)
    else:
        train_sampler = torch.utils.data.RandomSampler(dataset)
        test_sampler = torch.utils.data.SequentialSampler(dataset_test)

    return dataset, dataset_test, train_sampler, test_sampler
def main(args):
    """Full training/evaluation driver: builds data, model, optimizer,
    LR schedule, optional EMA/AMP/DDP, then runs the epoch loop and
    checkpoints after every epoch."""
    if args.weights and PM is None:
        raise ImportError("The prototype module couldn't be found. Please install the latest torchvision nightly.")
    if args.output_dir:
        utils.mkdir(args.output_dir)

    utils.init_distributed_mode(args)
    print(args)

    device = torch.device(args.device)

    if args.use_deterministic_algorithms:
        torch.backends.cudnn.benchmark = False
        torch.use_deterministic_algorithms(True)
    else:
        torch.backends.cudnn.benchmark = True

    train_dir = os.path.join(args.data_path, "train")
    val_dir = os.path.join(args.data_path, "val")
    dataset, dataset_test, train_sampler, test_sampler = load_data(train_dir, val_dir, args)

    # Mixup/CutMix are applied batch-wise through a custom collate function.
    collate_fn = None
    num_classes = len(dataset.classes)
    mixup_transforms = []
    if args.mixup_alpha > 0.0:
        mixup_transforms.append(transforms.RandomMixup(num_classes, p=1.0, alpha=args.mixup_alpha))
    if args.cutmix_alpha > 0.0:
        mixup_transforms.append(transforms.RandomCutmix(num_classes, p=1.0, alpha=args.cutmix_alpha))
    if mixup_transforms:
        mixupcutmix = torchvision.transforms.RandomChoice(mixup_transforms)
        collate_fn = lambda batch: mixupcutmix(*default_collate(batch))  # noqa: E731
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        sampler=train_sampler,
        num_workers=args.workers,
        pin_memory=True,
        collate_fn=collate_fn,
    )
    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=args.batch_size, sampler=test_sampler, num_workers=args.workers, pin_memory=True
    )

    print("Creating model")
    if not args.weights:
        model = torchvision.models.__dict__[args.model](pretrained=args.pretrained, num_classes=num_classes)
    else:
        model = PM.__dict__[args.model](weights=args.weights, num_classes=num_classes)
    model.to(device)

    if args.distributed and args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)

    criterion = nn.CrossEntropyLoss(label_smoothing=args.label_smoothing)

    if args.norm_weight_decay is None:
        parameters = model.parameters()
    else:
        # Give normalization layers their own weight-decay value.
        param_groups = torchvision.ops._utils.split_normalization_params(model)
        wd_groups = [args.norm_weight_decay, args.weight_decay]
        parameters = [{"params": p, "weight_decay": w} for p, w in zip(param_groups, wd_groups) if p]

    opt_name = args.opt.lower()
    if opt_name.startswith("sgd"):
        optimizer = torch.optim.SGD(
            parameters,
            lr=args.lr,
            momentum=args.momentum,
            weight_decay=args.weight_decay,
            nesterov="nesterov" in opt_name,
        )
    elif opt_name == "rmsprop":
        optimizer = torch.optim.RMSprop(
            parameters, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay, eps=0.0316, alpha=0.9
        )
    elif opt_name == "adamw":
        optimizer = torch.optim.AdamW(parameters, lr=args.lr, weight_decay=args.weight_decay)
    else:
        raise RuntimeError(f"Invalid optimizer {args.opt}. Only SGD, RMSprop and AdamW are supported.")

    scaler = torch.cuda.amp.GradScaler() if args.amp else None

    args.lr_scheduler = args.lr_scheduler.lower()
    if args.lr_scheduler == "steplr":
        main_lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_step_size, gamma=args.lr_gamma)
    elif args.lr_scheduler == "cosineannealinglr":
        main_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=args.epochs - args.lr_warmup_epochs
        )
    elif args.lr_scheduler == "exponentiallr":
        main_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=args.lr_gamma)
    else:
        raise RuntimeError(
            f"Invalid lr scheduler '{args.lr_scheduler}'. Only StepLR, CosineAnnealingLR and ExponentialLR "
            "are supported."
        )

    if args.lr_warmup_epochs > 0:
        if args.lr_warmup_method == "linear":
            warmup_lr_scheduler = torch.optim.lr_scheduler.LinearLR(
                optimizer, start_factor=args.lr_warmup_decay, total_iters=args.lr_warmup_epochs
            )
        elif args.lr_warmup_method == "constant":
            warmup_lr_scheduler = torch.optim.lr_scheduler.ConstantLR(
                optimizer, factor=args.lr_warmup_decay, total_iters=args.lr_warmup_epochs
            )
        else:
            raise RuntimeError(
                f"Invalid warmup lr method '{args.lr_warmup_method}'. Only linear and constant are supported."
            )
        lr_scheduler = torch.optim.lr_scheduler.SequentialLR(
            optimizer, schedulers=[warmup_lr_scheduler, main_lr_scheduler], milestones=[args.lr_warmup_epochs]
        )
    else:
        lr_scheduler = main_lr_scheduler

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module

    model_ema = None
    if args.model_ema:
        # Decay adjustment that aims to keep the decay independent from other hyper-parameters originally proposed at:
        # https://github.com/facebookresearch/pycls/blob/f8cd9627/pycls/core/net.py#L123
        #
        # total_ema_updates = (Dataset_size / n_GPUs) * epochs / (batch_size_per_gpu * EMA_steps)
        # We consider constant = Dataset_size for a given dataset/setup and ommit it. Thus:
        # adjust = 1 / total_ema_updates ~= n_GPUs * batch_size_per_gpu * EMA_steps / epochs
        adjust = args.world_size * args.batch_size * args.model_ema_steps / args.epochs
        alpha = 1.0 - args.model_ema_decay
        alpha = min(1.0, alpha * adjust)
        model_ema = utils.ExponentialMovingAverage(model_without_ddp, device=device, decay=1.0 - alpha)

    if args.resume:
        checkpoint = torch.load(args.resume, map_location="cpu")
        model_without_ddp.load_state_dict(checkpoint["model"])
        if not args.test_only:
            optimizer.load_state_dict(checkpoint["optimizer"])
            lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
        args.start_epoch = checkpoint["epoch"] + 1
        if model_ema:
            model_ema.load_state_dict(checkpoint["model_ema"])

    if args.test_only:
        # We disable the cudnn benchmarking because it can noticeably affect the accuracy
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
        if model_ema:
            evaluate(model_ema, criterion, data_loader_test, device=device, log_suffix="EMA")
        else:
            evaluate(model, criterion, data_loader_test, device=device)
        return

    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, args, model_ema, scaler)
        lr_scheduler.step()
        evaluate(model, criterion, data_loader_test, device=device)
        if model_ema:
            evaluate(model_ema, criterion, data_loader_test, device=device, log_suffix="EMA")
        if args.output_dir:
            checkpoint = {
                "model": model_without_ddp.state_dict(),
                "optimizer": optimizer.state_dict(),
                "lr_scheduler": lr_scheduler.state_dict(),
                "epoch": epoch,
                "args": args,
            }
            if model_ema:
                checkpoint["model_ema"] = model_ema.state_dict()
            utils.save_on_master(checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth"))
            utils.save_on_master(checkpoint, os.path.join(args.output_dir, "checkpoint.pth"))

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print(f"Training time {total_time_str}")
def get_args_parser(add_help=True):
    """Build and return the CLI argument parser for classification training.

    All defaults mirror the documented reference-training recipes; *add_help*
    lets callers embed this parser as a parent without duplicate -h flags.
    """
    import argparse

    parser = argparse.ArgumentParser(description="PyTorch Classification Training", add_help=add_help)

    parser.add_argument("--data-path", default="/datasets01/imagenet_full_size/061417/", type=str, help="dataset path")
    parser.add_argument("--model", default="resnet18", type=str, help="model name")
    parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")
    parser.add_argument(
        "-b", "--batch-size", default=32, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
    )
    parser.add_argument("--epochs", default=90, type=int, metavar="N", help="number of total epochs to run")
    parser.add_argument(
        "-j", "--workers", default=16, type=int, metavar="N", help="number of data loading workers (default: 16)"
    )
    parser.add_argument("--opt", default="sgd", type=str, help="optimizer")
    parser.add_argument("--lr", default=0.1, type=float, help="initial learning rate")
    parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
    parser.add_argument(
        "--wd",
        "--weight-decay",
        default=1e-4,
        type=float,
        metavar="W",
        help="weight decay (default: 1e-4)",
        dest="weight_decay",
    )
    parser.add_argument(
        "--norm-weight-decay",
        default=None,
        type=float,
        help="weight decay for Normalization layers (default: None, same value as --wd)",
    )
    parser.add_argument(
        "--label-smoothing", default=0.0, type=float, help="label smoothing (default: 0.0)", dest="label_smoothing"
    )
    parser.add_argument("--mixup-alpha", default=0.0, type=float, help="mixup alpha (default: 0.0)")
    parser.add_argument("--cutmix-alpha", default=0.0, type=float, help="cutmix alpha (default: 0.0)")
    parser.add_argument("--lr-scheduler", default="steplr", type=str, help="the lr scheduler (default: steplr)")
    parser.add_argument("--lr-warmup-epochs", default=0, type=int, help="the number of epochs to warmup (default: 0)")
    parser.add_argument(
        "--lr-warmup-method", default="constant", type=str, help="the warmup method (default: constant)"
    )
    parser.add_argument("--lr-warmup-decay", default=0.01, type=float, help="the decay for lr")
    parser.add_argument("--lr-step-size", default=30, type=int, help="decrease lr every step-size epochs")
    parser.add_argument("--lr-gamma", default=0.1, type=float, help="decrease lr by a factor of lr-gamma")
    parser.add_argument("--print-freq", default=10, type=int, help="print frequency")
    parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs")
    parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
    parser.add_argument("--start-epoch", default=0, type=int, metavar="N", help="start epoch")
    parser.add_argument(
        "--cache-dataset",
        dest="cache_dataset",
        help="Cache the datasets for quicker initialization. It also serializes the transforms",
        action="store_true",
    )
    parser.add_argument(
        "--sync-bn",
        dest="sync_bn",
        help="Use sync batch norm",
        action="store_true",
    )
    parser.add_argument(
        "--test-only",
        dest="test_only",
        help="Only test the model",
        action="store_true",
    )
    parser.add_argument(
        "--pretrained",
        dest="pretrained",
        help="Use pre-trained models from the modelzoo",
        action="store_true",
    )
    parser.add_argument("--auto-augment", default=None, type=str, help="auto augment policy (default: None)")
    parser.add_argument("--random-erase", default=0.0, type=float, help="random erasing probability (default: 0.0)")

    # Mixed precision training parameters
    parser.add_argument("--amp", action="store_true", help="Use torch.cuda.amp for mixed precision training")

    # distributed training parameters
    parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes")
    parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
    parser.add_argument(
        "--model-ema", action="store_true", help="enable tracking Exponential Moving Average of model parameters"
    )
    parser.add_argument(
        "--model-ema-steps",
        type=int,
        default=32,
        help="the number of iterations that controls how often to update the EMA model (default: 32)",
    )
    parser.add_argument(
        "--model-ema-decay",
        type=float,
        default=0.99998,
        help="decay factor for Exponential Moving Average of model parameters (default: 0.99998)",
    )
    parser.add_argument(
        "--use-deterministic-algorithms", action="store_true", help="Forces the use of deterministic algorithms only."
    )
    parser.add_argument(
        "--interpolation", default="bilinear", type=str, help="the interpolation method (default: bilinear)"
    )
    parser.add_argument(
        "--val-resize-size", default=256, type=int, help="the resize size used for validation (default: 256)"
    )
    parser.add_argument(
        "--val-crop-size", default=224, type=int, help="the central crop size used for validation (default: 224)"
    )
    parser.add_argument(
        "--train-crop-size", default=224, type=int, help="the random crop size used for training (default: 224)"
    )
    parser.add_argument("--clip-grad-norm", default=None, type=float, help="the maximum gradient norm (default None)")

    # Prototype models only
    parser.add_argument("--weights", default=None, type=str, help="the weights enum name to load")

    return parser
# Script entry point: parse CLI arguments and launch training/evaluation.
if __name__ == "__main__":
    args = get_args_parser().parse_args()
    main(args)
| 42.993878 | 120 | 0.667679 |
7ecc5ba9b47a86e601ac5f743eb724add2f29bae | 1,647 | py | Python | snail/Si5351_Clock/constants.py | Who8MyLunch/Snail | 86f4093a884ceb6ba296afad50f238f439e4c2c4 | [
"MIT"
] | null | null | null | snail/Si5351_Clock/constants.py | Who8MyLunch/Snail | 86f4093a884ceb6ba296afad50f238f439e4c2c4 | [
"MIT"
] | null | null | null | snail/Si5351_Clock/constants.py | Who8MyLunch/Snail | 86f4093a884ceb6ba296afad50f238f439e4c2c4 | [
"MIT"
] | null | null | null | # PLL index number
PLL_A = 0
PLL_B = 1
# Crystal's internal load capacitance
XTAL_CL_6PF = 0b01
XTAL_CL_8PF = 0b10
XTAL_CL_10PF = 0b11
XTAL_CL_DEF = XTAL_CL_10PF
# Input clock dividers, Si5351C only
CLKIN_DIV_1 = 0b00
CLKIN_DIV_2 = 0b01
CLKIN_DIV_4 = 0b10
CLKIN_DIV_8 = 0b11
##########################################
# PLL source select
PLL_SRC_XTAL = 0
PLL_SRC_CLKIN = 1 # Si5351C only
PLL_SRC_DEF = PLL_SRC_XTAL
##########################################
# MultiSynth source select
MS_SRC_PLL_A = PLL_A
MS_SRC_PLL_B = PLL_B
MS_SRC_PLL_DEF = MS_SRC_PLL_A
# MS divide by 4
MS_DIVBY4_ENABLE = 0b11
MS_DIVBY4_DISABLE = 0b00
##########################################
# Clock output enable/disable
CLK_OEB_ENABLE = 0
CLK_OEB_DISABLE = 1
# Clock powered
CLK_PDN_ON = 0
CLK_PDN_OFF = 1
# Clock state when disabled
CLK_DIS_STATE_LOW = 0b00
CLK_DIS_STATE_HIGH = 0b01
CLK_DIS_STATE_HIGH_IMP = 0b10
CLK_DIS_STATE_NEVER = 0b11
# Clock drive current (mA)
CLK_IDRV_2 = 0b00
CLK_IDRV_4 = 0b01
CLK_IDRV_6 = 0b10
CLK_IDRV_8 = 0b11
# Clock source
CLK_SRC_XTAL = 0b00
CLK_SRC_CLKIN = 0b01
CLK_SRC_MS04 = 0b10
CLK_SRC_MS = 0b11
# Clock initial phase offset
CLK_PHOFF_ZERO = 0
# Clock inverted (180 deg phase shift)
CLK_INV_TRUE = 1
CLK_INV_FALSE = 0
# Clock output divider values
R_DIV_1 = 0b000
R_DIV_2 = 0b001
R_DIV_4 = 0b010
R_DIV_8 = 0b011
R_DIV_16 = 0b100
R_DIV_32 = 0b101
R_DIV_64 = 0b110
R_DIV_128 = 0b111
# Fanout enable/disable for CLKIN_FANOUT_EN, XO_FANOUT_EN, and MS_FANOUT_EN
FANOUT_ENABLE = 0b1
FANOUT_DISABLE = 0b0
#-----------------------------------------
if __name__ == '__main__':
pass
| 18.931034 | 75 | 0.677596 |
b3bf4481094d83b5d7fc682b1308eb40e0434fd7 | 15,028 | py | Python | software/mavlink/mavlink/pymavlink/generator/mavgen_swift.py | tucuongbrt/PIFer | e2ac4d4443e1c6a6263f91c32f28dbe767590359 | [
"MIT"
] | 10 | 2021-03-15T03:58:06.000Z | 2021-12-30T15:33:38.000Z | software/mavlink/mavlink/pymavlink/generator/mavgen_swift.py | tucuongbrt/PIFer | e2ac4d4443e1c6a6263f91c32f28dbe767590359 | [
"MIT"
] | 4 | 2021-05-03T16:58:53.000Z | 2021-12-21T21:01:02.000Z | Library/src/mavlink/pymavlink/generator/mavgen_swift.py | Dieptranivsr/Ros_Diep | d790e75e6f5da916701b11a2fdf3e03b6a47086b | [
"MIT"
] | 9 | 2021-04-28T15:26:34.000Z | 2021-12-21T20:41:30.000Z | #!/usr/bin/env python
"""
Parse a MAVLink protocol XML file and generate Swift implementation
Copyright Max Odnovolyk 2015
Released under GNU GPL version 3 or later
"""
from __future__ import print_function
import os
from . import mavparse, mavtemplate
# Acronyms kept upper-case when converting MAVLink names to Swift camelCase.
abbreviations = ["MAV", "PX4", "UDB", "PPZ", "PIXHAWK", "SLUGS", "FP", "ASLUAV", "VTOL", "ROI", "UART", "UDP", "IMU", "IMU2", "3D", "RC", "GPS", "GPS1", "GPS2", "NED", "RTK", "ADSB"]

# Swift reserved words; generated identifiers colliding with these must be escaped.
swift_keywords = ["associatedtype", "class", "deinit", "enum", "extension", "fileprivate", "func", "import", "init", "inout", "internal", "let", "open", "operator", "private", "protocol",
"public", "static", "struct", "subscript", "typealias", "var", "break" "case", "continue", "default", "defer", "do", "else", "fallthrough", "for", "guard", "if", "in", "repeat", "return", "switch",
"where", "while", "Any", "catch", "false", "is", "nil", "rethrows", "super", "self", "Self", "throw", "throws", "true", "try"]
# NOTE(review): '"break" "case"' above has no comma between the two literals, so
# Python concatenates them into the single string "breakcase" — neither keyword
# is actually in the list. Looks like a bug; confirm before relying on escaping.

# MAVLink C type -> (Swift type, default value, payload getter template,
# payload setter template). The %u/%s placeholders are filled with offsets.
swift_types = {'char' : ("String", '"\\0"', "string(at: %u, length: %u)", "set(%s, at: %u, length: %u)"),
               'uint8_t' : ("UInt8", 0, "number(at: %u)", "set(%s, at: %u)"),
               'int8_t' : ("Int8", 0, "number(at: %u)", "set(%s, at: %u)"),
               'uint16_t' : ("UInt16", 0, "number(at: %u)", "set(%s, at: %u)"),
               'int16_t' : ("Int16", 0, "number(at: %u)", "set(%s, at: %u)"),
               'uint32_t' : ("UInt32", 0, "number(at: %u)", "set(%s, at: %u)"),
               'int32_t' : ("Int32", 0, "number(at: %u)", "set(%s, at: %u)"),
               'uint64_t' : ("UInt64", 0, "number(at: %u)", "set(%s, at: %u)"),
               'int64_t' : ("Int64", 0, "number(at: %u)", "set(%s, at: %u)"),
               'float' : ("Float", 0, "number(at: %u)", "set(%s, at: %u)"),
               'double' : ("Double", 0, "number(at: %u)", "set(%s, at: %u)"),
               'uint8_t_mavlink_version' : ("UInt8", 0, "number(at: %u)", "set(%s, at: %u)")}

# Shared template engine used by all generate_* functions below.
t = mavtemplate.MAVTemplate()
def generate_mavlink(directory, filelist, xml_list, msgs):
    """Emit the top-level MAVLink.swift: header, static runtime code, and the
    per-message mapping/length/CRC-extra lookup tables."""
    print("Generating MAVLink.swift file")

    mavparse.mkdir_p(directory)
    filename = 'MAVLink.swift'
    filepath = os.path.join(directory, filename)
    outf = open(filepath, "w")
    generate_header(outf, filelist, xml_list, filename)
    append_static_code('MAVLink.swift', outf)
    generate_message_mappings_array(outf, msgs)
    generate_message_lengths_array(outf, msgs)
    generate_message_crc_extra_array(outf, msgs)
    outf.close()
def generate_header(outf, filelist, xml_list, filename):
    """Generate Swift file header with source files list and creation date"""
    # parse_time of the first XML stands in for the generation timestamp.
    t.write(outf, """
//
// ${FILENAME}
// MAVLink Protocol Swift Library
//
// Generated from ${FILELIST} on ${PARSE_TIME} by mavgen_swift.py
// https://github.com/modnovolyk/MAVLinkSwift
//
""", {'FILENAME' : filename,
      'FILELIST' : ", ".join(filelist),
      'PARSE_TIME' : xml_list[0].parse_time})
def generate_enums(directory, filelist, xml_list, enums):
    """Iterate through all enums and create Swift equivalents"""
    # Bitmask enums are emitted as OptionSets by generate_optionsets instead.
    print("Generating Enumerations")

    for enum in enums:
        if enum.is_a_bitmask:
            continue

        filename = "%s%sEnum.swift" % (enum.swift_name, enum.basename)
        filepath = os.path.join(directory, filename)
        outf = open(filepath, "w")
        generate_header(outf, filelist, xml_list, filename)
        t.write(outf, """
${formatted_description}public enum ${swift_name}: ${raw_value_type} {
${{entry:${formatted_description}\tcase ${swift_name} = ${value}\n}}
}

extension ${swift_name}: Enumeration {
public static var typeName = "${name}"
public static var typeDescription = "${entity_description}"
public static var allMembers = [${all_entities}]
public static var membersDescriptions = [${entities_info}]
public static var enumEnd = UInt(${enum_end})
}
""", enum)
        outf.close()
def generate_optionsets(directory, filelist, xml_list, enums):
    """Create one Swift ``OptionSet`` source file per bitmask enum."""
    print("Generating Enumerations")
    for enum in enums:
        if not enum.is_a_bitmask:
            continue
        # Each entry needs its parent's Swift type name to render the
        # `static let member = Parent(rawValue: ...)` lines.
        for entry in enum.entry:
            entry.parent_swift_name = enum.swift_name
        filename = "%s%sOptionSet.swift" % (enum.swift_name, enum.basename)
        filepath = os.path.join(directory, filename)
        # Context manager guarantees the file is closed on error
        # (the original leaked the handle if t.write raised).
        with open(filepath, "w") as outf:
            generate_header(outf, filelist, xml_list, filename)
            t.write(outf, """
${formatted_description}public struct ${swift_name}: OptionSet {
\tpublic let rawValue: ${raw_value_type}
\tpublic init(rawValue: ${raw_value_type}) {
\t\tself.rawValue = rawValue
\t}
${{entry:${formatted_description}\tpublic static let ${swift_name} = ${parent_swift_name}(rawValue: ${value})\n}}
}
extension ${swift_name}: MAVLinkBitmask {
public static var typeName = "${name}"
public static var typeDescription = "${entity_description}"
public static var allMembers = [${all_entities}]
public static var membersDescriptions = [${entities_info}]
public static var enumEnd = UInt(${enum_end})
}
""", enum)
def get_enum_raw_type(enum, msgs):
    """Find the Swift raw-value type for *enum*.

    Scans all message fields for the first one that references the enum and
    returns the Swift type mapped to that field's wire type; falls back to
    ``"Int"`` when no field uses the enum.
    """
    candidates = (
        swift_types[field.type][0]
        for msg in msgs
        for field in msg.fields
        if field.enum == enum.name
    )
    return next(candidates, "Int")
def generate_messages(directory, filelist, xml_list, msgs):
    """Generate one Swift struct source file per MAVLink message.

    Each struct carries its fields plus a ``Message`` extension with the id,
    field definitions, a throwing decoder init and a ``pack()`` encoder.
    """
    print("Generating Messages")
    for msg in msgs:
        filename = "%s%sMsg.swift" % (msg.swift_name, msg.basename)
        filepath = os.path.join(directory, filename)
        # Context manager guarantees the file is closed even if the
        # template engine raises (the original leaked the handle).
        with open(filepath, "w") as outf:
            generate_header(outf, filelist, xml_list, filename)
            t.write(outf, """
import Foundation
${formatted_description}public struct ${swift_name} {
${{fields:${formatted_description}\tpublic let ${swift_name}: ${return_type}\n}}
}
extension ${swift_name}: Message {
public static let id = UInt8(${id})
public static var typeName = "${name}"
public static var typeDescription = "${message_description}"
public static var fieldDefinitions: [FieldDefinition] = [${fields_info}]
public init(data: Data) throws {
${{ordered_fields:\t\t${init_accessor} = ${initial_value}\n}}
}
public func pack() throws -> Data {
var payload = Data(count: ${wire_length})
${{ordered_fields:\t\ttry payload.${payload_setter}\n}}
return payload
}
}
""", msg)
def append_static_code(filename, outf):
    """Copy the contents of a static Swift source file into *outf*.

    The file is looked up in the ``swift/`` directory next to this script.
    """
    basepath = os.path.dirname(os.path.realpath(__file__))
    filepath = os.path.join(basepath, 'swift/%s' % filename)
    print("Appending content of %s" % filename)
    with open(filepath) as inf:
        outf.write(inf.read())
def generate_message_mappings_array(outf, msgs):
    """Emit the Swift dictionary mapping message ids to struct types."""
    entries = ["%u: %s.self" % (msg.id, msg.swift_name) for msg in msgs]
    t.write(outf, """
/// Array for mapping message id to proper struct
private let messageIdToClass: [UInt8: Message.Type] = [${ARRAY_CONTENT}]
""", {'ARRAY_CONTENT': ", ".join(entries)})
def generate_message_lengths_array(outf, msgs):
    """Emit the Swift dictionary of known message wire lengths
    (used to validate incoming packet sizes)."""
    entries = ["%u: %u" % (msg.id, msg.wire_length) for msg in msgs]
    t.write(outf, """
/// Message lengths array for known messages length validation
private let messageLengths: [UInt8: UInt8] = [${ARRAY_CONTENT}]
""", {'ARRAY_CONTENT': ", ".join(entries)})
def generate_message_crc_extra_array(outf, msgs):
    """Emit the Swift dictionary of CRC-extra values, used to detect
    incompatible XML definition changes."""
    entries = ["%u: %u" % (msg.id, msg.crc_extra) for msg in msgs]
    t.write(outf, """
/// Message CRSs extra for detection incompatible XML changes
private let messageCRCsExtra: [UInt8: UInt8] = [${ARRAY_CONTENT}]
""", {'ARRAY_CONTENT': ", ".join(entries)})
def camel_case_from_underscores(string):
    """Generate a CamelCase string from an underscore_string.

    Components listed in the module-level ``abbreviations`` set keep their
    original casing.  Empty components (produced by leading, trailing or
    doubled underscores) are skipped; the original raised ``IndexError`` on
    ``component[0]`` for those.
    """
    result = ''
    for component in string.split('_'):
        if not component:
            continue
        if component in abbreviations:
            result += component
        else:
            result += component[0].upper() + component[1:].lower()
    return result
def lower_camel_case_from_underscores(string):
    """Generate a lower-cased camelCase string from an underscore_string.

    Empty components (from doubled or trailing underscores) are skipped;
    the original raised ``IndexError`` on ``component[0]`` for those.
    """
    components = string.split('_')
    result = components[0].lower()
    for component in components[1:]:
        if not component:
            continue
        result += component[0].upper() + component[1:].lower()
    return result
def contains_a_bitmask(enums, enumName):
    """Return the ``is_a_bitmask`` flag of the first enum named *enumName*,
    or ``False`` when no enum with that name exists."""
    flags = (enum.is_a_bitmask for enum in enums if enum.name == enumName)
    return next(flags, False)
def enum_is_a_bitmask(enum):
    """Return True when the enum's entry values, sorted, are exactly the
    consecutive powers of two 1, 2, 4, 8, ... (i.e. it is a bitmask)."""
    ordered = sorted(entry.value for entry in enum.entry)
    return all(value == 2 ** i for i, value in enumerate(ordered))
def generate_enums_type_info(enums, msgs):
    """Prepare enums for templating and sort them alphabetically.

    Adds camelCase Swift names for each enum and its entries, normalizes
    descriptions to single-line form, records and strips the ``*_ENUM_END``
    sentinel entries, and precomputes the member/description strings used by
    the Swift templates.
    """
    for enum in enums:
        enum.swift_name = camel_case_from_underscores(enum.name)
        enum.raw_value_type = get_enum_raw_type(enum, msgs)
        enum.formatted_description = ""
        if enum.description:
            enum.description = " ".join(enum.description.split())
            enum.formatted_description = "/// %s\n" % enum.description
        # Record the *_ENUM_END sentinel value, then drop those entries.
        # The original did `del enum.entry[index]` while enumerating
        # enum.entry, which skips the element following each deletion.
        remaining = []
        for entry in enum.entry:
            if entry.name.endswith("_ENUM_END"):
                enum.enum_end = entry.value
            else:
                remaining.append(entry)
        enum.entry = remaining
        all_entities = []
        entities_info = []
        for entry in enum.entry:
            name = entry.name.replace(enum.name + '_', '')
            # Ensure the entry name does not start with a digit.
            if name[0].isdigit():
                name = "MAV_" + name
            entry.swift_name = lower_camel_case_from_underscores(name)
            # Ensure the entry name does not collide with a Swift keyword.
            if entry.swift_name in swift_keywords:
                entry.swift_name = lower_camel_case_from_underscores("MAV_" + name)
            entry.formatted_description = ""
            if entry.description:
                entry.description = " ".join(entry.description.split())
                entry.formatted_description = "\n\t/// " + entry.description + "\n"
            all_entities.append(entry.swift_name)
            entities_info.append('("%s", "%s")' % (entry.name, entry.description.replace('"', '\\"')))
        enum.all_entities = ", ".join(all_entities)
        enum.entities_info = ", ".join(entities_info)
        enum.entity_description = enum.description.replace('"', '\\"')
        enum.is_a_bitmask = enum_is_a_bitmask(enum)
    enums.sort(key=lambda enum: enum.swift_name)
def generate_messages_type_info(msgs, enums):
    """Add formatted variable names, initializers and type names to messages.

    For each message: compute its camelCase Swift name and normalized
    description; for each ordered field, compute accessor names, the Swift
    return type, the decoder initializer expression and the encoder
    payload-setter expression; finally sort messages by id.
    """
    for msg in msgs:
        msg.swift_name = camel_case_from_underscores(msg.name)
        msg.formatted_description = ""
        if msg.description:
            msg.description = " ".join(msg.description.split())
            msg.formatted_description = "/// %s\n" % " ".join(msg.description.split())
        msg.message_description = msg.description.replace('"','\\"')
        for field in msg.ordered_fields:
            field.swift_name = lower_camel_case_from_underscores(field.name)
            # "data" / "payload" clash with the init/pack locals, so those
            # fields must be accessed through self.
            field.init_accessor = field.swift_name if field.swift_name != "data" else "self.%s" % field.swift_name
            field.pack_accessor = field.swift_name if field.swift_name != "payload" else "self.%s" % field.swift_name
            field.return_type = swift_types[field.type][0]
            # configure fields initializers
            if field.enum:
                # handle enums (bitmask enums decode via bitmask(at:))
                field.return_type = camel_case_from_underscores(field.enum)
                if contains_a_bitmask(enums, field.enum):
                    field.initial_value = "try data.bitmask(at: %u)" % field.wire_offset
                else:
                    field.initial_value = "try data.enumeration(at: %u)" % field.wire_offset
                field.payload_setter = "set(%s, at: %u)" % (field.pack_accessor, field.wire_offset)
            elif field.array_length > 0:
                if field.return_type == "String":
                    # handle strings (length-bounded accessors from swift_types)
                    field.initial_value = "try data." + swift_types[field.type][2] % (field.wire_offset, field.array_length)
                    field.payload_setter = swift_types[field.type][3] % (field.pack_accessor, field.wire_offset, field.array_length)
                else:
                    # other array types
                    field.return_type = "[%s]" % field.return_type
                    field.initial_value = "try data.array(at: %u, capacity: %u)" % (field.wire_offset, field.array_length)
                    field.payload_setter = "set(%s, at: %u, capacity: %u)" % (field.pack_accessor, field.wire_offset, field.array_length)
            else:
                # simple type field
                field.initial_value = "try data." + swift_types[field.type][2] % field.wire_offset
                field.payload_setter = swift_types[field.type][3] % (field.pack_accessor, field.wire_offset)
            field.formatted_description = ""
            if field.description:
                field.description = " ".join(field.description.split())
                field.formatted_description = "\n\t/// " + field.description + "\n"
        # Tuple literals rendered into the Swift fieldDefinitions array.
        fields_info = ['("%s", %u, "%s", %u, "%s")' % (field.swift_name, field.wire_offset, field.return_type, field.array_length, field.description.replace('"','\\"')) for field in msg.fields]
        msg.fields_info = ", ".join(fields_info)
    msgs.sort(key = lambda msg : msg.id)
def generate(basename, xml_list):
    """Generate the complete MAVLink Swift implementation from parsed XML."""
    msgs, enums, filelist = [], [], []
    for xml in xml_list:
        # Tag every message/enum with the title-cased name of its source XML.
        source_basename = xml.basename.title()
        for msg in xml.message:
            msg.basename = source_basename
        for enum in xml.enum:
            enum.basename = source_basename
        msgs.extend(xml.message)
        enums.extend(xml.enum)
        filelist.append(os.path.basename(xml.filename))
    generate_enums_type_info(enums, msgs)
    generate_messages_type_info(msgs, enums)
    generate_mavlink(basename, filelist, xml_list, msgs)
    generate_enums(basename, filelist, xml_list, enums)
    generate_optionsets(basename, filelist, xml_list, enums)
    generate_messages(basename, filelist, xml_list, msgs)
5a89ba2c7c36f029b3f0f8427d8e38534c5b46d0 | 1,021 | py | Python | indicate/data/get_espncricinfo/find_hindi_scorecards.py | soodoku/indic-transliterate | dc7537c0ca8d9a46f45e67e49be0d3eb478917b8 | [
"MIT"
] | 3 | 2021-11-11T02:38:27.000Z | 2021-12-19T03:56:58.000Z | indicate/data/get_espncricinfo/find_hindi_scorecards.py | soodoku/indic-transliterate | dc7537c0ca8d9a46f45e67e49be0d3eb478917b8 | [
"MIT"
] | null | null | null | indicate/data/get_espncricinfo/find_hindi_scorecards.py | soodoku/indic-transliterate | dc7537c0ca8d9a46f45e67e49be0d3eb478917b8 | [
"MIT"
] | null | null | null | import json
import datetime
import requests
from espncricinfo.match import Match
# Scan recent ESPNcricinfo matches and collect the ids that have a Hindi
# full-scorecard page; results are written to hindi_matches.json.
hindi_matches = []
players = []  # NOTE(review): never used in this script
with open("all-matches.json", "r") as matches_file:
    matches = json.load(matches_file)
# Only consider ids above this threshold — presumably a cutoff for matches
# recent enough to have Hindi pages; TODO confirm.
possible_matches = [x for x in matches if x > 1230000]
print(len(possible_matches))
for match_id in possible_matches:
    try:
        match = Match(match_id)
    except Exception:
        # Skip ids that cannot be resolved to a match.
        continue
    y, m, d = [int(x) for x in match.date.split("-")]
    if datetime.datetime(y, m, d) > datetime.datetime(2021, 4, 8):
        print(match_id)
        r = requests.get(match.match_url)
        # Derive the Hindi scorecard URL from the resolved (post-redirect) URL.
        hindi_url = r.url.replace("/series/", "/hindi/series/").replace("live-cricket-score", "full-scorecard")
        r = requests.get(hindi_url)
        if r.status_code == 404:
            hindi_url = None
        if hindi_url:
            hindi_matches.append(match_id)
with open("hindi_matches.json", "w") as outfile:
    json.dump(hindi_matches, outfile)
| 32.935484 | 115 | 0.612145 |
8cb6c7da561685d0f219ac48fb4657dfb6a047d3 | 1,555 | py | Python | packages/python/plotly/plotly/validators/scatter3d/marker/colorbar/_tickfont.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/scatter3d/marker/colorbar/_tickfont.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/scatter3d/marker/colorbar/_tickfont.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class TickfontValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Auto-generated plotly validator for the "tickfont" compound property
    # of scatter3d.marker.colorbar.
    def __init__(
        self, plotly_name="tickfont", parent_name="scatter3d.marker.colorbar", **kwargs
    ):
        # data_class_str / data_docs may be overridden by callers via kwargs.
        super(TickfontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Tickfont"),
            data_docs=kwargs.pop(
                "data_docs",
                """
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
""",
            ),
            **kwargs,
        )
| 38.875 | 87 | 0.558199 |
8cf458d831ed2825be5a79ef2aafba029b33ccb4 | 1,076 | py | Python | frontend/migrations/versions/15e17ac2f68f_zenodo_make_upload_bytes_be_bigint.py | Daniel-Mietchen/wdumper | 59dba4d8843c376cc2d483382fa8f8155e3d3907 | [
"MIT"
] | null | null | null | frontend/migrations/versions/15e17ac2f68f_zenodo_make_upload_bytes_be_bigint.py | Daniel-Mietchen/wdumper | 59dba4d8843c376cc2d483382fa8f8155e3d3907 | [
"MIT"
] | null | null | null | frontend/migrations/versions/15e17ac2f68f_zenodo_make_upload_bytes_be_bigint.py | Daniel-Mietchen/wdumper | 59dba4d8843c376cc2d483382fa8f8155e3d3907 | [
"MIT"
] | null | null | null | """zenodo: make upload_bytes be bigint
Revision ID: 15e17ac2f68f
Revises: a95ef06204fe
Create Date: 2019-08-18 14:56:11.878451
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '15e17ac2f68f'
down_revision = 'a95ef06204fe'
branch_labels = None
depends_on = None
def upgrade():
    """Widen ``zenodo.uploaded_bytes`` from 32-bit INTEGER to BIGINT."""
    # ### commands auto generated by Alembic - please adjust! ###
    column_kwargs = {
        'existing_type': mysql.INTEGER(display_width=11),
        'type_': sa.BigInteger(),
        'existing_nullable': False,
        'existing_server_default': sa.text('0'),
    }
    op.alter_column('zenodo', 'uploaded_bytes', **column_kwargs)
    # ### end Alembic commands ###
def downgrade():
    """Revert ``zenodo.uploaded_bytes`` back to a 32-bit INTEGER."""
    # ### commands auto generated by Alembic - please adjust! ###
    column_kwargs = {
        'existing_type': sa.BigInteger(),
        'type_': mysql.INTEGER(display_width=11),
        'existing_nullable': False,
        'existing_server_default': sa.text('0'),
    }
    op.alter_column('zenodo', 'uploaded_bytes', **column_kwargs)
    # ### end Alembic commands ###
| 29.081081 | 65 | 0.658922 |
7579c4fd18c3713ba4d0bc4a966449e701400913 | 2,907 | py | Python | kale/rpc/rpc_client.py | grayfallstown/kale-blockchain | 1c426a15967813ece30f77b9b7584f2dc5ed554a | [
"Apache-2.0"
] | 23 | 2021-07-05T00:55:47.000Z | 2022-02-09T04:30:08.000Z | kale/rpc/rpc_client.py | grayfallstown/kale-blockchain | 1c426a15967813ece30f77b9b7584f2dc5ed554a | [
"Apache-2.0"
] | 14 | 2021-07-05T13:08:25.000Z | 2021-08-23T18:57:02.000Z | kale/rpc/rpc_client.py | grayfallstown/kale-blockchain | 1c426a15967813ece30f77b9b7584f2dc5ed554a | [
"Apache-2.0"
] | 5 | 2021-07-06T23:24:39.000Z | 2022-03-15T08:43:03.000Z | import asyncio
from ssl import SSLContext
from typing import Dict, List, Optional, Any
import aiohttp
from kale.server.server import NodeType, ssl_context_for_client
from kale.server.ssl_context import private_ssl_ca_paths
from kale.types.blockchain_format.sized_bytes import bytes32
from kale.util.byte_types import hexstr_to_bytes
from kale.util.ints import uint16
class RpcClient:
    """
    Client to Kale RPC, connects to a local service. Uses HTTP/JSON, and converts back from
    JSON into native python objects before returning. All api calls use POST requests.
    Note that this is not the same as the peer protocol, or wallet protocol (which run Kale's
    protocol on top of TCP), it's a separate protocol on top of HTTP that provides easy access
    to the full node.
    """

    url: str
    session: aiohttp.ClientSession
    closing_task: Optional[asyncio.Task]
    ssl_context: Optional[SSLContext]

    @classmethod
    async def create(cls, self_hostname: str, port: uint16, root_path, net_config):
        """Async factory: build the client with an HTTPS session and a
        client-side SSL context from the daemon certificate paths."""
        self = cls()
        self.url = f"https://{self_hostname}:{str(port)}/"
        self.session = aiohttp.ClientSession()
        ca_crt_path, ca_key_path = private_ssl_ca_paths(root_path, net_config)
        crt_path = root_path / net_config["daemon_ssl"]["private_crt"]
        key_path = root_path / net_config["daemon_ssl"]["private_key"]
        self.ssl_context = ssl_context_for_client(ca_crt_path, ca_key_path, crt_path, key_path)
        self.closing_task = None
        return self

    async def fetch(self, path, request_json) -> Any:
        """POST *request_json* to ``url + path`` and return the decoded JSON.

        Raises for HTTP errors and raises ValueError when the node replies
        with ``success == False``.
        """
        async with self.session.post(self.url + path, json=request_json, ssl_context=self.ssl_context) as response:
            response.raise_for_status()
            res_json = await response.json()
            if not res_json["success"]:
                raise ValueError(res_json)
            return res_json

    async def get_connections(self, node_type: Optional[NodeType] = None) -> List[Dict]:
        """List peer connections, optionally filtered by *node_type*.

        Each connection's hex ``node_id`` is converted to bytes in place.
        """
        request = {}
        if node_type is not None:
            request["node_type"] = node_type.value
        response = await self.fetch("get_connections", request)
        for connection in response["connections"]:
            connection["node_id"] = hexstr_to_bytes(connection["node_id"])
        return response["connections"]

    async def open_connection(self, host: str, port: int) -> Dict:
        """Ask the node to open an outbound connection to host:port."""
        return await self.fetch("open_connection", {"host": host, "port": int(port)})

    async def close_connection(self, node_id: bytes32) -> Dict:
        """Ask the node to close the connection to the given peer id."""
        return await self.fetch("close_connection", {"node_id": node_id.hex()})

    async def stop_node(self) -> Dict:
        """Request a node shutdown."""
        return await self.fetch("stop_node", {})

    def close(self):
        """Schedule the HTTP session close; await it via await_closed()."""
        self.closing_task = asyncio.create_task(self.session.close())

    async def await_closed(self):
        """Wait for the session-close task started by close(), if any."""
        if self.closing_task is not None:
            await self.closing_task
| 40.375 | 115 | 0.69109 |
51d3c76c61989c9a775131f23d659303af85918d | 16,343 | py | Python | python/oneflow/test/modules/test_conv1d.py | butterluo/oneflow | cf2ce575d80f89642b71bee2248e69b09213007d | [
"Apache-2.0"
] | null | null | null | python/oneflow/test/modules/test_conv1d.py | butterluo/oneflow | cf2ce575d80f89642b71bee2248e69b09213007d | [
"Apache-2.0"
] | null | null | null | python/oneflow/test/modules/test_conv1d.py | butterluo/oneflow | cf2ce575d80f89642b71bee2248e69b09213007d | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
from oneflow.test_utils.automated_test_util import *
import oneflow as flow
import oneflow.nn as nn
import oneflow.unittest
def _test_conv1d_bias_false(test_case, device):
    """Conv1d(1->3, k=3, no bias) with fixed weights: check forward output
    and input gradient against precomputed NumPy fixtures."""
    np_arr = np.array([[[1.28795946, -0.2921792, 0.20338029, 0.78604293, -1.89607573]]])
    input = flow.tensor(
        np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    weight = np.array([
        [[0.10197904, 0.3372305, -0.25743008]],
        [[0.27720425, -0.52435774, -0.38381988]],
        [[0.56016803, -0.10063095, -0.10760903]],
    ])
    m = nn.Conv1d(1, 3, 3, stride=1, bias=False)
    m.weight = flow.nn.Parameter(flow.Tensor(weight))
    m = m.to(device)
    output = m(input)
    np_out = np.array([
        [
            [-0.01954307, -0.16356121, 0.77392507],
            [0.43217283, -0.48933625, 0.37196174],
            [0.72899038, -0.2687211, 0.23886177],
        ]
    ])
    test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-06, 1e-06))
    # Sum-then-backward gives the gradient of the summed output w.r.t. input.
    output = output.sum()
    output.backward()
    np_grad = np.array(
        [[[0.93935132, 0.65159315, -0.09726584, -1.03661716, -0.74885899]]]
    )
    test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-06, 1e-06))
def _test_conv1d_bias_true(test_case, device):
    """Conv1d(2->4, k=3, with bias) with fixed weights/bias: check forward
    output and input gradient against precomputed NumPy fixtures."""
    np_arr = np.array([
        [
            [0.90499806, -1.11683071, 0.71605605, -0.56754625, 0.61944169],
            [-0.31317389, -0.26271924, 0.95579433, 0.52468461, 1.48926127],
        ]
    ])
    input = flow.tensor(
        np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    weight = np.array([
        [
            [0.01997352, 0.23834395, 0.00526353],
            [-0.04861857, -0.22751901, -0.06725175],
        ],
        [
            [0.13344523, -0.35202524, 0.15168799],
            [-0.25714493, -0.17459838, 0.28768948],
        ],
        [
            [0.10671382, -0.28205597, -0.39752254],
            [0.36393702, 0.07843742, -0.33898622],
        ],
        [
            [0.20485674, 0.04222689, -0.1898618],
            [0.22519711, -0.15910202, -0.35057363],
        ],
    ])
    bias = np.array([0.01012857, 0.38912651, -0.01600273, -0.3883304])
    m = nn.Conv1d(2, 4, 3, stride=1, bias=True)
    m.weight = flow.nn.Parameter(flow.Tensor(weight))
    m.bias = flow.nn.Parameter(flow.Tensor(bias))
    m = m.to(device)
    np_out = np.array([
        [
            [-0.22349545, -0.08447243, -0.37358052],
            [1.4130373, -0.04644597, 0.86949122],
            [-0.34765026, -0.31004351, -0.14158708],
            [-0.74985039, -0.87430149, -0.77354753],
        ]
    ])
    output = m(input)
    test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-06, 1e-06))
    output = output.sum()
    output.backward()
    np_grad = np.array([
        [
            [0.4649893, 0.11147892, -0.3189539, -0.78394318, -0.43043283],
            [0.28337064, -0.19941133, -0.66853344, -0.95190406, -0.46912211],
        ]
    ])
    test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-06, 1e-06))
def _test_conv1d_dilation(test_case, device):
    """Conv1d(1->3, k=3, no bias) forward/backward fixture check.

    NOTE(review): despite the name, no `dilation` argument is passed, so
    this exercises the default dilation=1 — confirm intent.
    """
    np_arr = np.array(
        [[[-0.43016902, 1.74619496, -0.57338119, 0.25563857, 0.12575546]]]
    )
    input = flow.tensor(
        np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    weight = np.array([
        [[-0.35057205, -0.31304273, 0.46250814]],
        [[-0.40786612, 0.36518192, 0.46280444]],
        [[-0.00921835, -0.38710043, 0.47566161]],
    ])
    m = nn.Conv1d(1, 3, 3, stride=1, bias=False)
    m.weight = flow.nn.Parameter(flow.Tensor(weight))
    m = m.to(device)
    output = m(input)
    np_out = np.array([
        [
            [-0.66102189, -0.31443936, 0.17914855],
            [0.54776692, -0.8032915, 0.38541752],
            [-0.94472277, 0.32745653, -0.03385513],
        ]
    ])
    test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-06, 1e-06))
    output = output.sum()
    output.backward()
    np_grad = np.array(
        [[[-0.76765651, -1.10261774, 0.29835641, 1.06601286, 1.40097415]]]
    )
    test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-06, 1e-06))
def _test_conv1d_stride(test_case, device):
    """Conv1d(1->3, k=3, stride=2, no bias) forward/backward fixture check."""
    np_arr = np.array(
        [[[-1.01312506, -0.40687919, 1.5985316, 0.53594196, -1.89935565]]]
    )
    input = flow.tensor(
        np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    weight = np.array([
        [[0.5751484, 0.26589182, -0.026546]],
        [[-0.10313249, -0.20797005, -0.48268208]],
        [[-0.22216944, -0.14962578, 0.57433963]],
    ])
    m = nn.Conv1d(1, 3, 3, stride=2, bias=False)
    m.weight = flow.nn.Parameter(flow.Tensor(weight))
    m = m.to(device)
    output = m(input)
    # stride=2 over length 5 with k=3 yields 2 output positions per channel.
    np_out = np.array([
        [
            [-0.73331773, 1.11231577],
            [-0.58247775, 0.64046454],
            [1.20406508, -1.5262109],
        ]
    ])
    test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-06, 1e-06))
    output = output.sum()
    output.backward()
    np_grad = np.array(
        [[[0.24984647, -0.09170401, 0.31495798, -0.09170401, 0.06511152]]]
    )
    test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-06, 1e-06))
def _test_conv1d_group_bias_true(test_case, device):
    """Grouped Conv1d(2->4, k=3, groups=2, with bias) forward/backward
    fixture check."""
    np_arr = np.array([
        [
            [1.48566079, 0.54937589, 0.62353903, -0.94114172, -0.60260266],
            [0.61150503, -0.50289607, 1.41735041, -1.85877609, -1.04875529],
        ]
    ])
    input = flow.tensor(
        np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    weight = np.array([
        [[0.25576305, 0.40814576, -0.05900212]],
        [[-0.24829513, 0.42756805, -0.01354307]],
        [[0.44658303, 0.46889144, 0.41060263]],
        [[0.30083328, -0.5221613, 0.12215579]],
    ])
    bias = np.array([-0.03368823, -0.4212504, -0.42130581, -0.17434336])
    m = nn.Conv1d(2, 4, 3, groups=2, stride=1, bias=True)
    m.weight = flow.nn.Parameter(flow.Tensor(weight))
    m.bias = flow.nn.Parameter(flow.Tensor(bias))
    m = m.to(device)
    np_out = np.array([
        [
            [0.53372419, 0.41684598, -0.22277816],
            [-0.56368178, -0.27830642, -0.97031319],
            [0.19794616, -0.74452549, -1.09052706],
            [0.44534814, -1.29277706, 1.09451222],
        ]
    ])
    output = m(input)
    test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-06, 1e-06))
    output = output.sum()
    output.backward()
    np_grad = np.array([
        [
            [0.00746793, 0.84318173, 0.77063656, 0.76316863, -0.07254519],
            [0.74741632, 0.69414645, 1.22690487, 0.47948855, 0.53275841],
        ]
    ])
    test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-06, 1e-06))
def _test_conv1d_group_large_out_bias_true(test_case, device):
    """Grouped Conv1d(2->6, k=3, groups=2, with bias): more output channels
    than inputs; forward/backward fixture check."""
    np_arr = np.array([
        [
            [2.17964911, 0.91623521, 1.24746692, 0.73605931, -0.23738743],
            [-0.70412433, 0.10727754, 1.0207864, -0.09711888, -1.10814202],
        ]
    ])
    input = flow.tensor(
        np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    weight = np.array([
        [[-0.207307473, 0.12856324, 0.371991515]],
        [[-0.416422307, 3.26921181e-05, -0.385845661]],
        [[-0.182592362, 0.143281639, 0.419321984]],
        [[-0.27117458, 0.0421470925, 0.377335936]],
        [[0.546190619, -0.211819887, -0.29785803]],
        [[0.334832489, 0.255918801, -0.0556600206]],
    ])
    bias = np.array(
        [-0.56865668, 0.17631066, -0.43992457, -0.24307285, -0.53672957, -0.52927947]
    )
    m = nn.Conv1d(2, 6, 3, groups=2, stride=1, bias=True)
    m.weight = flow.nn.Parameter(flow.Tensor(weight))
    m.bias = flow.nn.Parameter(flow.Tensor(bias))
    m = m.to(device)
    np_out = np.array([
        [
            [-0.43867296, -0.32441288, -0.82094181],
            [-1.21264362, -0.48919463, -0.25154343],
            [-0.18354186, -0.11983716, -0.66178048],
            [0.33756858, -0.26578707, -0.9421193],
            [-1.2480886, -0.66543078, 0.37145507],
            [-0.79440582, -0.22671542, -0.15066233],
        ]
    ])
    output = m(input)
    test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-06, 1e-06))
    output = output.sum()
    output.backward()
    np_grad = np.array([
        [
            [-0.8063221, -0.53444451, -0.12897667, 0.6773454, 0.40546784],
            [0.6098485, 0.69609451, 0.71991241, 0.1100639, 0.02381789],
        ]
    ])
    test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-06, 1e-06))
def _test_conv1d_group_large_in_bias_true(test_case, device):
    """Grouped Conv1d(4->2, k=3, groups=2, with bias): more input channels
    than outputs; forward/backward fixture check."""
    np_arr = np.array([
        [
            [0.7382921, 0.3227571, -0.73204273, -0.01697334, 1.72585976],
            [0.52866709, 0.28417364, 1.12931311, 1.73048413, -0.60748184],
            [0.43222603, 0.7882517, -0.62105948, 0.10097823, 0.81639361],
            [0.36671457, 0.24468753, -0.5824874, -0.74464536, -0.38901371],
        ]
    ])
    input = flow.tensor(
        np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    weight = np.array([
        [
            [-0.29574063, -0.31176069, 0.17234495],
            [0.06092392, 0.30691007, -0.36685407],
        ],
        [
            [0.26149744, 0.07149458, 0.3209756],
            [0.18960869, -0.37148297, -0.13602243],
        ],
    ])
    bias = np.array([-0.35048512, -0.0093792])
    m = nn.Conv1d(4, 2, 3, groups=2, stride=1, bias=True)
    m.weight = flow.nn.Parameter(flow.Tensor(weight))
    m.bias = flow.nn.Parameter(flow.Tensor(bias))
    m = m.to(device)
    np_out = np.array(
        [[[-1.09048378, -0.49156523, 0.99150705], [0.01852397, 0.54882324, 0.31657016]]]
    )
    output = m(input)
    test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-06, 1e-06))
    output = output.sum()
    output.backward()
    np_grad = np.array([
        [
            [-0.29574063, -0.60750133, -0.43515638, -0.13941574, 0.17234495],
            [0.06092392, 0.36783397, 0.0009799, -0.059944, -0.36685407],
            [0.26149744, 0.33299202, 0.65396762, 0.39247018, 0.3209756],
            [0.18960869, -0.18187428, -0.31789672, -0.50750542, -0.13602243],
        ]
    ])
    test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-06, 1e-06))
def _test_conv1d_compilcate(test_case, device):
    """Conv1d(4->4, k=3, groups=2, stride=2, padding=2, dilation=2, bias)
    combining several options; forward/backward fixture check.

    NOTE(review): "compilcate" is a typo for "complicate", but the name is
    referenced by TestConv1d.test_conv1d, so it is kept.
    """
    np_arr = np.array([
        [
            [-1.00674784, 0.51784992, 0.39896572, 0.11018554, 0.91136694],
            [1.95886874, 0.89779067, 0.4748213, 0.33313531, -0.49350029],
            [-0.19280219, 0.04023677, 1.66438103, -0.83563608, 0.15925731],
            [1.49166429, 1.45189261, -1.86512125, 0.34329697, 0.20413807],
        ]
    ])
    input = flow.tensor(
        np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    weight = np.array([
        [
            [-0.36045218, 0.37349278, 0.04565236],
            [0.0242328, -0.09459515, -0.30684742],
        ],
        [
            [-0.30345008, -0.1196513, -0.26765293],
            [0.09876197, 0.03346226, 0.2748405],
        ],
        [
            [-0.37798449, 0.00242459, -0.34125558],
            [-0.05174343, -0.10443231, 0.09526101],
        ],
        [
            [0.34196907, -0.32667893, 0.40264183],
            [0.38025281, 0.26807079, -0.09074812],
        ],
    ])
    bias = np.array([-0.03499984, -0.21616256, 0.13312563, -0.24104381])
    m = nn.Conv1d(4, 4, 3, groups=2, stride=2, padding=2, dilation=2, bias=True)
    m.weight = flow.nn.Parameter(flow.Tensor(weight))
    m.bias = flow.nn.Parameter(flow.Tensor(bias))
    m = m.to(device)
    np_out = np.array([
        [
            [-0.72379637, 0.67248386, 0.21977007],
            [-0.00643994, -0.1286152, -0.41589433],
            [-0.76877236, 0.29273134, -0.42040929],
            [1.0612179, -0.73787093, -0.37839717],
        ]
    ])
    output = m(input)
    test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-06, 1e-06))
    output = output.sum()
    output.backward()
    # Zeros at even offsets come from stride=2/dilation=2: those input
    # positions never contribute to any output (per the fixture values).
    np_grad = np.array([
        [
            [-0.41006082, 0.0, -0.63206136, 0.0, 0.03184089],
            [0.06186188, 0.0, 0.02985496, 0.0, -0.09313981],
            [-0.36026976, 0.0, -0.2988835, 0.0, -0.26286808],
            [0.49214786, 0.0, 0.49666074, 0.0, 0.16815135],
        ]
    ])
    test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-06, 1e-06))
@flow.unittest.skip_unless_1n1d()
class TestConv1d(flow.unittest.TestCase):
    def test_conv1d(test_case):
        """Run every fixture-based _test_conv1d_* helper on cuda and cpu."""
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            _test_conv1d_bias_true,
            _test_conv1d_bias_false,
            _test_conv1d_dilation,
            _test_conv1d_stride,
            _test_conv1d_group_bias_true,
            _test_conv1d_group_large_out_bias_true,
            _test_conv1d_group_large_in_bias_true,
            _test_conv1d_compilcate,
        ]
        arg_dict["device"] = ["cuda", "cpu"]
        # GenArgList yields the cartesian product of test_fun x device.
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])

    @autotest()
    def test_conv1d_with_random_data(test_case):
        """Autotest: compare oneflow Conv1d against its torch counterpart
        using randomized hyper-parameters and inputs."""
        channels = random(1, 6)
        m = torch.nn.Conv1d(
            in_channels=channels,
            out_channels=random(1, 20),
            kernel_size=random(1, 4),
            stride=random() | nothing(),
            padding=random(1, 3).to(int) | nothing(),
            dilation=random(1, 5) | nothing(),
            groups=random(1, 5) | nothing(),
            padding_mode=constant("zeros") | nothing(),
        )
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_tensor(ndim=3, dim1=channels).to(device)
        y = m(x)
        return y

    # NOTE(review): `os` is not among the visible imports of this chunk —
    # presumably provided via the automated_test_util wildcard import; confirm.
    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    @autotest(n=30, check_allclose=False)
    def test_conv1d_group_with_random_data(test_case):
        """Autotest with grouped convolution; channel count divisible by
        any group size drawn from random(1, 7)."""
        channels = 720  # lcm(1, 2, 3, 4, 5, 6)
        m = torch.nn.Conv1d(
            in_channels=channels,
            out_channels=channels,
            kernel_size=random(1, 4),
            stride=random() | nothing(),
            padding=random(1, 3).to(int) | nothing(),
            dilation=random(1, 5) | nothing(),
            groups=random(1, 7),
            padding_mode=constant("zeros") | nothing(),
        )
        m.train(random())
        device = random_device()
        m.to(device)
        # Pin the torch-side twin module/tensor to cuda explicitly.
        m.pytorch.to("cuda")
        x = random_tensor(ndim=3, dim1=channels).to(device)
        x.pytorch = x.pytorch.to("cuda")
        y = m(x)
        return y
# Allow running this test module directly with `python test_conv1d.py`.
if __name__ == "__main__":
    unittest.main()
| 33.696907 | 88 | 0.539987 |
1192a10b7b21014449c3801a620cfe56582ca79d | 2,509 | py | Python | tests/test_upnpservice_av_transport.py | svilborg/samsungtv | 007d8cbf4326ae13d74ab138eb77798d11aae108 | [
"Apache-2.0"
] | 12 | 2018-05-07T10:27:15.000Z | 2021-01-03T23:39:41.000Z | tests/test_upnpservice_av_transport.py | svilborg/samsungtv | 007d8cbf4326ae13d74ab138eb77798d11aae108 | [
"Apache-2.0"
] | 1 | 2020-07-12T20:51:51.000Z | 2020-07-12T20:51:51.000Z | tests/test_upnpservice_av_transport.py | svilborg/samsungtv | 007d8cbf4326ae13d74ab138eb77798d11aae108 | [
"Apache-2.0"
] | 2 | 2019-08-20T00:48:10.000Z | 2020-09-20T10:09:17.000Z | from unittest import TestCase
from samsungtv.upnpservice import UPnPServiceAVTransport
class TestUPnPServiceAVTransport(TestCase):
    """Placeholder tests for the AVTransport UPnP service wrapper."""

    def test_init(self):
        # Construction must not raise; the functional checks are still TODO.
        service = UPnPServiceAVTransport('192.168.0.1')
        self.skipTest("Todo")
# def test_real(self):
# print "UPnPServiceAVTransport \n"
#
# import pprint
# import time
#
# t = UPnPServiceAVTransport('192.168.0.100', '9197', config={'controlURL': '/dmr/upnp/control/AVTransport1'})
# print "===================="
# pprint.pprint(t.device_cap())
# print "===================="
# pprint.pprint(t.get_transport_info())
# print "===================="
# pprint.pprint(t.get_transport_settings())
# print t.set_url("http://192.168.0.103:8000/media/t.mp4")
# print t.set_url("http://192.168.0.103:8000/media/test.jpg#1")
# print t.set_next_url("http://192.168.0.103:8000/media/test2.jpg#2")
# time.sleep(1)
# print "===================="
# print t.play()
# print "===================="
# pprint.pprint (t.get_position_info())
# time.sleep(2)
# print "===================="
# time.sleep(4)
# print "===================="
# print t.pause()
# print "===================="
# pprint.pprint(t.media_info())
# time.sleep(4)
# print t.next()
# print "===================="
# pprint.pprint(t.media_info())
# exit(1)
# # print t.set_url("http://192.168.0.103:8000/media/test.jpg")
# # print t.set_next_url("http://192.168.0.103:8000/media/test3.jpg")
# # print t.seek('REL_TIME', '00:00:05')
# # print t.prefetch_url("http://192.168.0.103:8000/media/test3.jpg")
# # print t.stop()
# def test_real_pic_rotate (self) :
# l = [
# 'http://i.imgur.com/6yHmlwT.jpg',
# 'http://i.imgur.com/qCoybZR.jpg',
# 'http://i.imgur.com/hl4mfZf.jpg',
# ]
#
# is_play=False
#
# for img in l:
#
# print img
#
# if not is_play:
# print t.set_url(img)
# print t.play()
#
# is_play=True
#
# time.sleep(1)
#
# else:
# print t.set_next_url(img)
# print t.next()
#
# time.sleep(5)
# pprint.pprint (t.get_transport_info())
# exit(1)
| 27.571429 | 118 | 0.472698 |
05197f47aa103e1ee01d2d931e0e974b0477d819 | 228 | py | Python | type_cast/type_cast_0.py | avkorablev/code_4_blog | fcda82f018d1de7c7b2a6d87f5f83f10a4477878 | [
"CC0-1.0"
] | null | null | null | type_cast/type_cast_0.py | avkorablev/code_4_blog | fcda82f018d1de7c7b2a6d87f5f83f10a4477878 | [
"CC0-1.0"
] | null | null | null | type_cast/type_cast_0.py | avkorablev/code_4_blog | fcda82f018d1de7c7b2a6d87f5f83f10a4477878 | [
"CC0-1.0"
] | null | null | null | # file name: type_cast_0.py
class A:
    # Base class of the type-cast demo; exposes a single class attribute.
    a = 'a'
class B(A):
    # Subclass adding its own class attribute on top of A's.
    b = 'b'
class DoSomethingWithA:
    """Factory whose produced type is configurable through ``_class``."""

    _class = A

    def do(self) -> A:
        # Instantiate whatever class the (possibly overridden) _class names.
        product = self._class
        return product()
class DoSomethingWithB(DoSomethingWithA):
    # Overrides the factory's product type: do() now returns B instances.
    _class = B
| 12 | 41 | 0.596491 |
25ff22aa99f23a8b64fac0e2bbdee0447aad83ef | 2,715 | py | Python | tests/common/postgres.py | epiphany-platform/cdl-temporary | daa704f379c05d7b733c382058ff88a2549d33d7 | [
"Apache-2.0"
] | 8 | 2020-11-04T10:32:03.000Z | 2021-09-08T16:46:13.000Z | tests/common/postgres.py | epiphany-platform/cdl-temporary | daa704f379c05d7b733c382058ff88a2549d33d7 | [
"Apache-2.0"
] | 418 | 2020-11-05T12:43:26.000Z | 2021-10-19T02:24:43.000Z | tests/common/postgres.py | epiphany-platform/cdl-temporary | daa704f379c05d7b733c382058ff88a2549d33d7 | [
"Apache-2.0"
] | 13 | 2020-11-18T12:37:42.000Z | 2021-09-16T07:43:27.000Z | import json
import psycopg2
class PostgresConfig:
    """Connection settings for a PostgreSQL database used by the tests."""

    def __init__(self,
                 user='postgres',
                 password='1234',
                 host='localhost',
                 port='5432',
                 dbname='postgres',
                 schema='public'):
        self.user = user
        self.password = password
        self.host = host
        self.port = port
        self.dbname = dbname
        self.schema = schema

    def to_dict(self, app):
        """Render the settings as environment-style variables.

        With ``app=None`` the keys are ``POSTGRES_*``; otherwise they are
        prefixed per-application, e.g. ``<APP>_POSTGRES__HOST``.
        """
        values = {
            "USERNAME": self.user,
            "PASSWORD": self.password,
            "HOST": self.host,
            "PORT": self.port,
            "DBNAME": self.dbname,
            "SCHEMA": self.schema,
        }
        if app is None:
            return {f"POSTGRES_{name}": value for name, value in values.items()}
        return {f"{app}_POSTGRES__{name}": value for name, value in values.items()}
def connect_to_postgres(config: PostgresConfig):
    """Open a psycopg2 connection built from the given PostgresConfig."""
    return psycopg2.connect(
        dbname=config.dbname,
        user=config.user,
        password=config.password,
        host=config.host,
        port=config.port,
    )
def fetch_data(config: PostgresConfig):
    """Return all rows of the ``data`` table, ordered by version, as dicts."""
    connection = connect_to_postgres(config)
    cursor = connection.cursor()
    cursor.execute('SELECT * FROM data ORDER BY version')
    columns = ('object_id', 'version', 'schema_id', 'payload')
    records = [dict(zip(columns, row)) for row in cursor.fetchall()]
    cursor.close()
    connection.close()
    return records
def insert_data(config: PostgresConfig, data):
    """Insert the given entries into the ``data`` table and commit."""
    connection = connect_to_postgres(config)
    cursor = connection.cursor()
    statement = ('INSERT INTO data (object_id, version, schema_id, payload)'
                 ' VALUES (%s, %s, %s, %s)')
    for entry in data:
        # payload is stored as its JSON text representation
        cursor.execute(statement,
                       (entry['object_id'], entry['version'],
                        entry['schema_id'], json.dumps(entry['payload'])))
    connection.commit()
    cursor.close()
    connection.close()
def clear_data(config: PostgresConfig):
    """Delete every row of the ``data`` table and commit."""
    connection = connect_to_postgres(config)
    cursor = connection.cursor()
    cursor.execute('DELETE FROM data')
    connection.commit()
    cursor.close()
    connection.close()
def clear_relations(config: PostgresConfig):
    """Empty both relation tables (``edges`` and ``relations``) and commit."""
    connection = connect_to_postgres(config)
    cursor = connection.cursor()
    # Same order as the original: edges first, then relations.
    cursor.execute('DELETE FROM edges')
    cursor.execute('DELETE FROM relations')
    connection.commit()
    cursor.close()
    connection.close()
| 26.617647 | 96 | 0.542173 |
60416c88f8fed41716f5864f431d2fe70225b5cc | 3,132 | py | Python | Day15/Day15.py | MichaelMKKang/AdventOfCode | fcbc7dccdd116c8cce62c0570bbaeaafe17b55ff | [
"MIT"
] | null | null | null | Day15/Day15.py | MichaelMKKang/AdventOfCode | fcbc7dccdd116c8cce62c0570bbaeaafe17b55ff | [
"MIT"
] | null | null | null | Day15/Day15.py | MichaelMKKang/AdventOfCode | fcbc7dccdd116c8cce62c0570bbaeaafe17b55ff | [
"MIT"
] | null | null | null | with open('Day15 input.txt') as f:
    lines = f.readlines()
lines = [x.strip() for x in lines]
# lines[row][col] is the single-digit risk of entering that grid cell.

# only roughly remember dijkstra's algorithm and intentionally not looking it up again
# of coordinates:[total value risk if moved,and path so far]
# structure is...
#   (consideration_coord_tuple): list(cum_risk_value, list(coord_path))
# Frontier seeded with the two neighbours of the (0,0) start cell.
consider_dict = {(0,1):[int(lines[0][1]),[(0,0)]],(1,0):[int(lines[1][0]),[(0,0)]]}
visited_dict = {(0,0):True}
# for k in consider_dict:
#     visited_dict[k] = True
from copy import deepcopy

not_done = True
lowest_value = float('inf')
while not_done:
    new_dict = {}  # key is new_location, value is (summed risk, path).
    # Expand every frontier entry one step in each of the four directions,
    # skipping cells already on that entry's own path.
    for k in consider_dict:
        #print('looking out starting at',k[0],k[1])
        if k[0]+1 < len(lines) and (k[0]+1,k[1]) not in consider_dict[k][1]:
            down = int(lines[k[0]+1][k[1]])  # could make cleaner
            new_dict[(k[0]+1,k[1])] = deepcopy((consider_dict[k][0] + down,consider_dict[k][1]))
            if new_dict[(k[0]+1,k[1])][1][-1] != k:
                new_dict[(k[0]+1,k[1])][1].append(k)
        if k[0]-1 > -1 and (k[0]-1,k[1]) not in consider_dict[k][1]:
            up = int(lines[k[0]-1][k[1]])
            new_dict[(k[0]-1,k[1])] = deepcopy((consider_dict[k][0] + up,consider_dict[k][1]))
            if new_dict[(k[0]-1,k[1])][1] != k:
                new_dict[(k[0]-1,k[1])][1].append(k)
        if k[1]+1 < len(lines[0]) and (k[0],k[1]+1) not in consider_dict[k][1]:
            right = int(lines[k[0]][k[1]+1])
            new_dict[(k[0],k[1]+1)] = deepcopy((consider_dict[k][0] + right,consider_dict[k][1]))
            if new_dict[(k[0],k[1]+1)][1] != k:
                new_dict[(k[0],k[1]+1)][1].append(k)
        if k[1]-1 > -1 and (k[0],k[1]-1) not in consider_dict[k][1]:
            left = int(lines[k[0]][k[1]-1])
            new_dict[(k[0],k[1]-1)] = deepcopy((consider_dict[k][0] + left,consider_dict[k][1]))
            if new_dict[(k[0],k[1]-1)][1] != k:
                new_dict[(k[0],k[1]-1)][1].append(k)
    # winner struct is...
    #   list(tuple(new_dict_key, new_dict_value))
    # Keep the expansion(s) with the lowest cumulative risk seen so far.
    winner = [(None,[float('inf'),[None]])]
    for k,v in new_dict.items():
        if v[0] <= winner[-1][1][0]:
            if winner[-1][1][1][0] is None:
                winner = [(k,v)]
            else:
                winner.append((k,v))
    # Promote the winners onto the frontier, dropping the cells they came from.
    for w in winner:
        if w[1][0]!= float('inf'):
            #print('winner',w)
            consider_dict[w[0]] = w[1]
            try:
                _ = consider_dict.pop(w[1][1][-1])
                #print('dict',consider_dict)
            except: pass
    # NOTE(review): hard-coded (9,9) goal means this only fits a 10x10 grid.
    lowest_value = min([val[0] if key==(9,9) else float('inf') for key,val in consider_dict.items()])
    if lowest_value < float('inf'):
        print(lowest_value)
    if (9,9) in consider_dict:
        _ = consider_dict.pop((9,9))
    if len(consider_dict.keys())==0:
        not_done = False
#concern about what happens if two paths hit the same point at the same time. old_loc gets overwritten
#a possible solution is to have the path as part of the key bundled as a tuple with the current loc
print(not_done) | 44.742857 | 102 | 0.549808 |
4438bee97902fa96e2b700ba94b1c8d2654158cf | 2,172 | py | Python | openfda/annotation_table/tests/extract_unii_test.py | hobochili/openfda | 9958c4bc3d04d2e9cfc75f9cd894ad07a45e9141 | [
"CC0-1.0"
] | 388 | 2015-01-09T18:50:35.000Z | 2022-03-24T10:15:23.000Z | openfda/annotation_table/tests/extract_unii_test.py | hobochili/openfda | 9958c4bc3d04d2e9cfc75f9cd894ad07a45e9141 | [
"CC0-1.0"
] | 150 | 2015-01-21T20:30:54.000Z | 2022-03-28T20:46:29.000Z | openfda/annotation_table/tests/extract_unii_test.py | hobochili/openfda | 9958c4bc3d04d2e9cfc75f9cd894ad07a45e9141 | [
"CC0-1.0"
] | 113 | 2015-01-31T21:24:16.000Z | 2022-01-30T15:17:28.000Z | #!/usr/bin/env python
import os
import unittest
from openfda.annotation_table import extract_unii as extract
from openfda.spl import extract as extract_spl
class UNIIExtractMethoxsalenUnitTest(unittest.TestCase):
    """UNII extraction tests against a bundled METHOXSALEN SPL document."""

    def setUp(self):
        # Parse the fixture XML fresh for every test.
        self.tree = extract_spl.parse_xml(os.path.dirname(__file__) +
                                          '/data/METHOXSALEN.xml')

    def test_extract_setid(self):
        expected_setid = 'ea1a6225-0eb9-4743-9601-23d5795936a3'
        extracted_setid = extract.extract_set_id(self.tree)
        # The third argument is the assertion failure message.
        self.assertEqual(expected_setid, extracted_setid, extracted_setid)

    def test_extract_unii(self):
        expected_unii = 'U4VJ29L7BQ'
        extracted_unii = extract.extract_unii(self.tree)
        self.assertEqual(expected_unii, extracted_unii, extracted_unii)

    def test_extract_unii_name(self):
        expected_unii_name = 'METHOXSALEN'
        extracted_unii_name = extract.extract_unii_name(self.tree)
        self.assertEqual(expected_unii_name,
                         extracted_unii_name,
                         extracted_unii_name)

    def test_extract_unii_other_code(self):
        # Known NUI codes for this substance, in document order.
        expected_unii_other_code = ['N0000010217',
                                    'N0000175984',
                                    'N0000009801',
                                    'N0000175879',
                                    'N0000007909']
        extracted_unii_other_code = extract.extract_unii_other_code(self.tree)
        self.assertListEqual(expected_unii_other_code,
                             extracted_unii_other_code,
                             extracted_unii_other_code)

    def test_extract_unii_other_name(self):
        # Pharmacologic-class display names matching the codes above.
        expected_unii_other_name = ['Photoabsorption [MoA]',
                                    'Photoactivated Radical Generator [EPC]',
                                    'Photosensitizing Activity [PE]',
                                    'Psoralen [EPC]',
                                    'Psoralens [Chemical/Ingredient]']
        extracted_unii_other_name = extract.extract_unii_other_name(self.tree)
        self.assertListEqual(expected_unii_other_name,
                             extracted_unii_other_name,
                             extracted_unii_other_name)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 37.448276 | 74 | 0.64825 |
e7dcd7191a818401c794773463c7d81547f80fbe | 2,345 | py | Python | ctera_gateway_openapi/api/sync.py | ctera/ctera-gateway-openapi | 0b37af6cd4b53dfe0f66f4dc75dc131e99c63233 | [
"Apache-2.0"
] | null | null | null | ctera_gateway_openapi/api/sync.py | ctera/ctera-gateway-openapi | 0b37af6cd4b53dfe0f66f4dc75dc131e99c63233 | [
"Apache-2.0"
] | null | null | null | ctera_gateway_openapi/api/sync.py | ctera/ctera-gateway-openapi | 0b37af6cd4b53dfe0f66f4dc75dc131e99c63233 | [
"Apache-2.0"
] | null | null | null | import copy
from ctera_gateway_openapi.api.ctera_exception_translator import handle_ctera_exception
from ctera_gateway_openapi.api.tokenizer import unpack_session_id
from ctera_gateway_openapi.managers.sync import SyncManager
@unpack_session_id
@handle_ctera_exception
def get_status(session_id=None, **_kwargs):
    """Return the current Cloud Sync status as a plain dict."""
    raw_status = SyncManager(session_id=session_id).get_status()
    return _to_exteranl_sync_status(raw_status)
@unpack_session_id
@handle_ctera_exception
def is_enabled(session_id=None, **_kwargs):
    """Return whether Cloud Sync is enabled for this session."""
    manager = SyncManager(session_id=session_id)
    return manager.is_enabled()
@unpack_session_id
@handle_ctera_exception
def suspend(session_id=None, **_kwargs):
    """Pause Cloud Sync for this session."""
    manager = SyncManager(session_id=session_id)
    return manager.suspend()
@unpack_session_id
@handle_ctera_exception
def unsuspend(session_id=None, **_kwargs):
    """Resume a previously suspended Cloud Sync."""
    manager = SyncManager(session_id=session_id)
    return manager.unsuspend()
@unpack_session_id
@handle_ctera_exception
def refresh(session_id=None, **_kwargs):
    """Trigger a Cloud Sync refresh."""
    manager = SyncManager(session_id=session_id)
    return manager.refresh()
@unpack_session_id
@handle_ctera_exception
def get_linux_avoid_using_fanotify(session_id=None, **_kwargs):
    """Return the current value of the avoid-fanotify setting."""
    manager = SyncManager(session_id=session_id)
    return manager.get_linux_avoid_using_fanotify()
@unpack_session_id
@handle_ctera_exception
def set_linux_avoid_using_fanotify(session_id=None, **_kwargs):
    """Turn the avoid-fanotify setting on."""
    manager = SyncManager(session_id=session_id)
    return manager.set_linux_avoid_using_fanotify(True)
@unpack_session_id
@handle_ctera_exception
def unset_linux_avoid_using_fanotify(session_id=None, **_kwargs):
    """Turn the avoid-fanotify setting off."""
    manager = SyncManager(session_id=session_id)
    return manager.set_linux_avoid_using_fanotify(False)
def _to_exteranl_sync_status(server_object):
j = copy.deepcopy(server_object.__dict__)
j.pop('_classname')
for field in ['lastSuccessfulScanTime', 'lastFailedScanTime']:
if j.get(field) is not None:
j[field] += 'Z'
for field in [
'readSyncKnowledge',
'uploadFilesList',
'uploadExceptionsFilesList',
'downloadFilesList',
'downloadExceptionsFilesList',
'verifySyncFilesList',
'lastUploadFile',
'lastUploadDeleteFile',
'lastFailedUploadFile',
'lastDownloadFile',
'lastFailedDownloadFile'
]:
obj = j.get(field)
if obj is not None:
if hasattr(obj, 'time'):
obj.time += 'Z'
j[field] = obj.__dict__
return j
| 28.950617 | 87 | 0.753945 |
2549ffb8d140561fbed24fcb68cc648f8f0a2f82 | 4,343 | py | Python | vmware_nsx/services/vpnaas/nsxv/ipsec_validator.py | huihongxiao/vmware-nsx | 00530f9207958df66211e5dba008764106fc82c3 | [
"Apache-2.0"
] | null | null | null | vmware_nsx/services/vpnaas/nsxv/ipsec_validator.py | huihongxiao/vmware-nsx | 00530f9207958df66211e5dba008764106fc82c3 | [
"Apache-2.0"
] | null | null | null | vmware_nsx/services/vpnaas/nsxv/ipsec_validator.py | huihongxiao/vmware-nsx | 00530f9207958df66211e5dba008764106fc82c3 | [
"Apache-2.0"
] | 1 | 2019-06-21T18:07:53.000Z | 2019-06-21T18:07:53.000Z | # Copyright 2016 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_vpnaas.db.vpn import vpn_validator
from oslo_log import log as logging
from vmware_nsx._i18n import _
from vmware_nsx.common import exceptions as nsxv_exc
from vmware_nsx.common import nsxv_constants
LOG = logging.getLogger(__name__)
class IPsecValidator(vpn_validator.VpnReferenceValidator):
    """Validator methods for VMware NSX-V (VSE) IPsec VPN support."""

    def __init__(self, service_plugin):
        super(IPsecValidator, self).__init__()
        # VPNaaS service plugin, used to fetch IKE/IPsec policies by id.
        self.vpn_plugin = service_plugin

    def validate_ikepolicy_version(self, policy_info):
        """NSX Edge provides IKEv1 only; reject any other IKE version."""
        version = policy_info.get('ike_version')
        if version != 'v1':
            msg = _("Unsupported ike policy %s! only v1 "
                    "is supported right now.") % version
            raise nsxv_exc.NsxIPsecVpnError(details=msg)

    def validate_ikepolicy_pfs(self, policy_info):
        """Reject PFS groups that VSE does not support."""
        # Check whether pfs is allowed.
        if not nsxv_constants.PFS_MAP.get(policy_info['pfs']):
            # BUG FIX: the original used a positional '%s' together with a
            # mapping ('supported' key was never consumed), so building this
            # message failed at runtime instead of reporting the error.
            msg = _("Unsupported pfs: %(pfs)s! currently only "
                    "the following pfs are supported on VSE: "
                    "%(supported)s") % {
                'pfs': policy_info['pfs'],
                'supported': nsxv_constants.PFS_MAP}
            raise nsxv_exc.NsxVpnValidationError(details=msg)

    def validate_encryption_algorithm(self, policy_info):
        """Reject encryption algorithms outside ENCRYPTION_ALGORITHM_MAP."""
        encryption = policy_info['encryption_algorithm']
        if encryption not in nsxv_constants.ENCRYPTION_ALGORITHM_MAP:
            # NOTE(review): "followoing" typo kept in case callers/tests
            # match on the exact message text.
            msg = _("Unsupported encryption_algorithm: %(algo)s! please "
                    "select one of the followoing supported algorithms: "
                    "%(supported_algos)s") % {
                'algo': encryption,
                'supported_algos':
                    nsxv_constants.ENCRYPTION_ALGORITHM_MAP}
            raise nsxv_exc.NsxVpnValidationError(details=msg)

    def validate_ipsec_policy(self, context, policy_info):
        """Ensure IPSec policy encap mode is tunnel for current REST API."""
        mode = policy_info['encapsulation_mode']
        if mode not in nsxv_constants.ENCAPSULATION_MODE_ALLOWED:
            # BUG FIX: added the missing space that made the two adjacent
            # string literals render as "currently only'tunnel' mode".
            msg = _("Unsupported encapsulation mode: %s! currently only "
                    "'tunnel' mode is supported.") % mode
            raise nsxv_exc.NsxVpnValidationError(details=msg)

    def validate_policies_matching_algorithms(self, ikepolicy, ipsecpolicy):
        """Warn when IKE and IPsec policies disagree on shared VSE settings.

        In VSE, Phase 1 and Phase 2 share the same encryption_algorithm
        and authentication algorithm settings. At present, just record the
        discrepancy in the log and take ipsecpolicy to do configuration.
        """
        keys = ('auth_algorithm', 'encryption_algorithm', 'pfs')
        for key in keys:
            if ikepolicy[key] != ipsecpolicy[key]:
                LOG.warning("IKEPolicy and IPsecPolicy should have consistent "
                            "auth_algorithm, encryption_algorithm and pfs for "
                            "VSE!")
                break

    def validate_ipsec_conn(self, context, ipsec_site_conn):
        """Run every VSE-specific check for an ipsec-site-connection."""
        ike_policy_id = ipsec_site_conn['ikepolicy_id']
        ipsec_policy_id = ipsec_site_conn['ipsecpolicy_id']
        ipsecpolicy = self.vpn_plugin.get_ipsecpolicy(context,
                                                      ipsec_policy_id)
        ikepolicy = self.vpn_plugin.get_ikepolicy(context,
                                                  ike_policy_id)
        self.validate_ikepolicy_version(ikepolicy)
        self.validate_ikepolicy_pfs(ikepolicy)
        self.validate_encryption_algorithm(ikepolicy)
        self.validate_ipsec_policy(context, ipsecpolicy)
        self.validate_policies_matching_algorithms(ikepolicy, ipsecpolicy)
| 46.202128 | 79 | 0.655998 |
43d983715e76b334623f647606a2587d38abb96a | 444 | py | Python | gokart/__init__.py | davegrays/gokart | 64999e5441217eafa5f0a6f3d2ee9aea04df740e | [
"MIT"
] | null | null | null | gokart/__init__.py | davegrays/gokart | 64999e5441217eafa5f0a6f3d2ee9aea04df740e | [
"MIT"
] | null | null | null | gokart/__init__.py | davegrays/gokart | 64999e5441217eafa5f0a6f3d2ee9aea04df740e | [
"MIT"
] | null | null | null | from gokart.build import build
from gokart.info import make_tree_info, tree_info
from gokart.pandas_type_config import PandasTypeConfig
from gokart.parameter import ExplicitBoolParameter, ListTaskInstanceParameter, TaskInstanceParameter
from gokart.run import run
from gokart.task import TaskOnKart
from gokart.testing import test_run
from gokart.utils import add_config
from gokart.workspace_management import delete_local_unnecessary_outputs
| 44.4 | 100 | 0.885135 |
003dc421b71c0c4c1edf98e0016b1c8416bae6fd | 1,072 | py | Python | hw_asr/tests/test_dataset.py | ainmukh/asr_project_template | 6ba81ed52764911295f774ff9ea2aa362fbe4d7d | [
"MIT"
] | 1 | 2021-12-12T13:13:05.000Z | 2021-12-12T13:13:05.000Z | hw_asr/tests/test_dataset.py | timothyxp/asr_project_template | 6f0534d44f93a251e11109e8262e06c9119004d9 | [
"MIT"
] | null | null | null | hw_asr/tests/test_dataset.py | timothyxp/asr_project_template | 6f0534d44f93a251e11109e8262e06c9119004d9 | [
"MIT"
] | null | null | null | import unittest
from hw_asr.datasets import LibrispeechDataset, CustomDirAudioDataset
from hw_asr.text_encoder.ctc_char_text_encoder import CTCCharTextEncoder
from hw_asr.utils import ROOT_PATH
from hw_asr.utils.parse_config import ConfigParser
class TestDataset(unittest.TestCase):
    """Smoke tests: datasets can be constructed and indexed."""

    def test_librispeech(self):
        encoder = CTCCharTextEncoder.get_simple_alphabet()
        parser = ConfigParser.get_default_configs()
        dataset = LibrispeechDataset(
            "dev-clean", text_encoder=encoder, config_parser=parser
        )
        first_item = dataset[0]
        print(first_item)

    def test_custom_dataset(self):
        encoder = CTCCharTextEncoder.get_simple_alphabet()
        parser = ConfigParser.get_default_configs()
        audio_dir = str(ROOT_PATH / 'test_data' / 'audio')
        transc_dir = str(ROOT_PATH / 'test_data' / 'transcriptions')
        dataset = CustomDirAudioDataset(
            audio_dir, transc_dir, text_encoder=encoder, config_parser=parser
        )
        first_item = dataset[0]
        print(first_item)
| 33.5 | 89 | 0.716418 |
67b881b47b0a2ce1d7a288708c2661f026b47c39 | 251 | py | Python | ssr/surface_rec/tasks/reduction_task.py | SBCV/SatelliteSurfaceReconstruction | 7127bc0fb36155e31ae2928c18a65d562d7d29ba | [
"BSD-3-Clause"
] | 34 | 2021-04-03T13:12:34.000Z | 2022-03-27T14:12:34.000Z | ssr/surface_rec/tasks/reduction_task.py | jieeeeeeeeeee/SatelliteSurfaceReconstruction | 24aa67ace67e69568cbc7e01a9e8b407463366f4 | [
"BSD-3-Clause"
] | 3 | 2021-04-06T14:01:43.000Z | 2021-05-25T08:40:13.000Z | ssr/surface_rec/tasks/reduction_task.py | jieeeeeeeeeee/SatelliteSurfaceReconstruction | 24aa67ace67e69568cbc7e01a9e8b407463366f4 | [
"BSD-3-Clause"
] | 6 | 2021-05-11T12:18:07.000Z | 2022-03-18T14:09:16.000Z | from ssr.surface_rec.tasks.task import Task
class ReductionTask(Task):
    """Task bundling the input/output paths of the mesh-reduction step."""

    def __init__(self, colmap_idp, mesh_odp):
        # Explicit assignments, equivalent to the locals()-update idiom the
        # original used (which also had to delete the circular `self.self`).
        self.colmap_idp = colmap_idp
        self.mesh_odp = mesh_odp
| 25.1 | 69 | 0.701195 |
ecb7767f8ce4fc3f226d3240f230a69329133962 | 3,457 | py | Python | celseq2/demultiplex_sam.py | johannesnicolaus/celseq2 | 0a0873d46aa1889d5389d3faa50ec01c465e85bd | [
"BSD-3-Clause"
] | 14 | 2017-11-02T15:44:13.000Z | 2022-03-26T13:37:17.000Z | celseq2/demultiplex_sam.py | johannesnicolaus/celseq2 | 0a0873d46aa1889d5389d3faa50ec01c465e85bd | [
"BSD-3-Clause"
] | 25 | 2018-02-06T19:47:31.000Z | 2022-03-03T07:46:04.000Z | celseq2/demultiplex_sam.py | johannesnicolaus/celseq2 | 0a0873d46aa1889d5389d3faa50ec01c465e85bd | [
"BSD-3-Clause"
] | 13 | 2018-01-19T20:19:59.000Z | 2022-03-26T13:37:16.000Z | #!/usr/bin/env python3
import pysam
import argparse
from celseq2.helper import print_logger
from celseq2.helper import join_path
from celseq2.demultiplex import bc_dict_id2seq, str2int
def _cell_seq (name, length=6):
# BC-TCTGAG_UMI-CGTTAC => TCTGAG
try:
out = name.split('_')[0][3:3 + length]
except Exception as e:
raise(e)
return(out)
def demultiplex_sam(samfile, outdir, bc_length):
    """Split an aligned SAM/BAM into one SAM per cell barcode.

    Output writers are created lazily, the first time a barcode is seen.
    """
    if not samfile:
        return
    alignments = pysam.AlignmentFile(samfile, 'rb')
    writers = {}
    for aln in alignments:
        barcode = _cell_seq(aln.query_name, length=bc_length)
        writer = writers.get(barcode)
        if not writer:
            out_path = join_path(outdir, barcode + '.sam')
            writer = pysam.AlignmentFile(out_path, 'w', template=alignments)
            writers[barcode] = writer
        writer.write(aln)
    for writer in writers.values():
        writer.close()
def demultiplex_sam_with_claim(samfile, outdir, bc_length, claimed_bc):
    """Like demultiplex_sam, but only keep reads whose barcode is claimed.

    One output SAM per claimed barcode is created up front; reads with
    unclaimed barcodes are silently dropped.
    """
    if not samfile:
        return
    if not claimed_bc:
        return
    alignments = pysam.AlignmentFile(samfile, 'rb')
    writers = {}
    for barcode in claimed_bc:
        writers[barcode] = pysam.AlignmentFile(
            join_path(outdir, barcode + '.sam'),
            'w', template=alignments)
    for aln in alignments:
        barcode = _cell_seq(aln.query_name, length=bc_length)
        writer = writers.get(barcode)
        if not writer:
            continue
        writer.write(aln)
    for writer in writers.values():
        writer.close()
def main():
    """CLI entry point: demultiplex a SAM/BAM by cell barcode.

    With ``--claim`` only the barcodes listed in the barcode dictionary
    (restricted by ``--bc-index-used``) get an output file; otherwise a
    file is created for every barcode encountered.
    """
    parser = argparse.ArgumentParser(add_help=True)
    parser.add_argument('--sbam', type=str, metavar='FILENAME',
                        help='File path to SAM/BAM file')
    parser.add_argument('--savetodir', type=str, metavar='DIRNAME',
                        help='Directory path to save the demultiplexed SAMs.',
                        default='.')
    parser.add_argument('--bc-length', type=int, metavar='N',
                        help='Length of cell barcode.', default=6)
    parser.add_argument('--claim', action='store_true', dest='claim')
    parser.set_defaults(claim=False)
    parser.add_argument('--bc-index', type=str, metavar='FILENAME',
                        help='File path to barcode dictionary.')
    parser.add_argument('--bc-seq-column', type=int, metavar='N',
                        default=0,
                        help=('Column of cell barcode dictionary file '
                              'which tells the actual sequences.'))
    parser.add_argument('--bc-index-used', type=str, metavar='string',
                        default='1-96',
                        help='Index of used barcode IDs (default=1-96)')
    args = parser.parse_args()

    print_logger('Demultiplexing SAM/BAM starts {} ...'.format(args.sbam))
    if args.claim:
        # Map the selected barcode IDs to their sequences; unknown IDs map
        # to None and thus never match a read.
        all_bc_dict = bc_dict_id2seq(args.bc_index, args.bc_seq_column)
        bc_index_used = str2int(args.bc_index_used)
        bc_seq_used = [all_bc_dict.get(x, None) for x in bc_index_used]
        demultiplex_sam_with_claim(
            samfile=args.sbam,
            outdir=args.savetodir,
            bc_length=args.bc_length,
            claimed_bc=bc_seq_used)
    else:
        demultiplex_sam(
            samfile=args.sbam,
            outdir=args.savetodir,
            bc_length=args.bc_length)
    print_logger('Demultiplexing SAM/BAM ends. See: {}'.format(args.savetodir))
if __name__ == "__main__":
    # Script entry point.
    main()
| 30.324561 | 79 | 0.596471 |
538d7d0ad5525068f974e817f1c2ba43990d1097 | 116 | py | Python | finance4py/__init__.py | brokenlab/finance4py | 839fb4c262c369973c1afaebb23291355f8b4668 | [
"MIT"
] | 6 | 2016-12-28T03:40:46.000Z | 2017-03-31T12:04:43.000Z | finance4py/__init__.py | brokenlab/finance4py | 839fb4c262c369973c1afaebb23291355f8b4668 | [
"MIT"
] | null | null | null | finance4py/__init__.py | brokenlab/finance4py | 839fb4c262c369973c1afaebb23291355f8b4668 | [
"MIT"
] | 3 | 2018-04-26T03:14:29.000Z | 2021-06-13T16:18:04.000Z | # -*- coding: utf-8 -*-
from .instruments import Stock
from . import backtesting
__all__ = ['Stock', 'backtesting'] | 23.2 | 34 | 0.689655 |
71aa73742352e80e21409aa82e0682526a02d969 | 18,035 | py | Python | src/oidcop/oidc/registration.py | ctriant/oidc-op | 41d7f5e86ad086b6b3c266d1b92d54cbfe2b8733 | [
"Apache-2.0"
] | null | null | null | src/oidcop/oidc/registration.py | ctriant/oidc-op | 41d7f5e86ad086b6b3c266d1b92d54cbfe2b8733 | [
"Apache-2.0"
] | null | null | null | src/oidcop/oidc/registration.py | ctriant/oidc-op | 41d7f5e86ad086b6b3c266d1b92d54cbfe2b8733 | [
"Apache-2.0"
] | null | null | null | import hashlib
import hmac
import json
import logging
import secrets
import time
from typing import List
from urllib.parse import urlencode
from urllib.parse import urlparse
from cryptojwt.jws.utils import alg2keytype
from cryptojwt.utils import as_bytes
from oidcmsg.exception import MessageException
from oidcmsg.oauth2 import ResponseMessage
from oidcmsg.oidc import ClientRegistrationErrorResponse
from oidcmsg.oidc import RegistrationRequest
from oidcmsg.oidc import RegistrationResponse
from oidcmsg.time_util import utc_time_sans_frac
from oidcop import rndstr
from oidcop import sanitize
from oidcop.endpoint import Endpoint
from oidcop.exception import CapabilitiesMisMatch
from oidcop.exception import InvalidRedirectURIError
from oidcop.exception import InvalidSectorIdentifier
from oidcop.util import importer
from oidcop.util import split_uri
# Maps client registration preference parameters to the provider-info
# capability entries they must be validated against.
PREFERENCE2PROVIDER = {
    # "require_signed_request_object": "request_object_algs_supported",
    "request_object_signing_alg": "request_object_signing_alg_values_supported",
    "request_object_encryption_alg": "request_object_encryption_alg_values_supported",
    "request_object_encryption_enc": "request_object_encryption_enc_values_supported",
    "userinfo_signed_response_alg": "userinfo_signing_alg_values_supported",
    "userinfo_encrypted_response_alg": "userinfo_encryption_alg_values_supported",
    "userinfo_encrypted_response_enc": "userinfo_encryption_enc_values_supported",
    "id_token_signed_response_alg": "id_token_signing_alg_values_supported",
    "id_token_encrypted_response_alg": "id_token_encryption_alg_values_supported",
    "id_token_encrypted_response_enc": "id_token_encryption_enc_values_supported",
    "default_acr_values": "acr_values_supported",
    "subject_type": "subject_types_supported",
    "token_endpoint_auth_method": "token_endpoint_auth_methods_supported",
    "token_endpoint_auth_signing_alg": "token_endpoint_auth_signing_alg_values_supported",
    "response_types": "response_types_supported",
    "grant_types": "grant_types_supported",
}

# Module-level logger.
logger = logging.getLogger(__name__)
def match_sp_sep(first, second):
    """
    Verify that all the values in 'first' appear in 'second'.

    Values may be given as lists or as space-separated strings. List
    elements are compared as order-insensitive sets of their space-split
    tokens; string input is split into one single-token set per word.

    :param first: requested values
    :param second: supported values
    :return: True/False
    """
    def as_token_sets(value):
        if isinstance(value, list):
            return [set(item.split(" ")) for item in value]
        return [{word} for word in value.split(" ")]

    wanted = as_token_sets(first)
    available = as_token_sets(second)
    # every requested token-set must appear among the available ones
    return all(token_set in available for token_set in wanted)
def verify_url(url: str, urlset: List[list]) -> bool:
    """Return True if *url* shares scheme and netloc with any registered URI.

    :param url: URL to check
    :param urlset: sequence of (registered_uri, query_dict) pairs; the
        query part is ignored here.
    """
    target = urlparse(url)
    for registered, _query in urlset:
        candidate = urlparse(registered)
        if (candidate.scheme, candidate.netloc) == (target.scheme, target.netloc):
            return True
    return False
def secret(seed: str, sid: str):
    """Derive a client secret from the server seed and a unique id.

    The message mixes the current time, fresh randomness and *sid*, then
    is HMAC-SHA224'd under the seed.
    """
    material = "{}{}{}".format(time.time(), secrets.token_urlsafe(16), sid)
    digest = hmac.new(as_bytes(seed), material.encode("utf-8"), hashlib.sha224)
    return digest.hexdigest()
def comb_uri(args):
    """Recombine split URIs in a client metadata dict, in place.

    ``redirect_uris`` / ``post_logout_redirect_uris`` entries are stored as
    ``(base, query_dict)`` pairs and ``request_uris`` as ``(base, fragment)``
    pairs; this joins each pair back into a plain URI string.
    """
    for param in ["redirect_uris", "post_logout_redirect_uris"]:
        if param not in args:
            continue

        val = []
        for base, query_dict in args[param]:
            if query_dict:
                query_string = urlencode(
                    [
                        (key, v)
                        for key in query_dict
                        for v in query_dict[key]
                    ]
                )
                # BUG FIX: the original appended the literal string
                # "{base}?{query_string}" (missing f-prefix).
                val.append(f"{base}?{query_string}")
            else:
                val.append(base)
        args[param] = val

    request_uris = args.get("request_uris")
    if request_uris:
        val = []
        for base, frag in request_uris:
            if frag:
                val.append("{}#{}".format(base, frag))
            else:
                val.append(base)
        args["request_uris"] = val
def random_client_id(length: int = 16, reserved: list = None, **kwargs):
    """Create a new random client_id that is not already in use.

    :param length: length of the generated id. BUG FIX: the original
        ignored this parameter and always generated 16-character ids.
    :param reserved: client_ids already registered (client_id MUST be
        unique). BUG FIX: the mutable-default ``reserved=[]`` was replaced
        by the None idiom.
    :return: a fresh client_id string
    """
    if reserved is None:
        reserved = []
    # create new id och secret
    client_id = rndstr(length)
    # cdb client_id MUST be unique!
    while client_id in reserved:
        client_id = rndstr(length)
    return client_id
class Registration(Endpoint):
request_cls = RegistrationRequest
response_cls = RegistrationResponse
error_response = ClientRegistrationErrorResponse
request_format = "json"
request_placement = "body"
response_format = "json"
endpoint_name = "registration_endpoint"
name = "registration"
# default
# response_placement = 'body'
    def __init__(self, *args, **kwargs):
        """Set up the registration endpoint and its secret-generation seed.

        A ``seed`` keyword argument is honoured if present; otherwise a
        random 32-character seed is generated.
        """
        super().__init__(*args, **kwargs)
        # Those that use seed wants bytes but I can only store str.
        # seed
        _seed = kwargs.get("seed") or rndstr(32)
        self.seed = as_bytes(_seed)
    def match_client_request(self, request):
        """Check the client's requested preferences against OP capabilities.

        Every parameter named in PREFERENCE2PROVIDER is compared with the
        corresponding provider-info capability list.

        :param request: client registration request
        :raises CapabilitiesMisMatch: if a requested value is unsupported
        """
        _context = self.server_get("endpoint_context")
        for _pref, _prov in PREFERENCE2PROVIDER.items():
            if _pref in request:
                if _pref in ["response_types", "default_acr_values"]:
                    # space-separated composite values, e.g. "code id_token"
                    if not match_sp_sep(request[_pref], _context.provider_info[_prov]):
                        raise CapabilitiesMisMatch(_pref)
                else:
                    if isinstance(request[_pref], str):
                        if request[_pref] not in _context.provider_info[_prov]:
                            raise CapabilitiesMisMatch(_pref)
                    else:
                        # list-valued: every requested entry must be supported
                        if not set(request[_pref]).issubset(set(_context.provider_info[_prov])):
                            raise CapabilitiesMisMatch(_pref)
    def do_client_registration(self, request, client_id, ignore=None):
        """Merge a registration request into the stored client metadata.

        Validates post-logout/redirect/request URIs, the optional sector
        identifier, informational URLs and signing-algorithm support, and
        loads the client's keys into the key jar.

        :param request: client registration request
        :param client_id: id of the (already created) client record
        :param ignore: request parameters to leave out of the client info
        :return: the updated client-info dict, or an error ResponseMessage
        """
        if ignore is None:
            ignore = []

        _context = self.server_get("endpoint_context")
        _cinfo = _context.cdb[client_id].copy()
        logger.debug("_cinfo: %s" % sanitize(_cinfo))

        # Copy everything the caller did not explicitly exclude.
        for key, val in request.items():
            if key not in ignore:
                _cinfo[key] = val

        if "post_logout_redirect_uris" in request:
            plruri = []
            for uri in request["post_logout_redirect_uris"]:
                # fragments are forbidden in post-logout redirect URIs
                if urlparse(uri).fragment:
                    err = self.error_cls(
                        error="invalid_configuration_parameter",
                        error_description="post_logout_redirect_uris contains fragment",
                    )
                    return err
                plruri.append(split_uri(uri))
            _cinfo["post_logout_redirect_uris"] = plruri

        if "redirect_uris" in request:
            try:
                ruri = self.verify_redirect_uris(request)
                _cinfo["redirect_uris"] = ruri
            except InvalidRedirectURIError as e:
                return self.error_cls(error="invalid_redirect_uri", error_description=str(e))

        if "request_uris" in request:
            _uris = []
            for uri in request["request_uris"]:
                _up = urlparse(uri)
                # query parts are forbidden in request URIs
                if _up.query:
                    err = self.error_cls(
                        error="invalid_configuration_parameter",
                        error_description="request_uris contains query part",
                    )
                    return err
                if _up.fragment:
                    # store base and fragment
                    _uris.append(uri.split("#"))
                else:
                    _uris.append([uri, ""])
            _cinfo["request_uris"] = _uris

        if "sector_identifier_uri" in request:
            try:
                (_cinfo["si_redirects"], _cinfo["sector_id"],) = self._verify_sector_identifier(
                    request
                )
            except InvalidSectorIdentifier as err:
                return ResponseMessage(
                    error="invalid_configuration_parameter", error_description=str(err)
                )

        # Informational URLs must live on the same scheme/host as a
        # registered redirect URI.
        for item in ["policy_uri", "logo_uri", "tos_uri"]:
            if item in request:
                if verify_url(request[item], _cinfo["redirect_uris"]):
                    _cinfo[item] = request[item]
                else:
                    return ResponseMessage(
                        error="invalid_configuration_parameter",
                        error_description="%s pointed to illegal URL" % item,
                    )

        # Do I have the necessary keys
        for item in ["id_token_signed_response_alg", "userinfo_signed_response_alg"]:
            if item in request:
                if request[item] in _context.provider_info[PREFERENCE2PROVIDER[item]]:
                    ktyp = alg2keytype(request[item])
                    # do I have this ktyp and for EC type keys the curve
                    if ktyp not in ["none", "oct"]:
                        _k = []
                        for iss in ["", _context.issuer]:
                            _k.extend(
                                _context.keyjar.get_signing_key(
                                    ktyp, alg=request[item], issuer_id=iss
                                )
                            )
                        if not _k:
                            # no usable signing key: drop the preference
                            logger.warning('Lacking support for "{}"'.format(request[item]))
                            del _cinfo[item]

        t = {"jwks_uri": "", "jwks": None}

        for item in ["jwks_uri", "jwks"]:
            if item in request:
                t[item] = request[item]

        # if it can't load keys because the URL is false it will
        # just silently fail. Waiting for better times.
        _context.keyjar.load_keys(client_id, jwks_uri=t["jwks_uri"], jwks=t["jwks"])
        n_keys = 0
        for kb in _context.keyjar.get(client_id, []):
            n_keys += len(kb.keys())
        msg = "found {} keys for client_id={}"
        logger.debug(msg.format(n_keys, client_id))

        return _cinfo
    @staticmethod
    def verify_redirect_uris(registration_request):
        """Validate the redirect_uris of a client registration request.

        Rules applied (OIDC Dynamic Client Registration):
        - "web" clients must use https, except clients whose response_types
          is exactly ["code"], for which http is tolerated.
        - "native" clients may use a custom (non-http/https) scheme, or
          plain http restricted to localhost/127.0.0.1.
        - For web clients, redirect_uris must not contain a fragment.

        :param registration_request: Client registration request message
        :return: List of (base_uri, query) tuples; query is {} when the URI
            has no query part or uses a custom scheme.
        :raises InvalidRedirectURIError: If any URI violates the rules.
        """
        verified_redirect_uris = []
        client_type = registration_request.get("application_type", "web")
        must_https = False
        if client_type == "web":
            must_https = True
            if registration_request.get("response_types") == ["code"]:
                # Pure code-flow web clients are exempt from the https rule.
                must_https = False
        for uri in registration_request["redirect_uris"]:
            _custom = False
            p = urlparse(uri)
            if client_type == "native":
                # NOTE(review): native URIs skip the fragment check below --
                # confirm whether that is intentional.
                if p.scheme not in ["http", "https"]:  # Custom scheme
                    _custom = True
                elif p.scheme == "http" and p.hostname in ["localhost", "127.0.0.1"]:
                    pass
                else:
                    logger.error(
                        "InvalidRedirectURI: scheme:%s, hostname:%s", p.scheme, p.hostname,
                    )
                    raise InvalidRedirectURIError(
                        "Redirect_uri must use custom " "scheme or http and localhost"
                    )
            elif must_https and p.scheme != "https":
                msg = "None https redirect_uri not allowed"
                raise InvalidRedirectURIError(msg)
            elif p.scheme not in ["http", "https"]:
                # Custom scheme
                raise InvalidRedirectURIError("Custom redirect_uri not allowed for web client")
            elif p.fragment:
                raise InvalidRedirectURIError("redirect_uri contains fragment")
            if _custom:  # Can not verify a custom scheme
                verified_redirect_uris.append((uri, {}))
            else:
                base, query = split_uri(uri)
                if query:
                    verified_redirect_uris.append((base, query))
                else:
                    verified_redirect_uris.append((base, {}))
        return verified_redirect_uris
def _verify_sector_identifier(self, request):
"""
Verify `sector_identifier_uri` is reachable and that it contains
`redirect_uri`s.
:param request: Provider registration request
:return: si_redirects, sector_id
:raises: InvalidSectorIdentifier
"""
si_url = request["sector_identifier_uri"]
try:
res = self.server_get("endpoint_context").httpc.get(
si_url, **self.server_get("endpoint_context").httpc_params
)
logger.debug("sector_identifier_uri => %s", sanitize(res.text))
except Exception as err:
logger.error(err)
# res = None
raise InvalidSectorIdentifier("Couldn't read from sector_identifier_uri")
try:
si_redirects = json.loads(res.text)
except ValueError:
raise InvalidSectorIdentifier("Error deserializing sector_identifier_uri content")
if "redirect_uris" in request:
logger.debug("redirect_uris: %s", request["redirect_uris"])
for uri in request["redirect_uris"]:
if uri not in si_redirects:
raise InvalidSectorIdentifier("redirect_uri missing from sector_identifiers")
return si_redirects, si_url
def add_registration_api(self, cinfo, client_id, context):
_rat = rndstr(32)
cinfo["registration_access_token"] = _rat
endpoint = self.server_get("endpoints")
cinfo["registration_client_uri"] = "{}?client_id={}".format(
endpoint["registration_read"].full_path, client_id
)
context.registration_access_token[_rat] = client_id
def client_secret_expiration_time(self):
"""
Returns client_secret expiration time.
"""
if not self.kwargs.get("client_secret_expires", True):
return 0
_expiration_time = self.kwargs.get("client_secret_expires_in", 2592000)
return utc_time_sans_frac() + _expiration_time
def add_client_secret(self, cinfo, client_id, context):
client_secret = secret(self.seed, client_id)
cinfo["client_secret"] = client_secret
_eat = self.client_secret_expiration_time()
if _eat:
cinfo["client_secret_expires_at"] = _eat
return client_secret
    def client_registration_setup(self, request, new_id=True, set_secret=True):
        """Validate a registration request and create/update the client entry.

        Flow: verify and normalize the request, check it against provider
        capabilities, determine the client_id (freshly minted or taken from
        the request), attach salt / registration API info / secret, store a
        preliminary record in the CDB, run do_client_registration for the
        remaining metadata, then store the full record and build the
        registration response.

        :param request: The client registration request message
        :param new_id: Mint a new client_id (True) or reuse the request's
        :param set_secret: Whether to generate a client_secret
        :return: The registration response message, or a ResponseMessage
            carrying an error.
        :raises ValueError: If new_id is False and the request lacks client_id
        """
        try:
            request.verify()
        except (MessageException, ValueError) as err:
            logger.error("request.verify() on %s", request)
            return ResponseMessage(
                error="invalid_configuration_request", error_description="%s" % err
            )
        # Drop empty-valued parameters before further processing.
        request.rm_blanks()
        try:
            self.match_client_request(request)
        except CapabilitiesMisMatch as err:
            return ResponseMessage(
                error="invalid_request", error_description="Don't support proposed %s" % err,
            )
        _context = self.server_get("endpoint_context")
        if new_id:
            # Use the configured client_id generator, or the default random one.
            if self.kwargs.get("client_id_generator"):
                cid_generator = importer(self.kwargs["client_id_generator"]["class"])
                cid_gen_kwargs = self.kwargs["client_id_generator"].get("kwargs", {})
            else:
                cid_generator = importer("oidcop.oidc.registration.random_client_id")
                cid_gen_kwargs = {}
            client_id = cid_generator(reserved=_context.cdb.keys(), **cid_gen_kwargs)
            # A client-proposed client_id is ignored when we mint our own.
            if "client_id" in request:
                del request["client_id"]
        else:
            client_id = request.get("client_id")
            if not client_id:
                raise ValueError("Missing client_id")
        _cinfo = {"client_id": client_id, "client_salt": rndstr(8)}
        # Only advertise the registration management API if it is deployed.
        if self.server_get("endpoint", "registration_read"):
            self.add_registration_api(_cinfo, client_id, _context)
        if new_id:
            _cinfo["client_id_issued_at"] = utc_time_sans_frac()
        client_secret = ""
        if set_secret:
            client_secret = self.add_client_secret(_cinfo, client_id, _context)
        logger.debug("Stored client info in CDB under cid={}".format(client_id))
        # Preliminary store: do_client_registration may look the client up.
        _context.cdb[client_id] = _cinfo
        _cinfo = self.do_client_registration(
            request, client_id, ignore=["redirect_uris", "policy_uri", "logo_uri", "tos_uri"],
        )
        if isinstance(_cinfo, ResponseMessage):
            # Registration of the remaining metadata failed; propagate error.
            return _cinfo
        # Keep only parameters that belong in the response message.
        args = dict([(k, v) for k, v in _cinfo.items() if k in self.response_cls.c_param])
        comb_uri(args)
        response = self.response_cls(**args)
        # Add the client_secret as a symmetric key to the key jar
        if client_secret:
            _context.keyjar.add_symmetric(client_id, str(client_secret))
        logger.debug("Stored updated client info in CDB under cid={}".format(client_id))
        logger.debug("ClientInfo: {}".format(_cinfo))
        _context.cdb[client_id] = _cinfo
        # Not all databases can be sync'ed
        if hasattr(_context.cdb, "sync") and callable(_context.cdb.sync):
            _context.cdb.sync()
        msg = "registration_response: {}"
        logger.info(msg.format(sanitize(response.to_dict())))
        return response
def process_request(self, request=None, new_id=True, set_secret=True, **kwargs):
try:
reg_resp = self.client_registration_setup(request, new_id, set_secret)
except Exception as err:
logger.error("client_registration_setup: %s", request)
return ResponseMessage(
error="invalid_configuration_request", error_description="%s" % err
)
if "error" in reg_resp:
return reg_resp
else:
_context = self.server_get("endpoint_context")
_cookie = _context.new_cookie(
name=_context.cookie_handler.name["register"], client_id=reg_resp["client_id"],
)
return {"response_args": reg_resp, "cookie": _cookie}
| 37.809224 | 97 | 0.598226 |
4907cf650a32aea227af93685345487087285012 | 788 | py | Python | curation_projects/misc/chitraveda_images.py | lalitaalaalitah/doc_curation | d26072dc5afd645019788b4844058da5d0c63ebe | [
"MIT"
] | 7 | 2020-04-25T08:55:55.000Z | 2020-12-21T05:31:00.000Z | curation_projects/misc/chitraveda_images.py | lalitaalaalitah/doc_curation | d26072dc5afd645019788b4844058da5d0c63ebe | [
"MIT"
] | 15 | 2020-04-25T08:31:42.000Z | 2021-07-24T12:23:42.000Z | curation_projects/misc/chitraveda_images.py | lalitaalaalitah/doc_curation | d26072dc5afd645019788b4844058da5d0c63ebe | [
"MIT"
] | 4 | 2020-04-25T06:28:37.000Z | 2020-12-19T21:30:10.000Z | import logging
import regex
from doc_curation import google_sheets_index
# Reset root logging: drop any pre-existing handlers so basicConfig takes
# effect, then install our format at DEBUG level.
root_logger = logging.getLogger()
for existing_handler in list(root_logger.handlers):
    root_logger.removeHandler(existing_handler)
logging.basicConfig(
    format="%(levelname)s:%(asctime)s:%(module)s:%(lineno)d %(message)s",
    level=logging.DEBUG)
if __name__ == "__main__":
    # Download every image listed in the sheet's worksheet into a local dir.
    # NOTE(review): the service-account key path and destination directory are
    # hard-coded absolute paths for one machine; parameterize before reuse.
    # "spreadhsheet_id" (typo included) is the keyword the ImageSheet API
    # appears to expect -- confirm upstream before "fixing" it.
    doc_data = google_sheets_index.ImageSheet(spreadhsheet_id="10yfI7hntiJ1NudQbFBFtQYGntiMdsjbu_9r6MYoCnCw",
                                              worksheet_name="चित्राणि", google_key = '/home/vvasuki/sysconf/kunchikA/google/sanskritnlp/service_account_key.json', url_column="imgurl", file_name_column="description")
    doc_data.download_all(destination_dir="/home/vvasuki/vvasuki-git/notes/mantra/images")
| 37.52381 | 216 | 0.738579 |
cc6b654dcd699052ae003a89faf30b74907a0b2a | 803 | py | Python | tests/test_lang.py | edne/rami | 9c30fdb0dc8afb7898959ba71b8abca84a97cf25 | [
"MIT"
] | null | null | null | tests/test_lang.py | edne/rami | 9c30fdb0dc8afb7898959ba71b8abca84a97cf25 | [
"MIT"
] | null | null | null | tests/test_lang.py | edne/rami | 9c30fdb0dc8afb7898959ba71b8abca84a97cf25 | [
"MIT"
] | null | null | null | from rami.lang import Lang
# Shared Lang instance under test; the decorated handlers below register
# themselves on it.
lang = Lang()
# Leaf handlers: receive the node's value and wrap it in a one-key dict.
@lang.leaf
def sides(value):
    return {'sides': value}
@lang.leaf
def color(value):
    return {'color': value}
# Branch handlers: receive the child results (an iterable -- they are passed
# through list()) and wrap them in a one-key dict.
@lang.branch
def group(branches):
    return {'group': list(branches)}
@lang.branch
def square(branches):
    return {'square': list(branches)}
@lang.branch
def polygon(branches):
    return {'polygon': list(branches)}
# Sample program: indentation denotes nesting, "name: value" denotes leaves.
code = '''
group:
    square:
        color: [0, 1, 1]
    polygon:
        sides: 6
        color: [1, 1, 0]
polygon:
    sides: 3
    color: [1, 1, 0]
'''
def test_lang():
    """Evaluating the sample program yields one tree per top-level node."""
    first_tree, second_tree = lang.eval(code)
    expected_first = {
        'group': [
            {'square': [{'color': [0, 1, 1]}]},
            {'polygon': [{'sides': 6}, {'color': [1, 1, 0]}]},
        ]
    }
    expected_second = {'polygon': [{'sides': 3}, {'color': [1, 1, 0]}]}
    assert first_tree == expected_first
    assert second_tree == expected_second
| 14.87037 | 78 | 0.526775 |
a3f4ed50967431c03d0015817ae2fa4c90f054f7 | 955 | py | Python | fetal_plate_segmentation/tests/test_fetal_plate_segmentation.py | jwhong1125/pl-fetal_plate_segmentation | 02ca7da1a0232f0d411e7cbb515c4a80ad4037fc | [
"MIT"
] | null | null | null | fetal_plate_segmentation/tests/test_fetal_plate_segmentation.py | jwhong1125/pl-fetal_plate_segmentation | 02ca7da1a0232f0d411e7cbb515c4a80ad4037fc | [
"MIT"
] | 6 | 2020-01-28T23:15:07.000Z | 2022-02-10T00:39:03.000Z | fetal_plate_segmentation/tests/test_fetal_plate_segmentation.py | jwhong1125/pl-fetal_plate_segmentation | 02ca7da1a0232f0d411e7cbb515c4a80ad4037fc | [
"MIT"
] | null | null | null |
from unittest import TestCase
from unittest import mock
from fetal_plate_segmentation.fetal_plate_segmentation import Fetal_plate_segmentation
class Fetal_plate_segmentationTests(TestCase):
    """Unit tests for the Fetal_plate_segmentation plugin app."""

    def setUp(self):
        # Fresh app instance for every test.
        self.app = Fetal_plate_segmentation()

    def test_run(self):
        """The app parses its directory arguments and runs without error."""
        # DS-type plugins take a mock input and output directory.
        args = ['inputdir', 'outputdir'] if self.app.TYPE == 'ds' else []
        # Extend `args` here to exercise custom optional arguments, e.g.
        # args += ['--custom-int', 10]
        options = self.app.parse_args(args)
        self.app.run(options)
        self.assertEqual(options.outputdir, 'outputdir')
| 28.088235 | 86 | 0.637696 |
2459cbff6e0c67037d407f7e2008ee9cfff566fc | 230 | py | Python | tests/server_test.py | artegoser/sUDP | c2e8f0a2fdd535fd9ff1a165c93b275762bff142 | [
"MIT"
] | 1 | 2021-01-21T08:37:55.000Z | 2021-01-21T08:37:55.000Z | tests/server_test.py | artegoser/sUDP | c2e8f0a2fdd535fd9ff1a165c93b275762bff142 | [
"MIT"
] | null | null | null | tests/server_test.py | artegoser/sUDP | c2e8f0a2fdd535fd9ff1a165c93b275762bff142 | [
"MIT"
] | null | null | null | import SUDP
import time
# Minimal manual echo test for sUDP: bind to a fixed LAN address and answer
# every incoming datagram with "lol", printing sendto's return value.
# NOTE(review): address and port are hard-coded; adjust to the local
# interface before running. Loop runs until interrupted.
sock = SUDP.sUDPsocket()
sock.bind (('192.168.0.183',9090))
while True:
    msg , address = sock.recvfrom(1024)
    sock.retimeout = 1 #any timeout -- presumably the retransmit timeout; confirm sUDP API
    print(sock.sendto(("lol").encode("utf-8"), address))
f764d091b3a085d00802ab7f140d23d2ba7a09fd | 125 | py | Python | 0x04-python-more_data_structures/1-search_replace.py | omarcherni007/holbertonschool-higher_level_programming | 65f3430ab0310f85368d73cb72e139631e8c6f1e | [
"MIT"
] | 1 | 2022-01-04T11:07:56.000Z | 2022-01-04T11:07:56.000Z | 0x04-python-more_data_structures/1-search_replace.py | omarcherni007/holbertonschool-higher_level_programming | 65f3430ab0310f85368d73cb72e139631e8c6f1e | [
"MIT"
] | null | null | null | 0x04-python-more_data_structures/1-search_replace.py | omarcherni007/holbertonschool-higher_level_programming | 65f3430ab0310f85368d73cb72e139631e8c6f1e | [
"MIT"
] | null | null | null | #!/usr/bin/python3
def search_replace(my_list, search, replace):
    """Return a new list where every element equal to search is replaced."""
    return [replace if element == search else element for element in my_list]
| 31.25 | 59 | 0.72 |
f1983e8f6a07b8ff320d3b8644142f41ca27289a | 3,098 | py | Python | protons/scripts/cli.py | choderalab/Protons | a55d7a72d0e4f2f402a2c144c5a7fb34ccce2a7f | [
"MIT"
] | 16 | 2016-12-30T23:58:06.000Z | 2021-09-29T02:40:35.000Z | protons/scripts/cli.py | choderalab/protons | a55d7a72d0e4f2f402a2c144c5a7fb34ccce2a7f | [
"MIT"
] | 75 | 2016-10-31T19:52:32.000Z | 2021-03-03T00:01:35.000Z | protons/scripts/cli.py | choderalab/Protons | a55d7a72d0e4f2f402a2c144c5a7fb34ccce2a7f | [
"MIT"
] | 5 | 2017-01-18T23:15:07.000Z | 2021-04-01T00:50:36.000Z | """This library contains a minimal command line interface to enable tracking the common use cases.
This should be more maintainable than having separate python scripts inside the examples directory.
"""
import os
import sys
from ..app.logger import log, logging
from typing import List
from .run_simulation import run_main
from .run_prep_ffxml import run_prep_ffxml_main
from .run_parametrize_ligand import run_parametrize_main
log.setLevel(logging.DEBUG)
# Define a main function that can read in a json file with simulation settings, sets up, and runs the simulation.
_logo = """
██████╗ ██████╗ ██████╗ ████████╗ ██████╗ ███╗ ██╗███████╗
██╔══██╗██╔══██╗██╔═══██╗╚══██╔══╝██╔═══██╗████╗ ██║██╔════╝
██████╔╝██████╔╝██║ ██║ ██║ ██║ ██║██╔██╗ ██║███████╗
██╔═══╝ ██╔══██╗██║ ██║ ██║ ██║ ██║██║╚██╗██║╚════██║
██║ ██║ ██║╚██████╔╝ ██║ ╚██████╔╝██║ ╚████║███████║
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝╚══════╝
"""
def validate(args: List[str]) -> str:
    """Validate CLI input; return "" when valid, else a help/error string.

    :param args: sys.argv-style list: [program, command, toml-file]
    :return: Empty string when the arguments are usable, otherwise the text
        to show the user (usage banner or a specific error).
    :raises NotImplementedError: For ``help`` on a known command (the long
        help texts are not written yet).
    """
    usage = (
        _logo
        + """
Protons minimal command line interface.
Usage
-----
protons param <toml>
    Parameterize a ligand based on a user provided toml input file.
protons prep <toml>
    Produce an input file for a constant-pH simulation or calibration by specifying simulation settings in a toml file.
    Note: Currently only ffxml supported.
protons run <toml>
    Run a constant-pH simulation or calibration from a toml file.
protons help <cmd>
    Provide a longer explanation for a command, including example toml files.
Copyright
---------
The John D. Chodera lab, 2019
See more at https://protons.readthedocs.io/en/latest/,
or check out our website: https://www.choderalab.org
"""
    )
    if len(args) != 3:
        return usage
    cmd = args[1].lower()
    arg = args[2]
    if cmd == "help":
        if arg.lower() == "param":
            raise NotImplementedError("This help feature is incomplete.")
        elif arg.lower() == "prep":
            raise NotImplementedError("This help feature is incomplete.")
        elif arg.lower() == "run":
            raise NotImplementedError("This help feature is incomplete.")
        else:
            return f"Unknown command: {arg}. \n" + usage
    elif cmd in ["param", "prep", "run"]:
        if os.path.splitext(arg)[1].lower() != ".toml":
            return "Please provide a '.toml' file as input."
        else:
            return ""
    else:
        # Fix: an unknown top-level command previously fell off the end and
        # returned None, which cli() treated as success and then silently
        # exited 0. Report it like an unknown `help` argument instead.
        return f"Unknown command: {cmd}. \n" + usage
def cli() -> None:
    """Command line interface for protons run script."""
    args = sys.argv
    problem = validate(args)
    if problem:
        sys.stderr.write(problem)
        sys.exit(1)
    command = args[1].lower()
    toml_path = args[2]
    # Dispatch table replaces the if/elif chain; unknown commands fall
    # through to sys.exit(0) exactly as before.
    dispatch = {
        "run": run_main,
        "prep": run_prep_ffxml_main,
        "param": run_parametrize_main,
    }
    handler = dispatch.get(command)
    if handler is not None:
        handler(toml_path)
    sys.exit(0)
| 27.90991 | 123 | 0.544545 |
f5dbced62806b2c242a44aa2d89c99bede047878 | 3,265 | py | Python | password_manager.py | Sarvesh-SP/passwordManager | 2ef32aa1329c2c49e3fada776855caf1fc791b4f | [
"MIT"
] | null | null | null | password_manager.py | Sarvesh-SP/passwordManager | 2ef32aa1329c2c49e3fada776855caf1fc791b4f | [
"MIT"
] | null | null | null | password_manager.py | Sarvesh-SP/passwordManager | 2ef32aa1329c2c49e3fada776855caf1fc791b4f | [
"MIT"
] | null | null | null | #importing configs and credentials
from utils import config
from update import update
#from secret import get_secret_key
from menu import menu, create, find, find_accounts, delAccount
#importing the crypto module
from cryptography.fernet import Fernet
# functions to generate/load an encryption key
def generate_key():
    """Create a new Fernet key and persist it to secret.key."""
    with open('secret.key', 'wb') as fh:
        fh.write(Fernet.generate_key())
def load_key():
    """Read and return the Fernet key bytes from secret.key.

    Uses a context manager so the file handle is closed deterministically
    (the previous version leaked the open file object).
    """
    with open('secret.key', 'rb') as fh:
        return fh.read()
# functions to encrypt and decrypt the password
def encrypt_pw(password):
    """Encrypt a plaintext password with the stored key; returns token bytes."""
    cipher = Fernet(load_key())
    return cipher.encrypt(password.encode())
def decrypt_pw(enc_password):
    """Decrypt a Fernet token produced by encrypt_pw back to a str."""
    cipher = Fernet(load_key())
    return cipher.decrypt(enc_password).decode()
# this function is run to see whether the program has already been run, and to parse data from the config_file.txt file
def launch():
    """First-run setup / configuration loader.

    On first run (config.islaunched is False) this creates a new Fernet key,
    interactively collects the master credentials and database settings, and
    persists them via update(). On later runs they are read from utils.config.
    Returns (launched, user_name, password, user, pw_db, dbname). The
    ``global`` statement inside the if-branch applies to the whole function,
    so the else-branch assignments also set module globals, which the
    module-level run(...) call relies on.
    """
    global launched
    launched = False
    # `launched` is False here, so this branch runs exactly when the stored
    # config says the app has NOT been launched before.
    if launched == config.islaunched:
        generate_key()
        global user_name,password,user,pw_db,dbname
        print("---------------------------------------------------------")
        print('Please Enter the masterUsername: ')
        user_name = input('--> ')
        print("---------------------------------------------------------")
        # The master password is stored encrypted, never in plaintext.
        password = encrypt_pw(input('Enter your password: \n--> '))
        print("---------------------------------------------------------")
        print('Enter the name of the user of your data base: ')
        user = input('--> ')
        print("---------------------------------------------------------")
        print('The password of the data base: ')
        pw_db = input('--> ')
        print("---------------------------------------------------------")
        print('Finally, the name of the data base: ')
        dbname = input('--> ')
        print("---------------------------------------------------------")
        launched = True
        # Persist everything so the next start skips this interactive setup.
        args = ["islaunched","user_name","password","dbuser","dbname","dbpw"]
        credentials = [launched,user_name, password,user,dbname, pw_db]
        update(args,credentials)
    else:
        # Already configured: load the stored values.
        user_name = config.user_name
        password = config.password
        user = config.dbuser
        pw_db = config.dbpw
        dbname = config.dbname
    return launched,user_name, password,user,pw_db,dbname
def run(n,p,a,b,c):
    """Interactive main loop of the password manager.

    :param n: master username (used only in the prompt)
    :param p: encrypted master password (Fernet token)
    :param a: database user
    :param b: database password
    :param c: database name
    NOTE(review): a/b/c are forwarded unchanged to the menu helpers
    (create/find/...); confirm their expected argument order matches.
    """
    print("----------------------------------------------------------------------")
    passw = input(f'Please provide the master password to start using {n}: ')
    print("----------------------------------------------------------------------")
    # Compare the typed password against the decrypted stored master password.
    if passw == decrypt_pw(p):
        print('You\'re in')
    else:
        print('no luck')
        exit()
    choice = menu()
    # Menu loop until 'Q': 1=create, 2=list accounts, 3=find, 4=delete.
    while choice != 'Q':
        if choice == '1':
            create(a,b,c)
        if choice == '2':
            find_accounts(a,b,c)
        if choice == '3':
            find(a,b,c)
            choice = menu()
        elif choice == '4':
            choice = delAccount(a, b, c)
        else:
            choice = menu()
#if __name__ == "__main__":
# Script entry: load (or interactively collect) the configuration, then start
# the menu loop with the module globals that launch() sets.
# NOTE(review): there is no active __main__ guard, so this also runs on import.
launch()
run(user_name, password,user,pw_db,dbname)
| 33.316327 | 115 | 0.518224 |
6e906c92aae1110cb86839d7bb53080029048724 | 1,460 | py | Python | wsgi/iportalen_django/articles/migrations/0029_imageattachment_otherattachment.py | I-sektionen/i-portalen | 1713e5814d40c0da1bf3278d60a561e7d3df3550 | [
"MIT"
] | 4 | 2016-09-21T17:06:01.000Z | 2018-02-06T16:36:44.000Z | wsgi/iportalen_django/articles/migrations/0029_imageattachment_otherattachment.py | I-sektionen/i-portalen | 1713e5814d40c0da1bf3278d60a561e7d3df3550 | [
"MIT"
] | 149 | 2016-03-07T23:50:47.000Z | 2022-03-11T23:16:33.000Z | wsgi/iportalen_django/articles/migrations/0029_imageattachment_otherattachment.py | I-sektionen/i-portalen | 1713e5814d40c0da1bf3278d60a561e7d3df3550 | [
"MIT"
] | 1 | 2016-03-07T23:02:06.000Z | 2016-03-07T23:02:06.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import articles.models
class Migration(migrations.Migration):
    # Adds the ImageAttachment and OtherAttachment tables, each with a
    # ForeignKey to articles.Article. Auto-generated by makemigrations;
    # avoid hand-editing operations once this migration has been applied.
    dependencies = [
        ('articles', '0028_auto_20160104_2353'),
    ]
    operations = [
        migrations.CreateModel(
            name='ImageAttachment',
            fields=[
                ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
                ('img', models.ImageField(verbose_name='artikelbild', upload_to=articles.models._image_file_path)),
                ('thumbnail', models.ImageField(blank=True, verbose_name='förhandsvisning', upload_to=articles.models._image_file_path, null=True)),
                ('caption', models.CharField(max_length=100)),
                ('article', models.ForeignKey(to='articles.Article')),
            ],
        ),
        migrations.CreateModel(
            name='OtherAttachment',
            fields=[
                ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
                ('file', models.FileField(verbose_name='artikelbilaga', upload_to=articles.models._file_path)),
                ('display_name', models.CharField(max_length=160)),
                ('file_name', models.CharField(blank=True, max_length=300)),
                ('article', models.ForeignKey(to='articles.Article')),
            ],
        ),
    ]
8439e76c26e37961eb1f7e9d735d7f6202c644ce | 688 | py | Python | py_wake/tests/test_utils/test_check_input.py | aemoser/PyWake | 889a2c10882195af21339e9bcf2ede0db9b58319 | [
"MIT"
] | 30 | 2019-03-18T14:10:27.000Z | 2022-03-13T17:39:04.000Z | py_wake/tests/test_utils/test_check_input.py | aemoser/PyWake | 889a2c10882195af21339e9bcf2ede0db9b58319 | [
"MIT"
] | 1 | 2020-11-12T06:13:00.000Z | 2020-11-12T06:43:26.000Z | py_wake/tests/test_utils/test_check_input.py | aemoser/PyWake | 889a2c10882195af21339e9bcf2ede0db9b58319 | [
"MIT"
] | 20 | 2019-01-11T14:45:13.000Z | 2021-12-13T19:55:29.000Z | import pytest
from py_wake.utils.check_input import check_input
import numpy as np
def test_check_input():
    """check_input raises ValueError for out-of-range values, naming the
    offending dimension (index_<i> by default, or the supplied name)."""
    # Input space: first dimension valid in [0, 1], second in [100, 200].
    input_space = [(0, 1), (100, 200)]
    # First dimension out of range -> generic "index_0" in the message.
    with pytest.raises(ValueError, match="Input, index_0, with value, 2 outside range 0-1"):
        check_input(input_space, np.array([(2, 150)]).T)
    # Second dimension out of range -> "index_1".
    with pytest.raises(ValueError, match="Input, index_1, with value, 50 outside range 100-200"):
        check_input(input_space, np.array([(1, 50)]).T)
    # With explicit names supplied, the message uses the name ('wd').
    with pytest.raises(ValueError, match="Input, wd, with value, 250 outside range 100-200"):
        check_input(input_space, np.array([(1, 250)]).T, ['ws', 'wd'])
    # Upper boundary value (200) is accepted without raising.
    check_input(input_space, np.array([(1, 200)]).T, ['ws', 'wd'])
| 36.210526 | 97 | 0.667151 |
af1148b45a88f76f79d3ff1e4f04186e11212e0b | 955 | py | Python | commands/mkdir.py | dtebbs/tzbuild | be284674bd17db8f5de33b27db14381fa1290da6 | [
"BSD-3-Clause"
] | null | null | null | commands/mkdir.py | dtebbs/tzbuild | be284674bd17db8f5de33b27db14381fa1290da6 | [
"BSD-3-Clause"
] | null | null | null | commands/mkdir.py | dtebbs/tzbuild | be284674bd17db8f5de33b27db14381fa1290da6 | [
"BSD-3-Clause"
] | null | null | null | from os import makedirs
from os.path import exists
from sys import argv
def mkdir():
args = argv[1:]
dirs = []
verbose = False
create_intermediate = True
while len(args) > 0:
a = args.pop(0)
if "-v" == a:
verbose = True
elif "-p" == a:
# TODO: ?
create_intermediate = True
elif "-h" == a or "--help" == a:
usage()
exit(0)
else:
dirs.append(a)
for d in dirs:
if not exists(d):
if verbose:
print "Making dir: %s" % d
try:
makedirs(d)
except OSError, e:
# Directory may have already been created after the check
# above
pass
if not exists(d):
print "Error creating dir: %s" % d
return 1
return 0
# Script entry point; process exit status is mkdir()'s return value.
if "__main__" == __name__:
    exit(mkdir())
| 20.76087 | 73 | 0.448168 |
5837a31b419fd2eb2fecca560f3b7a2d9c7ea344 | 801 | py | Python | draco/programs.py | ShreyaBM/draco2 | 1b937b53c59a0821a4ed32a7380709e5b1798fe4 | [
"MIT"
] | 22 | 2020-11-17T18:46:59.000Z | 2022-02-22T23:15:45.000Z | draco/programs.py | ShreyaBM/draco2 | 1b937b53c59a0821a4ed32a7380709e5b1798fe4 | [
"MIT"
] | 65 | 2020-12-02T21:51:48.000Z | 2022-03-24T20:23:11.000Z | draco/programs.py | ShreyaBM/draco2 | 1b937b53c59a0821a4ed32a7380709e5b1798fe4 | [
"MIT"
] | 5 | 2020-11-20T05:36:28.000Z | 2022-01-11T21:12:49.000Z | from dataclasses import dataclass
from pathlib import Path
from draco.asp_utils import Blocks, parse_blocks
asp_path = Path(__file__).resolve().parent / "asp"
@dataclass(frozen=True)
class Program:
    """An immutable Answer Set Programming (ASP) program.

    Attributes:
        :program: The complete program source text.
        :blocks: The blocks parsed out of the program source.
    """
    # Full ASP source as read from disk.
    program: str
    # Parsed block structure (see draco.asp_utils.parse_blocks).
    blocks: Blocks
def get_program(file_path: Path) -> Program:
    """Read an ASP (.lp) file and return it as a parsed Program.

    :param file_path: Path to the .lp file to load.
    :return: Program holding the raw source and its parsed blocks.
    """
    # Explicit encoding so reading does not depend on the platform's locale
    # default (the bundled .lp files are plain text).
    with open(file_path, encoding="utf-8") as f:
        prog = f.read()
    return Program(prog, parse_blocks(prog))
# Eagerly load the bundled ASP programs at import time so consumers can use
# them directly as module attributes.
definitions = get_program(asp_path / "define.lp")
constraints = get_program(asp_path / "constraints.lp")
generate = get_program(asp_path / "generate.lp")
hard = get_program(asp_path / "hard.lp")
helpers = get_program(asp_path / "helpers.lp")
| 24.272727 | 57 | 0.701623 |
0c54a06985d0ef817c355d6b5bafc5e2d9a2a0ba | 2,233 | py | Python | app/api/v2/models/sales_models.py | Deekerubo/Store-Manager-API | 02ed2ce0c82d6854cc531a33a85bf9b8dff007fa | [
"MIT"
] | null | null | null | app/api/v2/models/sales_models.py | Deekerubo/Store-Manager-API | 02ed2ce0c82d6854cc531a33a85bf9b8dff007fa | [
"MIT"
] | 2 | 2018-12-10T12:03:16.000Z | 2019-10-21T16:53:02.000Z | app/api/v2/models/sales_models.py | Deekerubo/Store-Manager-API | 02ed2ce0c82d6854cc531a33a85bf9b8dff007fa | [
"MIT"
] | null | null | null | '''contains models for the app'''
import os
import psycopg2.extras
from psycopg2 import sql
from .basemodel import Basemodel
from app.api.database import init_DB
conn= init_DB()
cursor = conn.cursor()
class Sale(Basemodel):
    """Data-access model for rows of the ``sales`` table.

    All queries now use psycopg2 parameter binding (%s placeholders with a
    separate argument tuple) instead of interpolating user input into the
    SQL text, which closed an SQL-injection hole.
    NOTE(review): some methods use the module-level ``conn``/``cursor`` and
    others use ``self.conn``/``self.cursor`` (two separate connections);
    that split is preserved from the original code -- confirm intent.
    """

    def __init__(self):
        super().__init__()

    def add_sale(self, sales_items, quantity, price):
        """Adds new orders"""
        self.cursor.execute(
            """INSERT INTO
            sales (sales_items, quantity, price)
            VALUES(%s, %s, %s)""",
            (sales_items, quantity, price))
        self.conn.commit()

    def find_sale_name(self, sales_items):
        '''Get a product by item name'''
        cursor.execute(
            """SELECT * FROM sales WHERE sales_items=%s""", (sales_items,))
        sales = cursor.fetchone()
        conn.commit()
        return sales

    def all_orders(self):
        """Return available orders"""
        self.cursor.execute("""SELECT * FROM sales """)
        sales = self.cursor.fetchall()
        self.conn.commit()
        return sales

    def single_order(self, id):
        '''Return a single Order '''
        cursor.execute("""SELECT * FROM sales WHERE id=%s;""", (id,))
        singlesale = cursor.fetchone()
        conn.commit()
        return singlesale

    def delete_order(self, id):
        '''Delete a product'''
        cursor.execute("""SELECT * FROM sales WHERE id=%s;""", (id,))
        dele = cursor.fetchone()
        if not dele:
            return {'message': 'sale ID not found'}
        cursor.execute("""DELETE FROM sales WHERE id=%s;""", (id,))
        conn.commit()
        return {'message': 'sale deleted'}, 200

    def modify_items(self, id, sales_items, quantity, price):
        '''modify a sale'''
        self.cursor.execute(
            """UPDATE sales
            SET sales_items=%s,
            quantity=%s,
            price=%s
            WHERE id=%s RETURNING id;""",
            (sales_items, quantity, price, id))
        modify = self.cursor.fetchone()
        self.conn.commit()
        if not modify:
            return {'message': 'sales item not found'}
        return modify
| 30.589041 | 100 | 0.553963 |
0447b25bbb2b2ae68eceaf05d42a755d4703fb42 | 3,550 | py | Python | easy_rec/python/input/tfrecord_input.py | alibaba/EasyRec | 436205f8480fa131d4b6e9d166b3ab85bd6b9d9d | [
"Apache-2.0"
] | 285 | 2021-10-11T03:39:43.000Z | 2022-03-31T09:12:33.000Z | easy_rec/python/input/tfrecord_input.py | alibaba/EasyRec | 436205f8480fa131d4b6e9d166b3ab85bd6b9d9d | [
"Apache-2.0"
] | 84 | 2021-10-15T03:48:58.000Z | 2022-03-31T12:38:53.000Z | easy_rec/python/input/tfrecord_input.py | alibaba/EasyRec | 436205f8480fa131d4b6e9d166b3ab85bd6b9d9d | [
"Apache-2.0"
] | 71 | 2021-10-15T03:33:44.000Z | 2022-03-31T08:37:11.000Z | # -*- encoding:utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
import logging
import tensorflow as tf
from easy_rec.python.input.input import Input
from easy_rec.python.utils.tf_utils import get_tf_type
# Run through the TF1-compatible API when TensorFlow 2.x is installed.
# NOTE(review): this is a lexicographic string comparison of __version__;
# fine for 1.x vs 2.x, but would misorder a hypothetical major version >= 10.
if tf.__version__ >= '2.0':
  tf = tf.compat.v1
class TFRecordInput(Input):
  """EasyRec input pipeline that reads TFRecord files into a tf.data.Dataset."""

  def __init__(self,
               data_config,
               feature_config,
               input_path,
               task_index=0,
               task_num=1,
               check_mode=False):
    super(TFRecordInput, self).__init__(data_config, feature_config, input_path,
                                        task_index, task_num, check_mode)
    # Build the parse_single_example feature spec from the configured input
    # fields: each field is a fixed-length (shape 1) feature with a
    # type-appropriate default value.
    self.feature_desc = {}
    for x, t, d in zip(self._input_fields, self._input_field_types,
                       self._input_field_defaults):
      d = self.get_type_defaults(t, d)
      t = get_tf_type(t)
      self.feature_desc[x] = tf.FixedLenFeature(
          dtype=t, shape=1, default_value=d)

  def _parse_tfrecord(self, example):
    """Parse one serialized tf.Example according to self.feature_desc."""
    try:
      inputs = tf.parse_single_example(example, features=self.feature_desc)
    except AttributeError:
      # Newer TF releases expose the parser under tf.io instead.
      inputs = tf.io.parse_single_example(example, features=self.feature_desc)
    return inputs

  def _build(self, mode, params):
    """Create the tf.data.Dataset for the given estimator mode.

    Training: file-level shuffle, parallel interleaved reads, sharding by
    worker, record-level shuffle and repeat. Eval/predict: a plain
    single-pass read. Records are then parsed, batched, prefetched and
    preprocessed; PREDICT yields features only, other modes yield
    (features, labels).
    """
    # Normalize input_path into a list of glob patterns.
    if type(self._input_path) != list:
      self._input_path = self._input_path.split(',')
    file_paths = []
    for x in self._input_path:
      file_paths.extend(tf.gfile.Glob(x))
    assert len(file_paths) > 0, 'match no files with %s' % self._input_path
    num_parallel_calls = self._data_config.num_parallel_calls
    data_compression_type = self._data_config.data_compression_type
    if mode == tf.estimator.ModeKeys.TRAIN:
      logging.info('train files[%d]: %s' %
                   (len(file_paths), ','.join(file_paths)))
      dataset = tf.data.Dataset.from_tensor_slices(file_paths)
      if self._data_config.shuffle:
        # shuffle input files
        dataset = dataset.shuffle(len(file_paths))
      # too many readers read the same file will cause performance issues
      # as the same data will be read multiple times
      parallel_num = min(num_parallel_calls, len(file_paths))
      dataset = dataset.interleave(
          lambda x: tf.data.TFRecordDataset(
              x, compression_type=data_compression_type),
          cycle_length=parallel_num,
          num_parallel_calls=parallel_num)
      # Each worker reads a disjoint subset of the interleaved records.
      dataset = dataset.shard(self._task_num, self._task_index)
      if self._data_config.shuffle:
        # Record-level shuffle with a fixed seed for reproducibility.
        dataset = dataset.shuffle(
            self._data_config.shuffle_buffer_size,
            seed=2020,
            reshuffle_each_iteration=True)
      dataset = dataset.repeat(self.num_epochs)
    else:
      logging.info('eval files[%d]: %s' %
                   (len(file_paths), ','.join(file_paths)))
      dataset = tf.data.TFRecordDataset(
          file_paths, compression_type=data_compression_type)
      dataset = dataset.repeat(1)
    dataset = dataset.map(
        self._parse_tfrecord, num_parallel_calls=num_parallel_calls)
    dataset = dataset.batch(self._data_config.batch_size)
    dataset = dataset.prefetch(buffer_size=self._prefetch_size)
    dataset = dataset.map(
        map_func=self._preprocess, num_parallel_calls=num_parallel_calls)
    dataset = dataset.prefetch(buffer_size=self._prefetch_size)
    # PREDICT yields features only; other modes yield (features, labels).
    if mode != tf.estimator.ModeKeys.PREDICT:
      dataset = dataset.map(lambda x:
                            (self._get_features(x), self._get_labels(x)))
    else:
      dataset = dataset.map(lambda x: (self._get_features(x)))
    return dataset
| 37.368421 | 80 | 0.661972 |
7031629af4908404917c54e64d7c698f0d47351b | 3,698 | py | Python | backend/apps/volontulo/tests/views/api/organizations/test_read.py | magul/volontulo | 778168219e003b585604ee88cc54d03575bdc00e | [
"MIT"
] | 16 | 2016-10-29T19:45:11.000Z | 2021-04-23T03:54:22.000Z | backend/apps/volontulo/tests/views/api/organizations/test_read.py | magul/volontulo | 778168219e003b585604ee88cc54d03575bdc00e | [
"MIT"
] | 1,063 | 2016-10-28T17:43:39.000Z | 2018-12-12T17:42:19.000Z | backend/apps/volontulo/tests/views/api/organizations/test_read.py | magul/volontulo | 778168219e003b585604ee88cc54d03575bdc00e | [
"MIT"
] | 60 | 2016-11-02T18:28:35.000Z | 2018-11-07T17:01:05.000Z | """
.. module:: test_read
"""
from rest_framework import status
from rest_framework.test import APITestCase
from apps.volontulo.factories import OrganizationFactory
from apps.volontulo.factories import UserFactory
class _TestOrganizationsReadAPIView(APITestCase):
    """Tests for REST API's read organization view."""

    def _test_organization_read_fields(self, organization):
        """Assert the serialized organization has exactly the expected fields."""
        expected_fields = (
            ('address', str),
            ('description', str),
            ('id', int),
            ('name', str),
            ('slug', str),
            ('url', str),
        )
        for field_name, field_type in expected_fields:
            self.assertIsInstance(organization.pop(field_name), field_type)
        # Nothing besides the expected fields may be present.
        self.assertEqual(len(organization), 0)
class TestAdminUserOrganizationsReadAPIView(_TestOrganizationsReadAPIView):
    """Tests for REST API's read organization view for admin user."""

    def setUp(self):
        """Log an administrator in before each test."""
        super().setUp()
        admin = UserFactory(userprofile__is_administrator=True)
        self.client.force_login(admin)

    def test_organization_read_status(self):
        """Organizations are readable by admins (as by everyone)."""
        organization = OrganizationFactory()
        response = self.client.get(f'/api/organizations/{organization.id}/')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self._test_organization_read_fields(response.data)
class TestOrganizationUserOrganizationsReadAPIView(
        _TestOrganizationsReadAPIView):
    """Tests for API's read organization view for user with organization."""

    def setUp(self):
        """Log in a user that belongs to an organization."""
        super().setUp()
        member = UserFactory(
            userprofile__organizations=[OrganizationFactory()])
        self.client.force_login(member)

    def test_organization_read_status(self):
        """Organizations are readable by organization members (as by everyone)."""
        organization = OrganizationFactory()
        response = self.client.get(f'/api/organizations/{organization.id}/')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self._test_organization_read_fields(response.data)
class TestRegularUserOrganizationsReadAPIView(_TestOrganizationsReadAPIView):
    """Tests for REST API's read organization view for regular user."""

    def setUp(self):
        """Set up each test."""
        super().setUp()
        self.client.force_login(UserFactory())

    def test_organization_read_status(self):
        """Test organization's read status for regular user.

        Organizations are readable for everyone.
        """
        # FIX: a duplicate ``self.client.force_login(UserFactory())`` used to
        # run here, creating and logging in a second throw-away user even
        # though setUp() already authenticates one; the sibling test classes
        # only log in from setUp(), so the redundant call was removed.
        response = self.client.get(
            '/api/organizations/{id}/'.format(id=OrganizationFactory().id))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self._test_organization_read_fields(response.data)
class TestAnonymousUserOrganizationsReadAPIView(_TestOrganizationsReadAPIView):
    """Tests for REST API's read organization view for anonymous user."""

    # No setUp override: the client stays unauthenticated on purpose.
    def test_organization_read_status(self):
        """Test organization's read status for anonymous user.

        Organizations are readable for everyone.
        """
        response = self.client.get(
            '/api/organizations/{id}/'.format(id=OrganizationFactory().id))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self._test_organization_read_fields(response.data)
| 33.315315 | 79 | 0.695782 |
e7f2a75349f080e6ef9556951fc033879ae1e187 | 1,969 | py | Python | application/api.py | DonBlaine/OpenDoorData | 74740c6ff6dca893f0389963f2ef12de42a36829 | [
"MIT"
] | null | null | null | application/api.py | DonBlaine/OpenDoorData | 74740c6ff6dca893f0389963f2ef12de42a36829 | [
"MIT"
] | null | null | null | application/api.py | DonBlaine/OpenDoorData | 74740c6ff6dca893f0389963f2ef12de42a36829 | [
"MIT"
] | null | null | null | # file that contains db models to be exposed via a REST API
from models import room, survey, wifi_log, timetable, module # import db models
from app import app # import Flask app
from auth import auth # import Auth app to provide user authentificaiton
from flask import request # import request object to parse json request data
from flask_peewee.rest import RestAPI,UserAuthentication, RestrictOwnerResource, AdminAuthentication
# create RestrictOwnerResource subclass which prevents users modifying another user's content
class SurveyResource(RestrictOwnerResource):
    """Survey endpoint resource that stops users editing other users' content."""

    owner_field = 'reporter'

    def check_post(self):
        """Allow a POST only when the reporter teaches the posted module."""
        payload = request.get_json()  # parse incoming JSON request data
        reporter = payload["reporter"]
        module_code = payload["module_code"]
        matching_modules = module.select().where(
            module.module_code == module_code)
        # Authorized when any matching module lists the reporter as instructor.
        return any(str(entry.instructor) == reporter
                   for entry in matching_modules)
# instantiate UserAuthentication
user_auth = UserAuthentication(auth)
# instantiate admin-only auth
admin_auth = AdminAuthentication(auth)
# instantiate our api wrapper, specifying user_auth as the default
api = RestAPI(app, default_auth=user_auth)
# register models so they are exposed via /api/<model>/
# NOTE: only ``survey`` accepts POSTs from ordinary authenticated users
# (guarded by SurveyResource above); every other model is read-only and
# requires admin authentication.
api.register(room, auth=admin_auth, allowed_methods=['GET'])
api.register(survey,SurveyResource,allowed_methods=['GET','POST'])
api.register(wifi_log, auth=admin_auth,allowed_methods=['GET'])
api.register(timetable, auth=admin_auth, allowed_methods=['GET'])
api.register(module, auth=admin_auth, allowed_methods=['GET'])
| 39.38 | 145 | 0.739462 |
ddc801cb7f36547573640fd3ba477abc9d63bb64 | 594 | py | Python | FinBoost/tree/__init__.py | VictorLi-QES/scikit-learn | 40b5a835cf4174f008af794a19556958eba957db | [
"BSD-3-Clause"
] | null | null | null | FinBoost/tree/__init__.py | VictorLi-QES/scikit-learn | 40b5a835cf4174f008af794a19556958eba957db | [
"BSD-3-Clause"
] | null | null | null | FinBoost/tree/__init__.py | VictorLi-QES/scikit-learn | 40b5a835cf4174f008af794a19556958eba957db | [
"BSD-3-Clause"
] | null | null | null | """
The :mod:`FinBoost.tree` module includes decision tree-based models for
classification and regression.
"""
from ._classes import BaseDecisionTree
from ._classes import DecisionTreeClassifier
from ._classes import DecisionTreeRegressor
from ._classes import ExtraTreeClassifier
from ._classes import ExtraTreeRegressor
from ._export import export_graphviz, plot_tree, export_text
# Public API of FinBoost.tree: the estimator classes plus the export helpers.
__all__ = [
    "BaseDecisionTree",
    "DecisionTreeClassifier",
    "DecisionTreeRegressor",
    "ExtraTreeClassifier",
    "ExtraTreeRegressor",
    "export_graphviz",
    "plot_tree",
    "export_text",
]
| 25.826087 | 71 | 0.776094 |
fe762d644d52a8d50700de9b3421a6812c1ff7aa | 5,079 | py | Python | submissions/Ottenlips/mygames.py | WhittKinley/Legos | 861a2651f481d0463003007694c895d3df3b0fee | [
"MIT"
] | null | null | null | submissions/Ottenlips/mygames.py | WhittKinley/Legos | 861a2651f481d0463003007694c895d3df3b0fee | [
"MIT"
] | null | null | null | submissions/Ottenlips/mygames.py | WhittKinley/Legos | 861a2651f481d0463003007694c895d3df3b0fee | [
"MIT"
] | null | null | null | from games import Game
from math import nan, isnan
from queue import PriorityQueue
from copy import deepcopy
from utils import isnumber
from grading.util import print_table
class GameState:
    """Snapshot of a game: whose turn it is, the position, the board, scores."""

    def __init__(self, to_move, position, board, label=None):
        """Create a state; the shared running score always starts at {'S': 0}."""
        self.to_move = to_move
        self.position = position
        self.board = board
        self.label = label
        # Running total keyed by 'S' (sum of the values chosen so far).
        self.scores = {'S': 0}

    def __str__(self):
        """Use the human-readable label when present, else the default repr."""
        return super().__str__() if self.label is None else self.label
class Move:
    """A move: a board position together with the value claimed there."""

    def __init__(self, p, v):
        self.position = p
        self.value = v

    # BUG FIX: the original class body contained
    # ``self.initial = GameState(to_move='Player One')`` at class (not method)
    # level, which raised NameError('self') the moment the module was
    # imported (and would also have failed GameState's required-argument
    # check); the broken statement has been removed.

    def pv(self):
        """Return the (position, value) pair."""
        return self.position, self.value
class Star29(Game):
    """
    An implementation of ThinkAhead: two players alternately pick one of the
    values currently offered on a five-node star board; each pick adds its
    value to the shared running score 'S', and the game ends once that score
    reaches 29.
    """

    def __init__(self, state):
        self.initial = state
        # Keep the original five-value board: after the first move
        # ``state.board`` shrinks to a two-value tuple, but the neighbour
        # lookups in result() are expressed against the full starting board.
        self.startingBoard = state.board

    def actions(self, state):
        """The legal moves are exactly the values currently on offer."""
        return state.board

    # defines the order of play
    def opponent(self, player):
        """Return the player who moves after ``player``."""
        if player == 'Player One':
            return 'Player Two'
        # BUG FIX: the original repeated the 'Player One' test here, so
        # opponent('Player Two') fell through and returned None, leaving
        # ``to_move`` as None for the rest of the game.
        if player == 'Player Two':
            return 'Player One'
        return None

    def result(self, state, move):
        """Return the successor state after ``move`` is taken.

        The chosen value is added to the shared score 'S' and the board is
        replaced by the two neighbours of the chosen node on the star.
        """
        currMover = state.to_move
        nextMover = self.opponent(currMover)
        value = move
        newState = deepcopy(state)
        newState.to_move = nextMover
        newState.position = move
        # Map each starting-board node onto its two star neighbours.
        if value == self.startingBoard[0]:
            newState.board = (self.startingBoard[2], self.startingBoard[3])
        if value == self.startingBoard[1]:
            newState.board = (self.startingBoard[3], self.startingBoard[4])
        if value == self.startingBoard[2]:
            newState.board = (self.startingBoard[4], self.startingBoard[0])
        if value == self.startingBoard[3]:
            newState.board = (self.startingBoard[0], self.startingBoard[1])
        if value == self.startingBoard[4]:
            newState.board = (self.startingBoard[1], self.startingBoard[2])
        newState.scores['S'] += value
        self.lastMove = newState.position
        return newState

    def utility(self, state, player):
        """If a player pushes the score to 29 they lose."""
        if player == 'Player One' and state.scores['S'] == 29:
            return -1
        if state.scores['S'] <= 28:
            # NOTE(review): ``range(2, 5) in state.board`` tests whether the
            # range *object* is an element of the board tuple, which is always
            # False, so this branch always returns 1. The intent was probably
            # to check whether any value in 2..4 is still available; kept
            # as-is because the intended rule cannot be confirmed from here.
            if range(2, 5) in state.board:
                return -1
            else:
                return 1
        return 0

    def terminal_test(self, state):
        """A state is terminal once the running score reaches 29."""
        if state.scores['S'] >= 29:
            return 1
        return 0

    def display(self, state):
        """Pretty-print the current move options and the running score."""
        if len(state.board) == 2:
            print("--> "+str(state.board[0])+" or --> "+ str(state.board[1]))
        else:
            print("First move "+"\n   " + str(state.board[0])+"\n"+str(state.board[1])+"  "+str(state.board[4])+"\n"+str(state.board[2])+"   "+str(state.board[3]))
        print('Score: ' + str(state.scores))
# Pre-built fixture states at various running scores ('S'), used as the
# starting points registered in ``myGames`` below.
full_game = GameState(
    to_move = 'Player One',
    position = 0,
    board=[1,2,3,4,5],
    label = 'full'
)
full_game.scores = {'S':0}
next_game = GameState(
    to_move = 'Player One',
    position = 0,
    board=[1,2,3,4,5],
    label = 'S 5'
)
next_game.scores = {'S':5}
next_game2 = GameState(
    to_move = 'Player One',
    position = 0,
    board=[1,2,3,4,5],
    label = 'S 10'
)
next_game2.scores = {'S':10}
mid_game = GameState(
    to_move = 'Player One',
    position = 0,
    board=[1,2,3,4,5],
    label = 'S 15'
)
mid_game.scores = {'S':15}
almost_done_game = GameState(
    to_move = 'Player One',
    position = 0,
    board=[1,2,3,4,5],
    label = 'S 20'
)
almost_done_game.scores = {'S':20}
won_game = GameState(
    to_move = 'Player One',
    position = 0,
    board=[1,2,3,4,5],
    label = 'wonS27'
)
won_game.scores = {'S':27}
# Game instance plus the list of fixture states the harness will play from.
thinkA = Star29(full_game)
myGames = {
    thinkA: [
        full_game,
        next_game,
        next_game2,
        mid_game,
        almost_done_game,
        won_game
    ]
}
3903f20fca20becad1439fdd1580eff0e0b15551 | 249 | py | Python | ml_snek/models/base_model.py | joram/ml-snek | e1ed8aa831a4683dfe51a6af0cb25a44c3978903 | [
"MIT"
] | null | null | null | ml_snek/models/base_model.py | joram/ml-snek | e1ed8aa831a4683dfe51a6af0cb25a44c3978903 | [
"MIT"
] | 13 | 2019-12-25T21:04:49.000Z | 2020-01-04T20:25:05.000Z | ml_snek/models/base_model.py | joram/ml-snek | e1ed8aa831a4683dfe51a6af0cb25a44c3978903 | [
"MIT"
] | null | null | null | class BaseModel(object):
def save(self, save_path):
raise NotImplementedError()
def load(self, save_path):
raise NotImplementedError()
def train(self, input_values, expected_output):
raise NotImplementedError()
| 24.9 | 51 | 0.686747 |
10efe27ac37122b887999fa50992a93f16c72bf3 | 4,146 | py | Python | xarray/backends/pseudonetcdf_.py | ajwdewit/xarray | ff56e726f4b63a11cf8c5c6fac8f0a519c921fd8 | [
"CC-BY-4.0",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | xarray/backends/pseudonetcdf_.py | ajwdewit/xarray | ff56e726f4b63a11cf8c5c6fac8f0a519c921fd8 | [
"CC-BY-4.0",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | xarray/backends/pseudonetcdf_.py | ajwdewit/xarray | ff56e726f4b63a11cf8c5c6fac8f0a519c921fd8 | [
"CC-BY-4.0",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-07-13T07:06:10.000Z | 2021-07-13T07:06:10.000Z | import numpy as np
from ..core import indexing
from ..core.utils import Frozen, FrozenDict, close_on_error
from ..core.variable import Variable
from .common import AbstractDataStore, BackendArray
from .file_manager import CachingFileManager
from .locks import HDF5_LOCK, NETCDFC_LOCK, combine_locks, ensure_lock
from .plugins import BackendEntrypoint
from .store import open_backend_dataset_store
# PseudoNetCDF can invoke netCDF libraries internally, so combine the
# HDF5 and netCDF-C locks into one lock guarding all such calls.
PNETCDF_LOCK = combine_locks([HDF5_LOCK, NETCDFC_LOCK])
class PncArrayWrapper(BackendArray):
    """Lazy array adapter exposing one PseudoNetCDF variable to xarray."""

    def __init__(self, variable_name, datastore):
        self.datastore = datastore
        self.variable_name = variable_name
        # Cache shape/dtype eagerly so metadata queries need no file access.
        array = self.get_array()
        self.shape = array.shape
        self.dtype = np.dtype(array.dtype)

    def get_array(self, needs_lock=True):
        """Return the underlying file variable via the caching file manager."""
        ds = self.datastore._manager.acquire(needs_lock)
        return ds.variables[self.variable_name]

    def __getitem__(self, key):
        # Let xarray decompose arbitrary indexers into the outer+1-vector
        # subset this backend supports, delegating raw access to _getitem.
        return indexing.explicit_indexing_adapter(
            key, self.shape, indexing.IndexingSupport.OUTER_1VECTOR, self._getitem
        )

    def _getitem(self, key):
        """Read data while holding the store lock (no re-acquire inside)."""
        with self.datastore.lock:
            array = self.get_array(needs_lock=False)
            return array[key]
class PseudoNetCDFDataStore(AbstractDataStore):
    """Store for accessing datasets via PseudoNetCDF"""

    @classmethod
    def open(cls, filename, lock=None, mode=None, **format_kwargs):
        """Open ``filename`` with ``PseudoNetCDF.pncopen`` behind a caching
        file manager and return a ready-to-use store."""
        from PseudoNetCDF import pncopen

        keywords = {"kwargs": format_kwargs}
        # only include mode if explicitly passed
        if mode is not None:
            keywords["mode"] = mode
        if lock is None:
            lock = PNETCDF_LOCK
        manager = CachingFileManager(pncopen, filename, lock=lock, **keywords)
        return cls(manager, lock)

    def __init__(self, manager, lock=None):
        self._manager = manager
        self.lock = ensure_lock(lock)

    @property
    def ds(self):
        """The (possibly re-acquired) underlying PseudoNetCDF dataset."""
        return self._manager.acquire()

    def open_store_variable(self, name, var):
        """Wrap one file variable as a lazily-indexed xarray Variable."""
        data = indexing.LazilyOuterIndexedArray(PncArrayWrapper(name, self))
        attrs = {k: getattr(var, k) for k in var.ncattrs()}
        return Variable(var.dimensions, data, attrs)

    def get_variables(self):
        return FrozenDict(
            (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()
        )

    def get_attrs(self):
        return Frozen({k: getattr(self.ds, k) for k in self.ds.ncattrs()})

    def get_dimensions(self):
        return Frozen(self.ds.dimensions)

    def get_encoding(self):
        """Report which dimensions are unlimited (appendable)."""
        return {
            "unlimited_dims": {
                k for k in self.ds.dimensions if self.ds.dimensions[k].isunlimited()
            }
        }

    def close(self):
        self._manager.close()
def open_backend_dataset_pseudonetcdf(
    filename_or_obj,
    mask_and_scale=False,
    decode_times=None,
    concat_characters=None,
    decode_coords=None,
    drop_variables=None,
    use_cftime=None,
    decode_timedelta=None,
    mode=None,
    lock=None,
    **format_kwargs,
):
    """Backend entry point: open a PseudoNetCDF-supported file as a Dataset.

    ``mode``, ``lock`` and ``format_kwargs`` go to the PseudoNetCDF store;
    the decode_* flags are forwarded to the generic store opener. On any
    decoding failure the store is closed before the error propagates.
    """
    store = PseudoNetCDFDataStore.open(
        filename_or_obj, lock=lock, mode=mode, **format_kwargs
    )
    with close_on_error(store):
        ds = open_backend_dataset_store(
            store,
            mask_and_scale=mask_and_scale,
            decode_times=decode_times,
            concat_characters=concat_characters,
            decode_coords=decode_coords,
            drop_variables=drop_variables,
            use_cftime=use_cftime,
            decode_timedelta=decode_timedelta,
        )
    return ds
# *args and **kwargs are not allowed in open_backend_dataset_ kwargs,
# unless the open_dataset_parameters are explicitly defined like this:
open_dataset_parameters = (
    "filename_or_obj",
    "mask_and_scale",
    "decode_times",
    "concat_characters",
    "decode_coords",
    "drop_variables",
    "use_cftime",
    "decode_timedelta",
    "mode",
    "lock",
)
# Entry point object through which xarray discovers this backend.
pseudonetcdf_backend = BackendEntrypoint(
    open_dataset=open_backend_dataset_pseudonetcdf,
    open_dataset_parameters=open_dataset_parameters,
)
| 29.197183 | 85 | 0.675591 |
86d8026087f66b8b443cf67a752f278e28b5dfb7 | 6,564 | py | Python | HW3-2/lib/model.py | b05611038/MLDS_2019SPRING | 0591a1a6f461da0a02b9e1b83f37ad3579f36f4d | [
"MIT"
] | 3 | 2019-06-20T06:47:30.000Z | 2021-11-05T03:16:37.000Z | HW3-2/lib/model.py | b05611038/MLDS_2019SPRING | 0591a1a6f461da0a02b9e1b83f37ad3579f36f4d | [
"MIT"
] | null | null | null | HW3-2/lib/model.py | b05611038/MLDS_2019SPRING | 0591a1a6f461da0a02b9e1b83f37ad3579f36f4d | [
"MIT"
] | null | null | null | import numpy as np
import torch
import torch.cuda as cuda
import torch.nn as nn
import torch.nn.functional as F
from lib.utils import *
def weights_init(model):
    """DCGAN-style layer initializer; apply via ``module.apply(weights_init)``."""
    layer_kind = type(model).__name__
    if 'Conv' in layer_kind:
        # Convolutions: N(0, 0.02) weights.
        nn.init.normal_(model.weight.data, 0.0, 0.02)
    elif 'Linear' in layer_kind:
        # Fully-connected layers: N(0, 0.02) weights and biases.
        nn.init.normal_(model.weight.data, 0.0, 0.02)
        nn.init.normal_(model.bias.data, 0.0, 0.02)
    elif 'BatchNorm' in layer_kind:
        # BatchNorm: scale around 1, zero shift.
        nn.init.normal_(model.weight.data, 1.0, 0.02)
        nn.init.constant_(model.bias.data, 0.0)
class Text2ImageGAN(nn.Module):
    """Container pairing a text-conditioned generator and discriminator.

    ``forward`` dispatches on ``mode``: 'generate' expects feed=[text, ...]
    and samples a latent vector; 'discriminate' expects feed=[text, image].
    """

    def __init__(self, text_length, device, distribution = 'torch', noise_length = 100, out_channel = 3, channel = 64, sigmoid_used = True, init_weight = True):
        super(Text2ImageGAN, self).__init__()
        self.text_length = text_length
        self.device = device
        # Which RNG backend _latent_random uses: 'uniform'/'normal' (numpy)
        # or 'torch' (torch.randn).
        self.distribution = distribution
        self.noise_length = noise_length
        self.out_channel = out_channel
        self.channel = channel
        self.sigmoid_used = sigmoid_used
        self.init_weight = init_weight
        self.generator = T2IGenerator(channel, out_channel, noise_length, text_length)
        self.discriminator = T2IDiscriminator(out_channel, channel, text_length, sigmoid_used)
        if init_weight:
            # DCGAN-style weight initialization (see weights_init above).
            self.generator.apply(weights_init)
            self.discriminator.apply(weights_init)
        self.generator = self.generator.float().to(self.device)
        self.discriminator = self.discriminator.float().to(self.device)

    def forward(self, feed, mode = 'generate'):
        # feed is list for [text, image]
        if mode not in ['generate', 'discriminate']:
            raise ValueError('Please check the model mode, [generate or discrimiate].')
        if mode == 'generate':
            # One latent vector per text row in the batch.
            latent_vector = self._latent_random(feed[0].size(0), self.distribution)
            return self.generator(feed[0], latent_vector)
        elif mode == 'discriminate':
            return self.discriminator(feed[1], feed[0])
        else:
            # Unreachable: mode was already validated above.
            raise RuntimeError('Please check the model mode.')

    def _latent_random(self, numbers, distribution):
        """Sample a (numbers, noise_length) latent batch on self.device."""
        if distribution == 'uniform':
            latent = np.random.uniform(-1, 1, (numbers, self.noise_length))
            return torch.tensor(latent).float().to(self.device)
        elif distribution == 'normal':
            latent = np.random.normal(0, 1, (numbers, self.noise_length))
            return torch.tensor(latent).float().to(self.device)
        elif distribution == 'torch':
            latent = torch.randn(numbers, self.noise_length)
            return latent.float().to(self.device)
        else:
            raise RuntimeError("Can't generate random latent vector.")
class T2IGenerator(nn.Module):
    """Text-conditioned DCGAN generator: (text, noise) -> 64x64 image."""

    def __init__(self, channel, out_channel, noise_length, text_length):
        super(T2IGenerator, self).__init__()
        self.channel = channel
        self.out_channel = out_channel
        self.noise_length = noise_length
        self.text_length = text_length
        # Embed the text vector, concatenate with noise, then project to a
        # 4x4 spatial seed with channel*8 feature maps.
        self.embedding = nn.Linear(text_length, channel * 4)
        self.dense = nn.Linear(channel * 4 + noise_length, 4 * 4 * channel * 8)
        self.main = nn.Sequential(
            # size: [batch * (channel * 8) * 4 * 4]
            nn.ConvTranspose2d(channel * 8, channel * 4, kernel_size = 5, stride = 2, padding = 2, output_padding = 1, bias = False),
            nn.BatchNorm2d(channel * 4, momentum = 0.9),
            nn.ReLU(inplace = True),
            # size [batch * (channel * 4) * 8 * 8]
            nn.ConvTranspose2d(channel * 4, channel * 2, kernel_size = 5, stride = 2, padding = 2, output_padding = 1, bias = False),
            nn.BatchNorm2d(channel * 2, momentum = 0.9),
            nn.ReLU(inplace = True),
            # size [batch * (channel * 2) * 16 * 16]
            nn.ConvTranspose2d(channel * 2, channel, kernel_size = 5, stride = 2, padding = 2, output_padding = 1, bias = False),
            nn.BatchNorm2d(channel, momentum = 0.9),
            nn.ReLU(inplace = True),
            # size [batch * channel * 32 * 32]
            nn.ConvTranspose2d(channel, out_channel, kernel_size = 5, stride = 2, padding = 2, output_padding = 1, bias = False),
            nn.Tanh()
            # size: [batch * out_channel * 64 * 64]
        )

    def forward(self, text, noise):
        """Generate images from a batch of text vectors and latent noise."""
        text = self.embedding(text)
        x = torch.cat((noise, text), dim = 1)
        x = self.dense(x)
        # Reshape the dense output into the 4x4 spatial seed for the deconvs.
        x = x.view(x.size(0), self.channel * 8, 4, 4)
        return self.main(x)
class T2IDiscriminator(nn.Module):
    """Text-conditioned DCGAN discriminator: (image, text) -> realness score."""

    def __init__(self, in_channel, channel, text_length, sigmoid_used):
        super( T2IDiscriminator, self).__init__()
        self.in_channel = in_channel
        self.channel = channel
        self.text_length = text_length
        # When False, raw logits are returned (e.g. for WGAN-style losses).
        self.sigmoid_used = sigmoid_used
        self.embedding = nn.Linear(text_length, channel * 4)
        # Strided conv stack: 64x64 input down to a 4x4 feature map.
        self.main = nn.Sequential(
            nn.Conv2d(in_channel, channel, kernel_size = 5, stride = 2, padding = 2, bias = False),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Conv2d(channel, channel * 2, kernel_size = 5, stride = 2, padding = 2, bias = False),
            nn.BatchNorm2d(channel * 2),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Conv2d(channel * 2, channel * 4, kernel_size = 5, stride = 2, padding = 2, bias = False),
            nn.BatchNorm2d(channel * 4),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Conv2d(channel * 4, channel * 8, kernel_size = 5, stride = 2, padding = 2, bias = False),
            nn.BatchNorm2d(channel * 8),
            nn.LeakyReLU(0.2, inplace = True),
        )
        # 1x1 conv fusing image features with the broadcast text embedding.
        self.conv = nn.Conv2d((channel * 8 + channel * 4), channel * 8,
                kernel_size = 1, stride = 1, padding = 0, bias = False)
        self.lrelu = nn.LeakyReLU(0.2, inplace = True)
        self.dense = nn.Linear(channel * 8 * 4 * 4, 1)

    def forward(self, image, text):
        """Score a batch of images conditioned on their text vectors."""
        text_embedding = self.embedding(text)
        # NOTE(review): the hard-coded 256 equals channel * 4 only when
        # channel == 64 (the default); other channel values would break here.
        text_embedding = text_embedding.view(-1, 256, 1, 1).repeat(1, 1, 4, 4)
        x = self.main(image)
        x = torch.cat((x, text_embedding), dim = 1)
        x = self.conv(x)
        x = self.lrelu(x).view(x.size(0), -1)
        x = self.dense(x)
        if self.sigmoid_used:
            return torch.sigmoid(x)
        else:
            return x
| 42.076923 | 160 | 0.591255 |
e46ae60556a70e200844e8ff422d78a1b36c08c2 | 238 | py | Python | src/python/bin/args.py | matrix65537/xgo | f1b289cb5bb47a548a915223f9a32ef15299f8e7 | [
"MIT"
] | null | null | null | src/python/bin/args.py | matrix65537/xgo | f1b289cb5bb47a548a915223f9a32ef15299f8e7 | [
"MIT"
] | null | null | null | src/python/bin/args.py | matrix65537/xgo | f1b289cb5bb47a548a915223f9a32ef15299f8e7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
def main():
    """Print argv and, if stdin is non-empty, report how many bytes it held."""
    # NOTE(review): Python 2 print statements — this script will not run on
    # Python 3 without converting these to print() calls.
    print sys.argv
    data = sys.stdin.read()
    length = len(data)
    if length > 0:
        print "length = %d" %(length)

if __name__ == '__main__':
    main()
b6274123ce1d52b7feba9223f8f2dc97797eb23f | 4,345 | py | Python | ddtrace/contrib/logging/patch.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | ddtrace/contrib/logging/patch.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2022-02-16T09:35:37.000Z | 2022-03-04T16:48:45.000Z | ddtrace/contrib/logging/patch.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2022-02-11T16:34:22.000Z | 2022-02-11T16:34:22.000Z | import logging
import attr
import ddtrace
from ...internal.utils import get_argument_value
from ...vendor.wrapt import wrap_function_wrapper as _w
from ..trace_utils import unwrap as _u
# LogRecord attribute names used for trace/service correlation injection.
RECORD_ATTR_TRACE_ID = "dd.trace_id"
RECORD_ATTR_SPAN_ID = "dd.span_id"
RECORD_ATTR_ENV = "dd.env"
RECORD_ATTR_VERSION = "dd.version"
RECORD_ATTR_SERVICE = "dd.service"
# Fallback values stamped when no span is active / no tag is configured.
RECORD_ATTR_VALUE_ZERO = "0"
RECORD_ATTR_VALUE_EMPTY = ""
# Attribute under which internal loggers may stash the active span explicitly
# (see _w_makeRecord).
_LOG_SPAN_KEY = "__datadog_log_span"
ddtrace.config._add(
    "logging",
    dict(
        tracer=None,
    ),
)  # by default, override here for custom tracer
@attr.s(slots=True)
class DDLogRecord(object):
    """Typed holder attached to a LogRecord as ``record.dd`` so that
    ``{dd.service}``-style format strings can resolve its attributes."""

    trace_id = attr.ib(type=int)
    span_id = attr.ib(type=int)
    service = attr.ib(type=str)
    version = attr.ib(type=str)
    env = attr.ib(type=str)
def _get_current_span(tracer=None):
    """Return the currently active span, or None when tracing is unavailable."""
    active_tracer = tracer or ddtrace.tracer
    # During library initialization ``ddtrace.tracer`` may still be the
    # ``tracer`` module rather than a tracer instance, so probe defensively
    # for an ``enabled`` attribute before using it.
    if getattr(active_tracer, "enabled", False):
        return active_tracer.current_span()
    return None
def _w_makeRecord(func, instance, args, kwargs):
    """Wrapper around ``Logger.makeRecord`` that stamps service/env/version
    and the active trace/span ids onto every created LogRecord."""
    # Get the LogRecord instance for this log
    record = func(*args, **kwargs)
    setattr(record, RECORD_ATTR_VERSION, ddtrace.config.version or "")
    setattr(record, RECORD_ATTR_ENV, ddtrace.config.env or "")
    setattr(record, RECORD_ATTR_SERVICE, ddtrace.config.service or "")
    # logs from internal logger may explicitly pass the current span to
    # avoid deadlocks in getting the current span while already in locked code.
    span_from_log = getattr(record, _LOG_SPAN_KEY, None)
    if isinstance(span_from_log, ddtrace.Span):
        span = span_from_log
    else:
        span = _get_current_span(tracer=ddtrace.config.logging.tracer)
    if span:
        setattr(record, RECORD_ATTR_TRACE_ID, str(span.trace_id))
        setattr(record, RECORD_ATTR_SPAN_ID, str(span.span_id))
    else:
        # No active span: stamp "0" so format strings still resolve.
        setattr(record, RECORD_ATTR_TRACE_ID, RECORD_ATTR_VALUE_ZERO)
        setattr(record, RECORD_ATTR_SPAN_ID, RECORD_ATTR_VALUE_ZERO)
    return record
def _w_StrFormatStyle_format(func, instance, args, kwargs):
    """Wrapper for str.format-style formatting: temporarily attach a
    ``record.dd`` object so ``{dd.trace_id}``-style fields resolve."""
    # The format string "dd.service={dd.service}" expects
    # the record to have a "dd" property which is an object that
    # has a "service" property
    # PercentStyle, and StringTemplateStyle both look for
    # a "dd.service" property on the record
    record = get_argument_value(args, kwargs, 0, "record")
    record.dd = DDLogRecord(
        trace_id=getattr(record, RECORD_ATTR_TRACE_ID, RECORD_ATTR_VALUE_ZERO),
        span_id=getattr(record, RECORD_ATTR_SPAN_ID, RECORD_ATTR_VALUE_ZERO),
        service=getattr(record, RECORD_ATTR_SERVICE, ""),
        version=getattr(record, RECORD_ATTR_VERSION, ""),
        env=getattr(record, RECORD_ATTR_ENV, ""),
    )
    try:
        return func(*args, **kwargs)
    finally:
        # We need to remove this extra attribute so it does not pollute other formatters
        # For example: if we format with StrFormatStyle and then a JSON logger
        # then the JSON logger will have `dd.{service,version,env,trace_id,span_id}` as
        # well as the `record.dd` `DDLogRecord` instance
        del record.dd
def patch():
    """
    Patch ``logging`` module in the Python Standard Library for injection of
    tracer information by wrapping the base factory method ``Logger.makeRecord``
    """
    # Idempotency guard: never wrap twice.
    if getattr(logging, "_datadog_patch", False):
        return
    setattr(logging, "_datadog_patch", True)
    _w(logging.Logger, "makeRecord", _w_makeRecord)
    if hasattr(logging, "StrFormatStyle"):
        # Wrap whichever formatting hook this CPython version exposes:
        # some versions implement ``format`` via an internal ``_format``.
        if hasattr(logging.StrFormatStyle, "_format"):
            _w(logging.StrFormatStyle, "_format", _w_StrFormatStyle_format)
        else:
            _w(logging.StrFormatStyle, "format", _w_StrFormatStyle_format)
def unpatch():
    """Undo :func:`patch`, restoring the original logging methods."""
    if getattr(logging, "_datadog_patch", False):
        setattr(logging, "_datadog_patch", False)
        _u(logging.Logger, "makeRecord")
        if hasattr(logging, "StrFormatStyle"):
            # Unwrap the same hook that patch() wrapped for this version.
            if hasattr(logging.StrFormatStyle, "_format"):
                _u(logging.StrFormatStyle, "_format")
            else:
                _u(logging.StrFormatStyle, "format")
| 33.423077 | 98 | 0.695052 |
17e40efb1568d97a31b8a25a292ee079e552f819 | 1,607 | py | Python | functions/sample/python/get_review.py | kirankumarsripati/agfzb-CloudAppDevelopment_Capstone | 0155976113bed72b5081fdb545246641e7c2ea0d | [
"Apache-2.0"
] | null | null | null | functions/sample/python/get_review.py | kirankumarsripati/agfzb-CloudAppDevelopment_Capstone | 0155976113bed72b5081fdb545246641e7c2ea0d | [
"Apache-2.0"
] | null | null | null | functions/sample/python/get_review.py | kirankumarsripati/agfzb-CloudAppDevelopment_Capstone | 0155976113bed72b5081fdb545246641e7c2ea0d | [
"Apache-2.0"
] | null | null | null | #
#
# main() will be run when you invoke this action
#
# @param Cloud Functions actions accept a single parameter, which must be a JSON object.
#
# @return The output of this action, which must be a JSON object.
#
#
from cloudant.client import Cloudant
from cloudant.error import CloudantException
import requests
# Whitelist of review-document attributes returned by this action.
fields = ["id", "name", "dealership", "review", "purchase",\
    "purchase_date", "car_make", "car_model", "car_year"]
def main(dict):
    """Cloud Functions action: return reviews stored in the Cloudant DB.

    Args:
        dict: action parameters; must contain COUCH_USERNAME and IAM_API_KEY,
            and may contain dealerId to filter reviews by dealership.
            (NOTE: the parameter name shadows the ``dict`` builtin but is
            kept unchanged for backward compatibility with invocations.)

    Returns:
        {"reviews": [...]} on success, or {"error": "<message>"} on failure.
    """
    databaseName = "reviews"
    try:
        client = Cloudant.iam(
            account_name=dict["COUCH_USERNAME"],
            api_key=dict["IAM_API_KEY"],
            connect=True,
        )
        database = client[databaseName]
        if "dealerId" in dict:
            # Filter server-side by dealership id, projecting only `fields`.
            selector = { "dealership": {"$eq": int(dict["dealerId"]) } }
            result_data = database.get_query_result(selector,raw_result=True,limit=100, fields=fields)
            result = result_data["docs"]
        else:
            # No filter: fetch all docs and project the fields client-side.
            result_data = database.all_docs(include_docs=True, limit=100)["rows"]
            result = list(map(filter_fields, result_data))
        print("records: {0}".format(result))
    except CloudantException as ce:
        print("unable to connect")
        # FIX: return a JSON-serializable message instead of the raw
        # exception object, which the actions runtime cannot serialize.
        return {"error": str(ce)}
    except (requests.exceptions.RequestException, ConnectionResetError) as err:
        print("connection error")
        return {"error": str(err)}
    return { "reviews": result }
def filter_fields(review):
    """Project a raw Cloudant row down to the whitelisted review fields."""
    doc = review["doc"]
    # Keep only whitelisted keys that actually exist on the document.
    return {field: doc[field] for field in fields if field in doc}
| 31.509804 | 102 | 0.632234 |
1082fa34925048bb5583f013bf61ae4cc897c7f2 | 1,445 | py | Python | lib/datasets/utils.py | kacel33/ActionAI_PC | a0528f49ea61cc07d7c1e9a3cd6846e5f50cfae7 | [
"MIT"
] | 1,311 | 2017-03-28T09:24:20.000Z | 2022-03-30T02:43:11.000Z | lib/datasets/utils.py | kacel33/ActionAI_PC | a0528f49ea61cc07d7c1e9a3cd6846e5f50cfae7 | [
"MIT"
] | 144 | 2017-05-09T16:35:40.000Z | 2022-03-25T03:14:42.000Z | lib/datasets/utils.py | kacel33/ActionAI_PC | a0528f49ea61cc07d7c1e9a3cd6846e5f50cfae7 | [
"MIT"
] | 437 | 2017-03-30T15:23:14.000Z | 2022-03-25T09:18:50.000Z | import functools
import math
import numpy as np
from .coco import COCO_KEYPOINTS, HFLIP
def horizontal_swap_coco(keypoints):
    """Mirror COCO keypoints: move each left/right pair to its counterpart,
    leaving unpaired keypoints (e.g. the nose) in place."""
    swapped = np.zeros(keypoints.shape)
    for index, keypoint in enumerate(keypoints):
        name = COCO_KEYPOINTS[index]
        mirrored_name = HFLIP.get(name)
        # Unpaired keypoints stay at their own index.
        destination = (COCO_KEYPOINTS.index(mirrored_name)
                       if mirrored_name else index)
        swapped[destination] = keypoint
    return swapped
@functools.lru_cache(maxsize=64)
def create_sink(side):
    """Return a (2, side, side) float32 array of symmetric offsets.

    Channel 0 varies along columns and channel 1 along rows, spanning
    +(side-1)/2 down to -(side-1)/2. Results are cached per ``side``.
    """
    if side == 1:
        # FIX: use float32 here too; this branch previously returned float64
        # while the general branch below produces float32.
        return np.zeros((2, 1, 1), dtype=np.float32)
    sink1d = np.linspace((side - 1.0) / 2.0, -(side - 1.0) / 2.0, num=side, dtype=np.float32)
    sink = np.stack((
        sink1d.reshape(1, -1).repeat(side, axis=0),
        sink1d.reshape(-1, 1).repeat(side, axis=1),
    ), axis=0)
    return sink
def mask_valid_area(intensities, valid_area):
    """Zero out everything outside ``valid_area``, in place.

    ``intensities`` is either a feature map or an image shaped
    (channels, rows, cols); ``valid_area`` is (x, y, w, h), or None to
    keep everything untouched.
    """
    if valid_area is None:
        return
    left, top = valid_area[0], valid_area[1]
    # Strip rows above and columns left of the valid region.
    if top >= 1.0:
        intensities[:, :int(top), :] = 0
    if left >= 1.0:
        intensities[:, :, :int(left)] = 0
    # Strip rows below and columns right of the valid region.
    bottom = int(math.ceil(valid_area[1] + valid_area[3]))
    right = int(math.ceil(valid_area[0] + valid_area[2]))
    if bottom < intensities.shape[1]:
        intensities[:, bottom:, :] = 0
    if right < intensities.shape[2]:
        intensities[:, :, right:] = 0
| 26.272727 | 93 | 0.613149 |
aceb7a9ef2afd3c1472a71d88a0574be54fe332f | 9,660 | py | Python | part3/SpaceSciencePython_part3.py | ajpmaclean/SpaceScienceTutorial | ceddc5b3c3aa035ddc6c12b987a95c06b02ffe41 | [
"MIT"
] | 167 | 2020-04-21T21:04:14.000Z | 2022-03-29T15:07:52.000Z | part3/SpaceSciencePython_part3.py | ajpmaclean/SpaceScienceTutorial | ceddc5b3c3aa035ddc6c12b987a95c06b02ffe41 | [
"MIT"
] | 11 | 2020-05-19T18:49:24.000Z | 2021-06-08T01:51:29.000Z | part3/SpaceSciencePython_part3.py | ajpmaclean/SpaceScienceTutorial | ceddc5b3c3aa035ddc6c12b987a95c06b02ffe41 | [
"MIT"
] | 41 | 2020-05-03T06:13:17.000Z | 2022-02-12T17:32:51.000Z | # Import modules
import datetime
import spiceypy
import numpy as np
import pandas as pd
# Load the SPICE kernels via a meta file
spiceypy.furnsh('kernel_meta.txt')
# We want to compute miscellaneous positions w.r.t. the centre of
# the Sun for a certain time interval.
# First, we set an initial time in UTC.
INIT_TIME_UTC = datetime.datetime(year=2000, month=1, day=1, \
hour=0, minute=0, second=0)
# Add a number of days; you can play around with the datetime variables; but
# leave it as it is for the first try, since other computations and comments
# are based on this value.
DELTA_DAYS = 10000
END_TIME_UTC = INIT_TIME_UTC + datetime.timedelta(days=DELTA_DAYS)
# Convert the datetime objects now to strings
INIT_TIME_UTC_STR = INIT_TIME_UTC.strftime('%Y-%m-%dT%H:%M:%S')
END_TIME_UTC_STR = END_TIME_UTC.strftime('%Y-%m-%dT%H:%M:%S')
# Print the starting and end times
print('Init time in UTC: %s' % INIT_TIME_UTC_STR)
print('End time in UTC: %s\n' % END_TIME_UTC_STR)
# Convert to Ephemeris Time (ET) using the SPICE function utc2et
INIT_TIME_ET = spiceypy.utc2et(INIT_TIME_UTC_STR)
END_TIME_ET = spiceypy.utc2et(END_TIME_UTC_STR)
# Create a numpy array that covers a time interval in delta = 1 day step
TIME_INTERVAL_ET = np.linspace(INIT_TIME_ET, END_TIME_ET, DELTA_DAYS)
#%%
# Using km is not intuitive. AU would scale it too severely. Since we compute
# the Solar System Barycentre (SSB) w.r.t. the Sun; and since we expect it to
# be close to the Sun, we scale the x, y, z component w.r.t the radius of the
# Sun. We extract the Sun radii (x, y, z components of the Sun ellipsoid) and
# use the x component
_, RADII_SUN = spiceypy.bodvcd(bodyid=10, item='RADII', maxn=3)
RADIUS_SUN = RADII_SUN[0]
#%%
# All our computed parameters, positions etc. shall be stored in a pandas
# dataframe. First, we create an empty one
SOLAR_SYSTEM_DF = pd.DataFrame()
# Set the column ET that stores all ETs
SOLAR_SYSTEM_DF.loc[:, 'ET'] = TIME_INTERVAL_ET
# The column UTC transforms all ETs back to a UTC format. The function
# spicepy.et2datetime is NOT an official part of SPICE (there you can find
# et2utc).
# However this function returns immediately a datetime object
SOLAR_SYSTEM_DF.loc[:, 'UTC'] = \
    SOLAR_SYSTEM_DF['ET'].apply(lambda x: spiceypy.et2datetime(et=x).date())
# Here, the position of the SSB, as seen from the Sun, is computed. Since
# spicepy.spkgps returns the position and the corresponding light time,
# we add the index [0] to obtain only the position array
# (targ=0 is the SSB, obs=10 is the Sun, frame is the ecliptic ECLIPJ2000).
SOLAR_SYSTEM_DF.loc[:, 'POS_SSB_WRT_SUN'] = \
    SOLAR_SYSTEM_DF['ET'].apply(lambda x: spiceypy.spkgps(targ=0, \
                                                          et=x, \
                                                          ref='ECLIPJ2000', \
                                                          obs=10)[0])
# Now the SSB position vector is scaled with the Sun's radius
SOLAR_SYSTEM_DF.loc[:, 'POS_SSB_WRT_SUN_SCALED'] = \
    SOLAR_SYSTEM_DF['POS_SSB_WRT_SUN'].apply(lambda x: x / RADIUS_SUN)
# Finally the distance between the Sun and the SSB is computed. The length
# (norm) of the vector needs to be determined with the SPICE function vnorm().
# numpy provides an identical function in: numpy.linalg.norm()
SOLAR_SYSTEM_DF.loc[:, 'SSB_WRT_SUN_SCALED_DIST'] = \
    SOLAR_SYSTEM_DF['POS_SSB_WRT_SUN_SCALED'].apply(lambda x: \
                                                    spiceypy.vnorm(x))
#%%
# Import the matplotlib library (mid-script import: notebook-cell style)
from matplotlib import pyplot as plt
# Set a figure
FIG, AX = plt.subplots(figsize=(12, 8))
# Plot the distance between the Sun and the SSB
AX.plot(SOLAR_SYSTEM_DF['UTC'], SOLAR_SYSTEM_DF['SSB_WRT_SUN_SCALED_DIST'], \
        color='tab:blue')
# Set a label for the x and y axis and color the y ticks accordingly
AX.set_xlabel('Date in UTC')
AX.set_ylabel('SSB Dist. in Sun Radii', color='tab:blue')
AX.tick_params(axis='y', labelcolor='tab:blue')
# Set limits for the x and y axis
AX.set_xlim(min(SOLAR_SYSTEM_DF['UTC']), max(SOLAR_SYSTEM_DF['UTC']))
AX.set_ylim(0, 2)
# Set a grid
AX.grid(axis='x', linestyle='dashed', alpha=0.5)
# Saving the figure in high quality
plt.savefig('SSB2SUN_DISTANCE.png', dpi=300)
#%%
# Additionally, we want to compute the position vector of all outer gas
# giants. We define a dictionary with a planet's barycentre abbreviation and
# corresponding NAIF ID code
NAIF_ID_DICT = {'JUP': 5, \
                'SAT': 6, \
                'URA': 7, \
                'NEP': 8}

# Iterate through the dictionary and compute the position vector for each
# planet as seen from the Sun. Further, compute the phase angle between the
# SSB and the planet as seen from the Sun
for planets_name_key in NAIF_ID_DICT:

    # Define the pandas dataframe column for each planet (position and phase
    # angle). Each '%s' substring is replaced with the planets name as
    # indicated after the "%"
    planet_pos_col = 'POS_%s_WRT_SUN' % planets_name_key
    planet_angle_col = 'PHASE_ANGLE_SUN_%s2SSB' % planets_name_key

    # Get the corresponding NAIF ID of the planet's barycentre
    planet_id = NAIF_ID_DICT[planets_name_key]

    # Compute the planet's position as seen from the Sun.
    # (The lambda is applied immediately within this iteration, so capturing
    # planet_id from the loop is safe here despite late binding.)
    SOLAR_SYSTEM_DF.loc[:, planet_pos_col] = \
        SOLAR_SYSTEM_DF['ET'].apply(lambda x: \
                                    spiceypy.spkgps(targ=planet_id, \
                                                    et=x, \
                                                    ref='ECLIPJ2000', \
                                                    obs=10)[0])

    # Compute the phase angle between the SSB and the planet as seen from the
    # Sun. Since we apply a lambda function on all columns we need to set
    # axis=1, otherwise we get an error!
    SOLAR_SYSTEM_DF.loc[:, planet_angle_col] = \
        SOLAR_SYSTEM_DF.apply(lambda x: \
                              np.degrees(spiceypy.vsep(x[planet_pos_col], \
                                                       x['POS_SSB_WRT_SUN'])),\
                              axis=1)
#%%
# Let's verify the function vsep and compute the phase angle between the SSB
# and Jupiter as seen from the Sun (we use the very first array entries).
def COMP_ANGLE(vec1, vec2):
    """Return the separation angle between *vec1* and *vec2* in radians.

    Computed as arccos of the normalised dot product; used below to
    cross-check spiceypy.vsep.  (PEP 8 E731: a named ``def`` replaces the
    original ``COMP_ANGLE = lambda ...`` assignment; behaviour is identical.)
    """
    return np.arccos(np.dot(vec1, vec2)
                     / (np.linalg.norm(vec1) * np.linalg.norm(vec2)))
# Print the hand-rolled angle first ...
print('Phase angle between the SSB and Jupiter as seen from the Sun (first ' \
      'array entry, lambda function): %s' % \
      np.degrees(COMP_ANGLE(SOLAR_SYSTEM_DF['POS_SSB_WRT_SUN'].iloc[0], \
                            SOLAR_SYSTEM_DF['POS_JUP_WRT_SUN'].iloc[0])))

# ... then SPICE's own vsep; both values should agree.
print('Phase angle between the SSB and Jupiter as seen from the Sun (first ' \
      'array entry, SPICE vsep function): %s' % \
      np.degrees(spiceypy.vsep(SOLAR_SYSTEM_DF['POS_SSB_WRT_SUN'].iloc[0], \
                               SOLAR_SYSTEM_DF['POS_JUP_WRT_SUN'].iloc[0])))
#%%
# Create a 4 axes plot where all 4 plots are vertically aligned and share the
# x axis (date in UTC)
FIG, (AX1, AX2, AX3, AX4) = plt.subplots(4, 1, sharex=True, figsize=(8, 20))

# We iterate through the planets (from Jupiter to Neptune) and plot the
# phase angle between the planet and the SSB, as seen from the Sun, in each
# axis individually
for ax_f, planet_abr, planet_name in zip([AX1, AX2, AX3, AX4], \
                                         ['JUP', 'SAT', 'URA', 'NEP'], \
                                         ['Jupiter', 'Saturn', 'Uranus', \
                                          'Neptune']):

    # First, we set the planet's name as the sub plot title (instead of
    # setting a legend)
    ax_f.set_title(planet_name, color='tab:orange')

    # The distance between the SSB and the Sun is plotted.
    ax_f.plot(SOLAR_SYSTEM_DF['UTC'], \
              SOLAR_SYSTEM_DF['SSB_WRT_SUN_SCALED_DIST'], \
              color='tab:blue')

    # A y label is set and the color of labels and ticks are adjusted for
    # better visibility
    ax_f.set_ylabel('SSB Dist. in Sun Radii', color='tab:blue')
    ax_f.tick_params(axis='y', labelcolor='tab:blue')

    # Set x (based on the min and max date) and y limits (the SSB has varying
    # distances between 0 and 2 Sun Radii)
    ax_f.set_xlim(min(SOLAR_SYSTEM_DF['UTC']), max(SOLAR_SYSTEM_DF['UTC']))
    ax_f.set_ylim(0, 2)

    # We add now the phase angle values and copy the x axis for this purpose
    ax_f_add = ax_f.twinx()

    # Plot the phase angle between the SSB and planet as seen from the Sun
    ax_f_add.plot(SOLAR_SYSTEM_DF['UTC'], \
                  SOLAR_SYSTEM_DF['PHASE_ANGLE_SUN_%s2SSB' % planet_abr], \
                  color='tab:orange', \
                  linestyle='-')

    # Set the y label's name and color accordingly
    ax_f_add.set_ylabel('Planet ph. ang. in deg', color='tab:orange')
    ax_f_add.tick_params(axis='y', labelcolor='tab:orange')

    # Invert the y axis and set the limits. We invert the axis so that a
    # possible anti-correlation (large phase angle corresponds to a smaller
    # distance between the Sun's centre and the SSB) becomes more obvious
    ax_f_add.invert_yaxis()
    ax_f_add.set_ylim(180, 0)

    # Set a grid (only date)
    ax_f.grid(axis='x', linestyle='dashed', alpha=0.5)

# Finally we set the x label ...
AX4.set_xlabel('Date in UTC')

# ... tight the figures a bit ...
FIG.tight_layout()

# ... reduce the distance between the axes ...
plt.subplots_adjust(hspace=0.2)

# ... and save the figure in high quality
plt.savefig('PLANETS_SUN_SSB_PHASE_ANGLE.png', dpi=300)
| 40.25 | 79 | 0.649896 |
2c9ce9a473d58efa1b9a1d8bda61080d6bd0e086 | 19,650 | py | Python | hydrus/client/ClientExporting.py | ReAnzu/hydrus | 069f77e1941d13b3bdd969aeeffd7ae003fcb71e | [
"WTFPL"
] | 1 | 2021-02-24T22:12:30.000Z | 2021-02-24T22:12:30.000Z | hydrus/client/ClientExporting.py | ReAnzu/hydrus | 069f77e1941d13b3bdd969aeeffd7ae003fcb71e | [
"WTFPL"
] | null | null | null | hydrus/client/ClientExporting.py | ReAnzu/hydrus | 069f77e1941d13b3bdd969aeeffd7ae003fcb71e | [
"WTFPL"
] | null | null | null | import os
import re
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusExceptions
from hydrus.core import HydrusGlobals as HG
from hydrus.core import HydrusPaths
from hydrus.core import HydrusSerialisable
from hydrus.core import HydrusTags
from hydrus.core import HydrusThreading
from hydrus.client import ClientConstants as CC
from hydrus.client import ClientFiles
from hydrus.client import ClientPaths
from hydrus.client import ClientSearch
from hydrus.client.metadata import ClientTags
MAX_PATH_LENGTH = 240 # bit of padding from 255 for .txt neigbouring and other surprises
def GenerateExportFilename( destination_directory, media, terms, append_number = None ):
def clean_tag_text( t ):
if HC.PLATFORM_WINDOWS:
t = re.sub( r'\\', '_', t )
else:
t = re.sub( '/', '_', t )
return t
if len( destination_directory ) > ( MAX_PATH_LENGTH - 10 ):
raise Exception( 'The destination directory is too long!' )
filename = ''
for ( term_type, term ) in terms:
tags_manager = media.GetTagsManager()
if term_type == 'string':
filename += term
elif term_type == 'namespace':
tags = tags_manager.GetNamespaceSlice( ( term, ), ClientTags.TAG_DISPLAY_ACTUAL )
subtags = sorted( ( HydrusTags.SplitTag( tag )[1] for tag in tags ) )
filename += clean_tag_text( ', '.join( subtags ) )
elif term_type == 'predicate':
if term in ( 'tags', 'nn tags' ):
current = tags_manager.GetCurrent( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_ACTUAL )
pending = tags_manager.GetPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_ACTUAL )
tags = sorted( current.union( pending ) )
if term == 'nn tags':
tags = [ tag for tag in tags if ':' not in tag ]
else:
tags = [ HydrusTags.SplitTag( tag )[1] for tag in tags ]
filename += clean_tag_text( ', '.join( tags ) )
elif term == 'hash':
hash = media.GetHash()
filename += hash.hex()
elif term == 'file_id':
hash_id = media.GetHashId()
filename += str( hash_id )
elif term_type == 'tag':
tag = term
( namespace, subtag ) = HydrusTags.SplitTag( tag )
if tags_manager.HasTag( subtag, ClientTags.TAG_DISPLAY_ACTUAL ):
filename += clean_tag_text( subtag )
while filename.startswith( os.path.sep ):
filename = filename[1:]
if HC.PLATFORM_WINDOWS:
# replace many consecutive backspace with single backspace
filename = re.sub( '\\\\+', '\\\\', filename )
# /, :, *, ?, ", <, >, |
filename = re.sub( r'/|:|\*|\?|"|<|>|\|', '_', filename )
else:
filename = re.sub( '/+', '/', filename )
#
mime = media.GetMime()
ext = HC.mime_ext_lookup[ mime ]
if filename.endswith( ext ):
filename = filename[ : - len( ext ) ]
example_dest_path = os.path.join( destination_directory, filename + ext )
excess_chars = len( example_dest_path ) - MAX_PATH_LENGTH
if excess_chars > 0:
filename = filename[ : - excess_chars ]
if append_number is not None:
filename += ' ({})'.format( append_number )
filename += ext
return filename
def GetExportPath():
portable_path = HG.client_controller.options[ 'export_path' ]
if portable_path is None:
path = os.path.join( os.path.expanduser( '~' ), 'hydrus_export' )
HydrusPaths.MakeSureDirectoryExists( path )
else:
path = HydrusPaths.ConvertPortablePathToAbsPath( portable_path )
return path
def ParseExportPhrase( phrase ):
try:
terms = [ ( 'string', phrase ) ]
new_terms = []
for ( term_type, term ) in terms:
if term_type == 'string':
while '[' in term:
( pre, term ) = term.split( '[', 1 )
( namespace, term ) = term.split( ']', 1 )
new_terms.append( ( 'string', pre ) )
new_terms.append( ( 'namespace', namespace ) )
new_terms.append( ( term_type, term ) )
terms = new_terms
new_terms = []
for ( term_type, term ) in terms:
if term_type == 'string':
while '{' in term:
( pre, term ) = term.split( '{', 1 )
( predicate, term ) = term.split( '}', 1 )
new_terms.append( ( 'string', pre ) )
new_terms.append( ( 'predicate', predicate ) )
new_terms.append( ( term_type, term ) )
terms = new_terms
new_terms = []
for ( term_type, term ) in terms:
if term_type == 'string':
while '(' in term:
( pre, term ) = term.split( '(', 1 )
( tag, term ) = term.split( ')', 1 )
new_terms.append( ( 'string', pre ) )
new_terms.append( ( 'tag', tag ) )
new_terms.append( ( term_type, term ) )
terms = new_terms
except Exception as e:
raise Exception( 'Could not parse that phrase: ' + str( e ) )
return terms
class ExportFolder( HydrusSerialisable.SerialisableBaseNamed ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_EXPORT_FOLDER
SERIALISABLE_NAME = 'Export Folder'
SERIALISABLE_VERSION = 4
SERIALISABLE_VERSION = 5
def __init__( self, name, path = '', export_type = HC.EXPORT_FOLDER_TYPE_REGULAR, delete_from_client_after_export = False, file_search_context = None, run_regularly = True, period = 3600, phrase = None, last_checked = 0, paused = False, run_now = False, last_error = '' ):
HydrusSerialisable.SerialisableBaseNamed.__init__( self, name )
if export_type == HC.EXPORT_FOLDER_TYPE_SYNCHRONISE:
delete_from_client_after_export = False
if file_search_context is None:
file_search_context = ClientSearch.FileSearchContext( file_service_key = CC.LOCAL_FILE_SERVICE_KEY )
if phrase is None:
phrase = HG.client_controller.new_options.GetString( 'export_phrase' )
self._path = path
self._export_type = export_type
self._delete_from_client_after_export = delete_from_client_after_export
self._file_search_context = file_search_context
self._run_regularly = run_regularly
self._period = period
self._phrase = phrase
self._last_checked = last_checked
self._paused = paused and not run_now
self._run_now = run_now
self._last_error = last_error
def _GetSerialisableInfo( self ):
serialisable_file_search_context = self._file_search_context.GetSerialisableTuple()
return ( self._path, self._export_type, self._delete_from_client_after_export, serialisable_file_search_context, self._run_regularly, self._period, self._phrase, self._last_checked, self._paused, self._run_now, self._last_error )
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
( self._path, self._export_type, self._delete_from_client_after_export, serialisable_file_search_context, self._run_regularly, self._period, self._phrase, self._last_checked, self._paused, self._run_now, self._last_error ) = serialisable_info
if self._export_type == HC.EXPORT_FOLDER_TYPE_SYNCHRONISE:
self._delete_from_client_after_export = False
self._file_search_context = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_file_search_context )
def _UpdateSerialisableInfo( self, version, old_serialisable_info ):
if version == 1:
( export_type, serialisable_file_search_context, period, phrase, last_checked ) = old_serialisable_info
path = self._name
new_serialisable_info = ( path, export_type, serialisable_file_search_context, period, phrase, last_checked )
return ( 2, new_serialisable_info )
if version == 2:
( path, export_type, serialisable_file_search_context, period, phrase, last_checked ) = old_serialisable_info
delete_from_client_after_export = False
new_serialisable_info = ( path, export_type, delete_from_client_after_export, serialisable_file_search_context, period, phrase, last_checked )
return ( 3, new_serialisable_info )
if version == 3:
( path, export_type, delete_from_client_after_export, serialisable_file_search_context, period, phrase, last_checked ) = old_serialisable_info
run_regularly = True
paused = False
run_now = False
new_serialisable_info = ( path, export_type, delete_from_client_after_export, serialisable_file_search_context, run_regularly, period, phrase, last_checked, paused, run_now )
return ( 4, new_serialisable_info )
if version == 4:
( path, export_type, delete_from_client_after_export, serialisable_file_search_context, run_regularly, period, phrase, last_checked, paused, run_now ) = old_serialisable_info
last_error = ''
new_serialisable_info = ( path, export_type, delete_from_client_after_export, serialisable_file_search_context, run_regularly, period, phrase, last_checked, paused, run_now, last_error )
return ( 5, new_serialisable_info )
def _DoExport( self ):
query_hash_ids = HG.client_controller.Read( 'file_query_ids', self._file_search_context )
media_results = []
i = 0
base = 256
while i < len( query_hash_ids ):
if HC.options[ 'pause_export_folders_sync' ] or HydrusThreading.IsThreadShuttingDown():
return
if i == 0: ( last_i, i ) = ( 0, base )
else: ( last_i, i ) = ( i, i + base )
sub_query_hash_ids = query_hash_ids[ last_i : i ]
more_media_results = HG.client_controller.Read( 'media_results_from_ids', sub_query_hash_ids )
media_results.extend( more_media_results )
media_results.sort( key = lambda mr: mr.GetHashId() )
#
terms = ParseExportPhrase( self._phrase )
previous_paths = set()
for ( root, dirnames, filenames ) in os.walk( self._path ):
previous_paths.update( ( os.path.join( root, filename ) for filename in filenames ) )
sync_paths = set()
client_files_manager = HG.client_controller.client_files_manager
num_copied = 0
for media_result in media_results:
if HC.options[ 'pause_export_folders_sync' ] or HydrusThreading.IsThreadShuttingDown():
return
hash = media_result.GetHash()
mime = media_result.GetMime()
size = media_result.GetSize()
try:
source_path = client_files_manager.GetFilePath( hash, mime )
except HydrusExceptions.FileMissingException:
raise Exception( 'A file to be exported, hash "{}", was missing! You should run file maintenance (under database->maintenance->files) to check the files for the export folder\'s search, and possibly all your files.' )
filename = GenerateExportFilename( self._path, media_result, terms )
dest_path = os.path.normpath( os.path.join( self._path, filename ) )
if not dest_path.startswith( self._path ):
raise Exception( 'It seems a destination path for export folder "{}" was above the main export directory! The file was "{}" and its destination path was "{}".'.format( self._path, hash.hex(), dest_path ) )
dest_path_dir = os.path.dirname( dest_path )
HydrusPaths.MakeSureDirectoryExists( dest_path_dir )
if dest_path not in sync_paths:
copied = HydrusPaths.MirrorFile( source_path, dest_path )
if copied:
num_copied += 1
HydrusPaths.MakeFileWritable( dest_path )
sync_paths.add( dest_path )
if num_copied > 0:
HydrusData.Print( 'Export folder ' + self._name + ' exported ' + HydrusData.ToHumanInt( num_copied ) + ' files.' )
if self._export_type == HC.EXPORT_FOLDER_TYPE_SYNCHRONISE:
deletee_paths = previous_paths.difference( sync_paths )
for deletee_path in deletee_paths:
ClientPaths.DeletePath( deletee_path )
deletee_dirs = set()
for ( root, dirnames, filenames ) in os.walk( self._path, topdown = False ):
if root == self._path:
continue
no_files = len( filenames ) == 0
useful_dirnames = [ dirname for dirname in dirnames if os.path.join( root, dirname ) not in deletee_dirs ]
no_useful_dirs = len( useful_dirnames ) == 0
if no_useful_dirs and no_files:
deletee_dirs.add( root )
for deletee_dir in deletee_dirs:
if os.path.exists( deletee_dir ):
HydrusPaths.DeletePath( deletee_dir )
if len( deletee_paths ) > 0:
HydrusData.Print( 'Export folder {} deleted {} files and {} folders.'.format( self._name, HydrusData.ToHumanInt( len( deletee_paths ) ), HydrusData.ToHumanInt( len( deletee_dirs ) ) ) )
if self._delete_from_client_after_export:
deletee_hashes = { media_result.GetHash() for media_result in media_results }
chunks_of_hashes = HydrusData.SplitListIntoChunks( deletee_hashes, 64 )
reason = 'Deleted after export to Export Folder "{}".'.format( self._path )
content_updates = [ HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_DELETE, chunk_of_hashes, reason = reason ) for chunk_of_hashes in chunks_of_hashes ]
for content_update in content_updates:
HG.client_controller.WriteSynchronous( 'content_updates', { CC.LOCAL_FILE_SERVICE_KEY : [ content_update ] } )
def DoWork( self ):
regular_run_due = self._run_regularly and HydrusData.TimeHasPassed( self._last_checked + self._period )
good_to_go = ( regular_run_due or self._run_now ) and not self._paused
if not good_to_go:
return
try:
if self._path == '':
raise Exception( 'No path set for the folder!' )
if not os.path.exists( self._path ):
raise Exception( 'The path, "{}", does not exist!'.format( self._path ) )
if not os.path.isdir( self._path ):
raise Exception( 'The path, "{}", is not a directory!'.format( self._path ) )
self._DoExport()
self._last_error = ''
except Exception as e:
self._paused = True
HydrusData.ShowText( 'The export folder "' + self._name + '" encountered an error! It has now been paused. Please check the folder\'s settings and maybe report to hydrus dev if the error is complicated! The error follows:' )
HydrusData.ShowException( e )
self._last_error = str( e )
finally:
self._last_checked = HydrusData.GetNow()
self._run_now = False
HG.client_controller.WriteSynchronous( 'serialisable', self )
def GetLastError( self ) -> str:
return self._last_error
def RunNow( self ):
self._paused = False
self._run_now = True
def ToTuple( self ):
return ( self._name, self._path, self._export_type, self._delete_from_client_after_export, self._file_search_context, self._run_regularly, self._period, self._phrase, self._last_checked, self._paused, self._run_now )
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_EXPORT_FOLDER ] = ExportFolder
| 33.589744 | 276 | 0.51659 |
482c3e1d4897f51913108281e5b45d4f7b358148 | 32 | py | Python | training_utils/scouter/torchcam/version.py | conscienceli/node21_detection_baseline | d9eadaf1b0a81320b5c38d8b8a74eec86aa0d80a | [
"Apache-2.0"
] | null | null | null | training_utils/scouter/torchcam/version.py | conscienceli/node21_detection_baseline | d9eadaf1b0a81320b5c38d8b8a74eec86aa0d80a | [
"Apache-2.0"
] | null | null | null | training_utils/scouter/torchcam/version.py | conscienceli/node21_detection_baseline | d9eadaf1b0a81320b5c38d8b8a74eec86aa0d80a | [
"Apache-2.0"
] | null | null | null | __version__ = '0.1.2a0+6dd7a75'
| 16 | 31 | 0.71875 |
a478bcee271f8207b494b65feac57c7a0f6bee83 | 1,934 | py | Python | tests/utils_test.py | DarrenZhang01/Neural_Tangents_TensorFlow | 2fd360c8b1b8c9106044034f6a8b5c2734db9c3d | [
"Apache-2.0"
] | 4 | 2020-12-25T17:37:13.000Z | 2022-01-03T17:00:23.000Z | tests/utils_test.py | DarrenZhang01/TensorFlow_GSoC | 2fd360c8b1b8c9106044034f6a8b5c2734db9c3d | [
"Apache-2.0"
] | 33 | 2020-07-18T18:57:54.000Z | 2020-08-17T13:58:46.000Z | tests/utils_test.py | DarrenZhang01/Neural_Tangents_TensorFlow | 2fd360c8b1b8c9106044034f6a8b5c2734db9c3d | [
"Apache-2.0"
] | 1 | 2021-08-16T19:00:06.000Z | 2021-08-16T19:00:06.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `utils/predict.py`."""
from absl.testing import absltest
from jax import test_util as jtu
from jax.api import device_get
from jax.api import jit
from jax.config import config
from jax.lib import xla_bridge
from tensorflow.python.ops import numpy_ops as np
import jax.random as random
from neural_tangents.utils import utils
config.parse_flags_with_absl()
class UtilsTest(jtu.JaxTestCase):
  """Tests for helpers in `neural_tangents.utils.utils`."""

  def testIsOnCPU(self):
    # is_on_cpu should report True exactly when the array lives on a CPU
    # device, whether produced eagerly, via device_get, or under jit.
    for dtype in [np.float32, np.float64]:
      with self.subTest(dtype=dtype):

        def x():
          return random.normal(random.PRNGKey(1), (2, 3), dtype)

        def x_cpu():
          return device_get(random.normal(random.PRNGKey(1), (2, 3), dtype))

        x_jit = jit(x)
        # x_cpu_jit = jit(x_cpu)
        x_cpu_jit_cpu = jit(x_cpu, backend='cpu')

        self.assertTrue(utils.is_on_cpu(x_cpu()))
        # TODO(mattjj): re-enable this when device_put under jit works
        # self.assertTrue(utils.is_on_cpu(x_cpu_jit()))
        self.assertTrue(utils.is_on_cpu(x_cpu_jit_cpu()))

        # Default-backend results are on CPU only when the platform is CPU.
        if xla_bridge.get_backend().platform == 'cpu':
          self.assertTrue(utils.is_on_cpu(x()))
          self.assertTrue(utils.is_on_cpu(x_jit()))
        else:
          self.assertFalse(utils.is_on_cpu(x()))
          self.assertFalse(utils.is_on_cpu(x_jit()))
if __name__ == '__main__':
  # Run the absl test harness when this module is executed directly.
  absltest.main()
| 31.193548 | 76 | 0.698035 |
dc66b0005b281fbe20c1aeec0bf8f879e72821a6 | 2,489 | py | Python | Codeforces/GYM/RE/2019-20 ICPC Regional Hong Kong #1/gen.py | Mindjolt2406/Competitive-Programming | d000d98bf7005ee4fb809bcea2f110e4c4793b80 | [
"MIT"
] | 2 | 2018-12-11T14:37:24.000Z | 2022-01-23T18:11:54.000Z | Codeforces/GYM/RE/2019-20 ICPC Regional Hong Kong #1/gen.py | Mindjolt2406/Competitive-Programming | d000d98bf7005ee4fb809bcea2f110e4c4793b80 | [
"MIT"
] | null | null | null | Codeforces/GYM/RE/2019-20 ICPC Regional Hong Kong #1/gen.py | Mindjolt2406/Competitive-Programming | d000d98bf7005ee4fb809bcea2f110e4c4793b80 | [
"MIT"
] | null | null | null | from random import *
''' Generate a random array of integers with elements in the range [L, R] '''
def genRandomArray(N, L, R):
    # range() works on both Python 2 and 3 (xrange was Python-2-only);
    # randrange(L, R + 1) makes the upper bound R inclusive.
    a = [randrange(L, R + 1) for _ in range(N)]
    return a
''' Generate a random string from characters in the range [A, B]'''
def genRandomString(N, A, B):
    # join() replaces the original quadratic repeated string concatenation;
    # drawing each character with randrange(ord(A), ord(B) + 1) consumes the
    # random stream exactly as genRandomArray(N, ord(A), ord(B)) did, and
    # range() keeps this Python-2/3 compatible.
    return ''.join(chr(randrange(ord(A), ord(B) + 1)) for _ in range(N))
''' Generate a random permutation of [1, 2 ... N] '''
def genRandomPermutation(N):
    # list() is required on Python 3, where range() is an immutable sequence
    # that shuffle() cannot modify in place; on Python 2 it is a harmless copy.
    permutation = list(range(1, N + 1))
    shuffle(permutation)
    return permutation
''' Generate a random unweighted tree'''
def genRandomTree(N):
    # Attach each node u in 2..N to a random earlier node v, which guarantees
    # a connected acyclic graph, then relabel every node through a random
    # permutation so the labelling carries no structural bias.
    # (range() replaces the Python-2-only xrange().)
    edges = []
    for u in range(2, N + 1):
        v = randrange(1, u)
        edges.append([u, v])
    permutation = genRandomPermutation(N)
    for i in range(0, N - 1):
        u, v = edges[i]
        u = permutation[u - 1]
        v = permutation[v - 1]
        edges[i] = (u, v)
    return edges
''' Generate a random weighted tree '''
def genRandomWeightedTree(N, L, R):
    # Draw one weight per edge (weights in [L, R]), build an unweighted tree,
    # then zip the two together into (u, v, w) triples.
    weigths = genRandomArray(N-1, L, R)
    tree = genRandomTree(N)
    wtree = []
    for i in xrange(0,N-1):
        u, v, w = tree[i][0], tree[i][1], weigths[i]
        wtree.append((u, v, w))
    return wtree
''' Undirected, no multiedges and no self-loops '''
def genRandomGraph(N, E):
    # Rejection-sample E distinct undirected edges on vertices 1..N.
    # NOTE(review): if E exceeds N*(N-1)/2 this loops forever -- callers must
    # keep E feasible.  (range() replaces the Python-2-only xrange().)
    if N == 1: return []
    edges = {}
    for i in range(E):
        u = randrange(1, N + 1)
        v = u
        while v == u: v = randrange(1, N + 1)
        while (u, v) in edges or (v, u) in edges:
            u = randrange(1, N + 1)
            v = u
            while v == u: v = randrange(1, N + 1)
        edges[(u, v)] = 1
    # Dict iteration order matches the original accumulation loop.
    return list(edges)
''' Undirected, no multiedges, no self-loops, connected '''
def genRandomConnectedGraph(N, E):
    # Start from a random spanning tree (N-1 edges) so connectivity is
    # guaranteed, then rejection-sample the remaining E-(N-1) distinct edges.
    E -= N-1
    tree = genRandomTree(N)
    edges = {}
    for edge in tree:
        edges[edge] = 1
    for i in xrange(E):
        u = randrange(1,N+1)
        v = u
        while v == u: v = randrange(1,N+1)
        while (u,v) in edges or (v,u) in edges:
            u = randrange(1,N+1)
            v = u
            while v == u: v = randrange(1,N+1)
        edges[(u,v)] = 1
    ret = []
    for edge in edges: ret.append(edge)
    return ret
''' Undirected, no multiedges, no self-loops, can be forced to be connected '''
def genRandomWeightedGraph(N, E, L, R, connected = False):
    # Build the topology first (connected or not), then attach one random
    # weight in [L, R] per edge, producing (u, v, w) triples.
    graph = []
    if not connected:
        graph = genRandomGraph(N, E)
    else:
        graph = genRandomConnectedGraph(N, E)
    weights = genRandomArray(E, L, R)
    wgraph = []
    for i in xrange(E):
        u, v, w = graph[i][0], graph[i][1], weights[i]
        wgraph.append((u,v,w))
    return wgraph
if __name__ == '__main__':
    # Emit one random test case: '1' (number of cases), then n and m, then a
    # value in [7, 60].  These are Python 2 print statements.
    print 1
    n = randint(40000,50000)
    m = randint(n,500000)
    print n,m
    print randint(7,60)
| 20.741667 | 79 | 0.624749 |
e100541feb52193c0f2aa675682c8e19638053b6 | 4,648 | py | Python | jina_cli/api.py | sthagen/jina-ai-jina | a854da4f7cbafcf5d699a505dacfa4f27014fb62 | [
"Apache-2.0"
] | null | null | null | jina_cli/api.py | sthagen/jina-ai-jina | a854da4f7cbafcf5d699a505dacfa4f27014fb62 | [
"Apache-2.0"
] | null | null | null | jina_cli/api.py | sthagen/jina-ai-jina | a854da4f7cbafcf5d699a505dacfa4f27014fb62 | [
"Apache-2.0"
] | null | null | null | from typing import TYPE_CHECKING
if TYPE_CHECKING:
from argparse import Namespace
def deployment(args: 'Namespace'):
    """
    Start a Deployment
    :param args: arguments coming from the CLI.
    """
    from jina.orchestrate.deployments import Deployment

    # Block until the Deployment finishes; Ctrl-C simply ends the wait.
    try:
        runtime = Deployment(args)
        with runtime:
            runtime.join()
    except KeyboardInterrupt:
        pass
def pod(args: 'Namespace'):
    """
    Start a Pod
    :param args: arguments coming from the CLI.
    """
    from jina.orchestrate.pods.factory import PodFactory

    # Build the Pod from the CLI args and block on it; Ctrl-C ends the wait.
    try:
        built_pod = PodFactory.build_pod(args)
        with built_pod:
            built_pod.join()
    except KeyboardInterrupt:
        pass
def executor_native(args: 'Namespace'):
    """
    Starts an Executor in a WorkerRuntime
    :param args: arguments coming from the CLI.
    """
    # Only these two runtime classes can run natively (in-process).
    if args.runtime_cls == 'WorkerRuntime':
        from jina.serve.runtimes.worker import WorkerRuntime

        runtime_cls = WorkerRuntime
    elif args.runtime_cls == 'HeadRuntime':
        from jina.serve.runtimes.head import HeadRuntime

        runtime_cls = HeadRuntime
    else:
        raise RuntimeError(
            f' runtime_cls {args.runtime_cls} is not supported with `--native` argument. `WorkerRuntime` is supported'
        )

    with runtime_cls(args) as rt:
        # HeadRuntime has no _data_request_handler; fall back to rt.name then.
        name = (
            rt._data_request_handler._executor.metas.name
            if hasattr(rt, '_data_request_handler')
            else rt.name
        )
        rt.logger.info(f'Executor {name} started')
        rt.run_forever()
def executor(args: 'Namespace'):
    """
    Starts an Executor in any Runtime
    :param args: arguments coming from the CLI.
    :returns: return the same as `pod` or `worker_runtime`
    """
    # Dispatch on --native: run in this process, or delegate to a Pod.
    if args.native:
        return executor_native(args)
    return pod(args)
def worker_runtime(args: 'Namespace'):
    """
    Starts a WorkerRuntime
    :param args: arguments coming from the CLI.
    """
    from jina.serve.runtimes.worker import WorkerRuntime

    # Run the executor in-process until interrupted.
    with WorkerRuntime(args) as runtime:
        runtime.logger.info(
            f'Executor {runtime._data_request_handler._executor.metas.name} started'
        )
        runtime.run_forever()
def gateway(args: 'Namespace'):
    """
    Start a Gateway Deployment
    :param args: arguments coming from the CLI.
    """
    from jina.enums import GatewayProtocolType
    from jina.serve.runtimes import get_runtime

    # Map each supported protocol to the name of its gateway runtime class.
    gateway_runtime_dict = {
        GatewayProtocolType.GRPC: 'GRPCGatewayRuntime',
        GatewayProtocolType.WEBSOCKET: 'WebSocketGatewayRuntime',
        GatewayProtocolType.HTTP: 'HTTPGatewayRuntime',
    }

    runtime_cls = get_runtime(gateway_runtime_dict[args.protocol])

    with runtime_cls(args) as runtime:
        runtime.logger.info(
            f'Gateway with protocol {gateway_runtime_dict[args.protocol]} started'
        )
        runtime.run_forever()
def ping(args: 'Namespace'):
    """
    Check the connectivity of a Pod
    :param args: arguments coming from the CLI.
    """
    from jina.checker import NetworkChecker

    # Instantiating the checker performs the connectivity check itself.
    checker = NetworkChecker(args)
def client(args: 'Namespace'):
    """
    Start a client connects to the gateway
    :param args: arguments coming from the CLI.
    """
    from jina.clients import Client

    # Constructing the Client connects using the parsed CLI arguments.
    connected_client = Client(args)
def export(args: 'Namespace'):
    """
    Export the API
    :param args: arguments coming from the CLI.
    """
    from jina import exporter

    # CLI values use dashes; exporter functions are named export_<snake_case>.
    exporter_name = 'export_{}'.format(args.export.replace('-', '_'))
    getattr(exporter, exporter_name)(args)
def flow(args: 'Namespace'):
    """
    Start a Flow from a YAML file or a docker image
    :param args: arguments coming from the CLI.
    """
    from jina import Flow

    # Guard clause: a Flow can only be started from an explicit config source.
    if not args.uses:
        raise ValueError('start a flow from CLI requires a valid `--uses`')

    f = Flow.load_config(args.uses)
    with f:
        f.block()
def hub(args: 'Namespace'):
    """
    Start a hub builder for push, pull
    :param args: arguments coming from the CLI.
    """
    from jina.hubble.hubio import HubIO

    # args.hub names the HubIO method to invoke (e.g. 'push' or 'pull').
    hub_io = HubIO(args)
    getattr(hub_io, args.hub)()
def new(args: 'Namespace'):
    """
    Create a new jina project
    :param args: arguments coming from the CLI.
    """
    import os
    import shutil

    from jina import __resources_path__

    # Copy the bundled project template into the requested directory.
    source_dir = os.path.join(__resources_path__, 'project-template')
    target_dir = os.path.abspath(args.name)
    shutil.copytree(source_dir, target_dir)
def help(args: 'Namespace'):
    """
    Lookup the usage of certain argument in Jina API.
    :param args: arguments coming from the CLI.
    """
    from jina_cli.lookup import lookup_and_print

    # Queries are matched case-insensitively.
    query = args.query.lower()
    lookup_and_print(query)
| 23.009901 | 118 | 0.644793 |
685f0eacf95be0bb7f197e04df49fb0ce10d4bac | 7,348 | py | Python | CodonSubstitution/build/biopython/Bio/PopGen/GenePop/__init__.py | JackCurragh/DARNED | 13963d129bd8f69fb1106ad1f47394b3211a939c | [
"MIT"
] | 37 | 2015-02-24T18:58:30.000Z | 2021-03-07T21:22:18.000Z | CodonSubstitution/build/biopython/Bio/PopGen/GenePop/__init__.py | JackCurragh/DARNED | 13963d129bd8f69fb1106ad1f47394b3211a939c | [
"MIT"
] | 12 | 2016-06-09T21:57:00.000Z | 2020-09-11T18:48:51.000Z | CodonSubstitution/build/biopython/Bio/PopGen/GenePop/__init__.py | JackCurragh/DARNED | 13963d129bd8f69fb1106ad1f47394b3211a939c | [
"MIT"
] | 19 | 2016-03-26T08:15:17.000Z | 2021-04-12T05:03:29.000Z | # Copyright 2007 by Tiago Antao. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module provides code to work with GenePop.
See http://wbiomed.curtin.edu.au/genepop/ , the format is documented
here: http://wbiomed.curtin.edu.au/genepop/help_input.html .
Classes:
Record Holds GenePop data.
Functions:
read Parses a GenePop record (file) into a Record object.
Partially inspired on MedLine Code.
"""
from copy import deepcopy
def get_indiv(line):
def int_no_zero(val):
v = int(val)
if v == 0:
return None
return v
indiv_name, marker_line = line.split(',')
markers = marker_line.replace('\t', ' ').split(' ')
markers = [marker for marker in markers if marker!='']
if len(markers[0]) in [2, 4]: #2 digits per allele
marker_len = 2
else:
marker_len = 3
try:
allele_list = [(int_no_zero(marker[0:marker_len]),
int_no_zero(marker[marker_len:]))
for marker in markers]
except ValueError: #Haploid
allele_list = [(int_no_zero(marker[0:marker_len]),)
for marker in markers]
return indiv_name, allele_list, marker_len
def read(handle):
"""Parses a handle containing a GenePop file.
handle is a file-like object that contains a GenePop record.
"""
record = Record()
record.comment_line = str(handle.next()).rstrip()
#We can now have one loci per line or all loci in a single line
#separated by either space or comma+space...
#We will remove all commas on loci... that should not be a problem
sample_loci_line = str(handle.next()).rstrip().replace(',', '')
all_loci = sample_loci_line.split(' ')
record.loci_list.extend(all_loci)
for line in handle:
line = line.rstrip()
if line.upper()=='POP':
break
record.loci_list.append(line)
else:
raise ValueError('No population data found, file probably not GenePop related')
record.populations.append([])
for line in handle:
line = line.rstrip()
if line.upper()=='POP':
record.populations.append([])
else:
indiv_name, allele_list, record.marker_len = get_indiv(line)
record.populations[-1].append((indiv_name, allele_list))
loci = record.loci_list
for pop in record.populations:
record.pop_list.append(pop[-1][0])
for indiv in pop:
for mk_i in range(len(loci)):
mk_orig = indiv[1][mk_i]
mk_real = []
for al in mk_orig:
if al == 0:
mk_real.append(None)
else:
mk_real.append(al)
indiv[1][mk_i] = tuple(mk_real)
return record
class Record(object):
    """Holds information from a GenePop record.

    Attributes:
     - marker_len: the marker length (2 or 3 digit code per allele).
     - comment_line: the free-text comment line.
     - loci_list: list of loci names.
     - pop_list: list of population names.
     - populations: list of population data.

    In most GenePop files the population name is not trustable; it is
    strongly recommended that populations are referred to by index.

    populations has one element per population.  Each element is itself
    a list of individuals; each individual is a pair of individual name
    and a list of alleles (2 per marker, or 1 for haploids).  Example::

        [
            [
                ('Ind1', [(1,2), (3,3), (200,201)]),
                ('Ind2', [(2,None), (3,3), (None,None)]),
            ],
            [
                ('Other1', [(1,1), (4,3), (200,200)]),
            ]
        ]
    """
    def __init__(self):
        self.marker_len = 0
        self.comment_line = ""
        self.loci_list = []
        self.pop_list = []
        self.populations = []

    def __str__(self):
        """Return (reconstruct) the GenePop textual representation."""
        rep = [self.comment_line + '\n']
        rep.append('\n'.join(self.loci_list) + '\n')
        for pop in self.populations:
            rep.append('Pop\n')
            for name, markers in pop:
                rep.append(name)
                rep.append(',')
                for marker in markers:
                    rep.append(' ')
                    for al in marker:
                        # Missing alleles (None) are written as 0.
                        if al is None:
                            al = '0'
                        # Zero-pad each allele to the marker width.
                        rep.append(str(al).zfill(self.marker_len))
                rep.append('\n')
        return "".join(rep)

    def split_in_pops(self, pop_names):
        """Split a GP record into a dictionary with one pop per entry.

        Given a record with n pops and m loci returns a dictionary
        of records (keyed by pop_name) where each item is a record
        with a single pop and m loci.

        Parameters:
         - pop_names: population names, one per population, in order.
        """
        gp_pops = {}
        for i, pop in enumerate(self.populations):
            gp_pop = Record()
            gp_pop.marker_len = self.marker_len
            gp_pop.comment_line = self.comment_line
            # Deep copies so mutating a split record does not touch self.
            gp_pop.loci_list = deepcopy(self.loci_list)
            gp_pop.populations = [deepcopy(pop)]
            gp_pops[pop_names[i]] = gp_pop
        return gp_pops

    def split_in_loci(self, gp):
        """Split a GP record into a dictionary with one locus per entry.

        Given a record with n pops and m loci returns a dictionary
        of records (keyed by locus name) where each item is a record
        with a single locus and n pops.

        The gp parameter is unused; it is kept for backward
        compatibility with existing callers.
        """
        gp_loci = {}
        for i, locus in enumerate(self.loci_list):
            gp_pop = Record()
            gp_pop.marker_len = self.marker_len
            gp_pop.comment_line = self.comment_line
            gp_pop.loci_list = [locus]
            gp_pop.populations = []
            for pop in self.populations:
                # Keep each individual's name, but only the i-th marker.
                my_pop = [(indiv[0], [indiv[1][i]]) for indiv in pop]
                gp_pop.populations.append(my_pop)
            gp_loci[locus] = gp_pop
        return gp_loci

    def remove_population(self, pos):
        """Remove a population (by position)."""
        del self.populations[pos]

    def remove_locus_by_position(self, pos):
        """Remove a locus by position, from loci_list and every individual."""
        del self.loci_list[pos]
        for pop in self.populations:
            for indiv in pop:
                name, loci = indiv
                del loci[pos]

    def remove_locus_by_name(self, name):
        """Remove a locus by name.

        Unknown locus names are silently ignored, matching the
        historical behaviour of this method.
        """
        try:
            pos = self.loci_list.index(name)
        except ValueError:
            # Locus not present; keep the historical silent no-op.
            return
        self.remove_locus_by_position(pos)
| 33.248869 | 87 | 0.557431 |
a7b8a1ea3eb533f5e79edc8d2757a1632162630d | 78,878 | py | Python | infra/libs/buildbucket/proto/rpc_prpc_pb2.py | xinghun61/infra | b5d4783f99461438ca9e6a477535617fadab6ba3 | [
"BSD-3-Clause"
] | 2 | 2021-04-13T21:22:18.000Z | 2021-09-07T02:11:57.000Z | infra/libs/buildbucket/proto/rpc_prpc_pb2.py | xinghun61/infra | b5d4783f99461438ca9e6a477535617fadab6ba3 | [
"BSD-3-Clause"
] | 16 | 2020-09-07T11:55:09.000Z | 2022-03-02T05:47:58.000Z | infra/libs/buildbucket/proto/rpc_prpc_pb2.py | xinghun61/infra | b5d4783f99461438ca9e6a477535617fadab6ba3 | [
"BSD-3-Clause"
] | null | null | null | # Generated by the pRPC protocol buffer compiler plugin. DO NOT EDIT!
# source: rpc.proto
import base64
import zlib
from google.protobuf import descriptor_pb2
# Includes description of the rpc.proto and all of its transitive
# dependencies. Includes source code info.
FILE_DESCRIPTOR_SET = descriptor_pb2.FileDescriptorSet()
FILE_DESCRIPTOR_SET.ParseFromString(zlib.decompress(base64.b64decode(
'eJzsvQt0HNd1IKjqxrfAT7FJSlDrV2qJJEA2GgRJURIpyQEBkIQEAlADkCwpMljoLgBlNrraXd'
'0gIVnjTcaJM5NsMk7i48RO1us4VryTjLOxE69nk3jH45nEjnO8uzlxJtl1vuszu9mT2cwer5Oc'
'zNmze3/v1avqBkkpnuzsOeSRyO7b73Pffffdd999991r/8yHLXuw2aiUGs2wFeb2rbWDWnWtXb'
'nmt0rbp/LuRhhu1Pxx+nWtvT6+Hvi16uqWF13jGvn70yWiVrNdacmv98iv0AP84LXakfywpxJu'
'bYV1+TZE3cqXXD1sBetBxWsFqkDhn1n2/kt+6wIWK/vvaftRK7fPzgTVYcu1RrJl+JQ7bfdTM3'
'5zOAPAoVP3lpKjKV3gn2eny6pk7mF7D31crbe31qBmFmr2lhmfeQLlTtl9NOpouErN5ks8qJIa'
'cuki/nwFaFKWkoUvWfbBJd9rVjapz0jh/JQ92Gj6VRycT6gPnXqwK5aLqlQ5rvB2MMndBz16G/'
'5qFLzmD/s0ugEELMH33AO2TT+2wmt+fXgdfh0sU/FlBBS27EPJQUSNsB75uTG7j3COYAhZwONw'
'1yGUpVDuqL2/7t9orRpdVamrvQhe1N39YNbec8FrVTYVtb7LHmjyR9XTox09GeVL8m9Z18p/Pm'
'P3q9aesQc3/NYqNSC0fyjdXIrHLt9VHtgQUO5Ze29E1FiVwTOXPZJuo8u8Qzt7IgOcu2Lviyqb'
'frVd8wWhLDXWMb4lKZXCam9kwnOX7D0Vr17xa9JYDzVWSDc2RWVSTQ1VYuiFQbtfaFf4yay9V6'
'gr0z4NkkI+q/k4ust8cKmS+lCOK+a/mLEHdJNnOuekOzMlZuK57jPRSbwuzNsxFc/sMhW74pGi'
'/bmutN+1tknu3HG71282w6as55xazyiQl0hcQg0ucsHGxcBjKHzGsnMrjSrIhIREPGH33pqSZS'
'6TO28PtakJkuZCwptJFJuL4+e3JRN/vg/ESRd2RikkTLcqAn0QGYYgs0ikAy1/q1FDVFlWQ6kM'
'if396gdqbzaxB2Rvew8Yt/tgUrzmDs3dvlP3pOssNwP8uSzFgHR7/BsNvxls+fWWVxvuvXm1RO'
'Hc4yBzmyEAWgEsoz7C8p4O8i3RLlo2isLy27cRtIKaH63i5hm0hvup8gMdQoxLTVGh8t4N82tu'
'CloBhgpaq5VNr74BKAzQSr6/UxRiqSkqBI0Y36Jcye5peRvR8CBVzXesuxaMfGPRC5plKpcD5q'
'3i+CPY0aNhm2p1SCdhB786rYqWjVq5vD3QaAYhYLEzPCT7mHyHNdhHSsPO8J7ukm/eUCmmwvp6'
'sFGWGm9rUz1tD1SgY2iwNrz35pOvC+aKdta/4Q/vk15S5Wdu+JV2y1ur+WUsVviHsMQ75XWH0j'
'NqO1F7awt6gmXcvFYNr9dpbQyW9wv8ioDf1pr9dI+9L6mPmEvMuu0lVrL7WP0j5PaduruTZfDX'
'spTqwqXZt86lz9uHw3ar0W6tptZNz+2sm4NcNwFEUVVp+kCG6uraDq17EFUCubCj10Xfba6Lc/'
'YQV15tAaPLeu6g5zL8VqYBSucIyE3Yh4J6pdau+qsJYTQAjQyUD8pvM6boOan2h8HuPMicRj3J'
'JhGLRvu2RGNh2bbjVnKPwt7a8pqtWG4z5+4hqBLaLojTejUt222ASYlT/3PW7pPtetoeUEpa7l'
'bqW777Dli4K/eKvcdUDnK3o8Tlb0u/gMaftYeMzTnXIYw6d+7dEZ239yb2zNxtaYi7twe4GVKl'
'E7dOkbN7WxftXlL3ch2r0tTK8x3rLKEjFu66cPblMxthqbLZDLeC9lYpbG6M19qVYNyoxsfL8w'
'aksfbsH+/Yg06Pc5fzNxnHsj9tDeyhb7lTn7DcqbCx0ww2NlvuqZMTT7jLm767dN1rbsH6cyfb'
'rc2wGZXcyVrNpUKRC6qV39z2qyXbXYl8N1x3W5tB5EZhu1nx3UpY9V34uhFu+826X3XXduB335'
'1seEB9F8Z00q0FFdiqfIB7LRdWg7vm2+562K5X3aBOpedmp2bml2bcdRAoJVDnBjJOnzPg3A2f'
'BuDfu5znEDgwpD9nB+5ybKffGaXPljMEn4/Q54yzBz679Dnr7IXPh+lzj7MPPh+iz73Ofvh8n/'
'2XmYE+aGgYG3Ws/J9k3CuwVkFAuqw9AyouTela2DrnLoduFIBSBVuj+8LE2DMvnHInF2fdVtOD'
'fRi3TxhT063UAhApUdHtxhqR7SIxgR7upoc03PLdqN1ohM0WVa6H9bG5lalZ1b97JYxaSPKoXd'
'kUoOs1/RgtDQRi0rxUw0qbhBph1PTX/WbktkIk85aLfUJLiFwLITuuV4tC2xWRSP3rllPIuCN+'
'aaPkLl0LPIGMAkss+b5bUFUKbsNrehtNr7GJM+sDC7jlxSkXNxdACci5BwkO8zUMM7nP/qke+p'
'qBOTjiZJxT+R/scUFQwWi4B6L/jlv11712jRCGAbXazGdYAEYLkwXqih6BB4zLO7QglxJ8JfWj'
'ObPBOrUdNfwK6EK68UAjogldhLHUgdMBx6CFZKVGbHfLjyJgHPd60NqkttbDWi28jmuqCRIoOm'
'e7x11mBrfuQWmcRXe9XasVXSJrAY4aLb+p13qh5IJmJmtGZgFrAwe5SNvtU9RCBMu15QM9VDOV'
'oFDCvmBIHhYi/JgiMbG2vGtQldYkNPXCRBEHA6NtA6Q1DgeH1g41Ahg1NH8GQmhYAT7OJpAJWT'
'bFiFAvxhta1X2qZuCo2kKypCmLgoM/sL0JZshR3AEMcwQEwj4DkgHIfudBA5IFyKhTsv/vrIAs'
'5wlgqtn8X2Rd3o4AE8S429pGqcSslR5QEQgVcxEvcWKBiNYXTi6M+SKA/Rse0qboqsMo0HknZk'
'x3K7iBS5kmE5dregkzF9qutmoxA07NQfuw+RARaRw4aQnyEVupkekW/SYzhe2+jkxT0O0WzjEE'
'YFIwhlC58N1+pQWwgubGov6Ve8Mf0xxrllGtFmpBvX1jrOnXCvzrGzb/Bf9dD2Ct1sLwmjErOL'
'SOhk3+d69vBkABwHENTgQ7xF82MZh/I4haBtdYxAB9Ts6AZABy0DlqQLIAmXBm7F+wBJRxngGu'
'eSL/UctlbSSWRm6ZOASFrUuLJJZSKOW2/JYHFWCi21EskArvZIk6xpY89xoIXZISnksGPxtWRN'
'1vopqs9k7ab5swUr/uikUFVwz+pDpUA0AufwYGecCA4AhyzqMGJAuQceeM3RJI1rkAY3wqX3WV'
'ioSDrPvXVfukFYidATChAW1628DFLbfmw/S4S1OXZ6ZX5mbKbjOs+Wofr0J50Hpp65GtL8YV9+'
'kLgOtBA5IByCFnxIAgbqedJ+y6QHqcizQf73JZATM2h/8oWPYAlhcTFO0BLC8mKNoDWF4kir4k'
'kF7nWcDyTH7W5dMqEHQLtqwA5IHCMJJliyqexl0EhdKivNp1bydyF54zEOoFhJ4FhIYMSAYge5'
'xhA5IFyCPOSfs0IHeXMw8aTQU0miPupOpeb1G42LQ4Q3Ma7so9JGTnYVe+x75M33BTfh7GtD//'
'hGwiAZL8SrvVhj12B5Yb8HcUbMueJ0ueBJsW4DnVEswitjXv3AsoK1gvwfoNiAWQAcc2IFmA7A'
'XBvyIQy1mBOsP5GVeO0qyP6oXh4jYfNHHPYSR2RRhHoxG0AEFs+Hlnv+4cBALBhgwIdr+HGLhH'
'y48V524g2rxAMs47oc69+WeEaIoWMWJCqdvEDBcAtrhCs61gvQTrNyAWQAZAvY0hWYDcA7X+bV'
'ZAWWcNKo3mv551yZxBaqESU7I0FEcCwrPrvG/7raKsGtbC0CZKykKEWgdujjxSxV+i7PFeSxta'
'LWAlllUvQ4XTqh2K0CVWEGARVLdxpVeh7S2v3goqvAfb7mar1YjOjY/r8xBbafgkUoJ+x4P6et'
'PjIxL+NdbYGT8xvv6k55990jv9eOXMWrV60j/5hPfE2sn1x/2JU6fXz/pnTp72H3/8jL827jUa'
'fn0jqPvj0BaQAUdhfqSTVrsV1Iy7vqjU2HnkcaBWHXVfQLZd82DfFY2SDwMgWX0Yd1iHuSaVn+'
'0nbmzBJArv2HI40jKtygzR8FqbkVvgWqW4ltJl18OwYLAMnnVwpt9prDbQiQh2nwGxAHK/UzAg'
'yCFHQBo/TlJwHYTIDgiR0V2EiHnGNwSJRVUHoKvL9A0FySZ0/RAIknJCe49YZEcgiaP1HT6+aC'
'WlpFaMWg2WCBJsa915gNBmWB/BHANiAeSAkzcgWYA8APriVywBWc4WrYbPW7deDUkdMH2SMBRe'
'GgKwEUy7OnQZjZCW4qmDHDQastKOajbTwu4y+x7NvyhVUel4iYoXDKqg9MLBbDoP6RGj9NrSE2'
'6J9NrSE26J9NqiCd8USMZpknh9p8s3vLhqZapgNLxwYcDTvGoJOHHyJEAm6zvutldr++4zADiJ'
'8iGAI0ITphMHAudOBBs4o1zDvrbgCO9oWC/B+g2IBZAB2owVJAuQQyBx/yc1k1nnBlTK579iuS'
'8QCoB0N/tTKXXV6q6DGKEpAiy3g7AdmeIPbR18xAN1H9SFti/aN6hisn3HijruevoumbkniPSi'
'ubKytIwTCweESgtlABwAkSTMZbbZPXNWTCZcyzi6poh/hvUSbMCAWAAZlI3JkrV8Azame+0nSX'
'95L6zl77NgMR+nxazOJ7dezTgD74UZuN/+iEVfcTl/jwXd35//AUtqYYvIDyjGF5qwufnGcS4A'
'TgeNq+LXq0g6d5ZNGO5WCOshrOMFAOrxPhpwIyhB6yIAvTgE4oAWHDaR+4DcIbZMx1D/OlBprB'
'WOhbUq0+uAwg3oRdiZoD4EDYGAiEEWgg4AF8WgLILysGDKArKc91sktyaFr9JkUoLAYKld2MU+'
'pNoE/KhV6Ot+3bkFM0rQAQNE3Q/CvMegLILuAyl2ilTQH7BgVn8KZ7Wwi4gme6LM5l6sA61CLV'
'QUxugr2tF+EHv+ccvJ5u91F+pa/iTUVUKCikMLUMGGJXm3Bg1AKz9kOT3Oj1lOr33YhEPxH+Li'
'BzUYWeg/t0ClfdQoyxNF4MMpsIXgu0G4JcFZBBecRxItW84PYxPFRFkUhgS+NwWm0nnnWAqcRf'
'Bx54RdMcAZ54PYRCk/T9bRhHE5oeOTOkqERzb3+TwAC8KHGfGZq21dAUhr9p0BTKmbfApsIfg+'
'EJZJcBbBRWcsQYOs86PYxIlEWdQACDycAlsIvtc5kgJTIyPQ4R9ZBMdJ+yhyipv/bcstq/ECw8'
'sImV9wBdd8zTZsSFaFE/RgSgWwx1yvw5dqsB1UQR9meyoISrzaBEFrGsxRaHpwLqazHJuVjNJe'
'jc8ndLCLuhIfREx1jPQwg/4H1PhgbX40lh1ZYcmPouzIGSALQQdBUMSgLIIeBA7F41eP8zFcmP'
'81LsxHdhO36ZWJCtfHcGUeJu2rh1bmzyA2v4Ar8xFX7WJAgJRnj9qyCJ8eWaM/g4suR2u0R6/R'
'T+Aa/Xm1RjUcin+Cix/UYJzun40ZUQOBIAQeSoEtBO+RpRuDswgehnVntmw5n8QmxhJlcY1+sr'
'NDi0vfB0pKEpxF8AlY6GbLGedNbOKhRFlcU2924ox4vIk451PgLIJRVzRbzjo/h008kCiLa+rn'
'OlvGNfVzFh3Qk2Bq5D4Q/BcNcI/zT0kW5ifcGXRnYWMAahRVv+UFtUgdcdv1qF2pAA+tt2tocB'
'LRodsBZKile1JgC8HDKdRx/wCwC+LzMoFxwn8R+a2Qf1wzm5ZnpLLwBgxrMMGAqZXUIyvpF+OV'
'1COM84u4kg4aIAtBhwCzGJRFkOs8bD8BgF7ns7iS/iWupJFdtjjjytBYTmg0+Swup7x9jr7i8H'
'4FUcrBeYZLg+hiv50iK9po/2JDOWmykWzbvTIgqv1ZtW33ypgI2m+ALAQNOHsNUBZBDmyB/4cl'
'MMv5AtYr5r9hmRb6KMZJbi/4ekYdfKv+tl/D01+kHBTw0EvH0kpYG1trr+Ntz3g1rETjdPPjwz'
'l6POXL8Ah1hWdXEM9T7SYUIqVUHb9JPCavMOgAEtEVPd5kjIm1hQ4i8deO82lMP1R7aMRAiJym'
'DC55gt5ngIg098OWHIOyCMLt+AUBZZwvWnR4m7kNS4am4+5XQQpNXIzUMvRX1Aig/PhiEk2cwS'
'9adJiKQVkE4WlqltbibyLrfjYDrPvkbgfopApBzOvOwyHFnZ0+5048JrzcB539pkU6+DdomRMz'
'f82ik9pvZdzjx92l5fLC/KW5l9zyzNTClSsz89Mz0wDHgxmIjeA9qI3S9IE2ToYb6h/ki1+hy5'
'hqG46sdJKJV7O7tBm28V7GV01osyrMryrIbQILBXRntIJtw2RUYWANOQAYJmytCdCCI6ES8DEX'
'Zm3C3QrqbHgSE697nS8sReghlwrJqvFNG7WidVZ9pdzw8DgNLbZxgSOELggQC2kbrx/W4htF4Y'
'M+We5EXyD7gzTDDO1l6IABshA0KFKtT5b71yyyCX6/JTDL+R2s92C+DTOL4lyNjw/VzR06DUZ4'
'hePVWj7ZPNwXyfyvriSLSkKsgQS4Hl+Jso2BBZYaunL907cFaly4DAkTQHBYY4ynj9+JxVifLM'
'PfQZa71wBlEXQ/SOtQQBnn95gJ32UckIiZS9rIiYsvgAlXVsHYEgpDatdrMLNuhw8j0kKP3MAf'
'1yd1+TvmvOD6JOiQAbIQtMeYF1yfv8fz8i01L1nnD7He4fyfWsh1BrHRm6EJfB3JZcwYEDxAw9'
'W2WBjk8rBEFseEGwe7+8A4F1DTvx6Q44NMEQ4MDYqsk3q4WMJ6bL/26W5xPdgQ6a/8MkotkNar'
'mkzcwyqswgpe9G/4dFwujXPdcbm/W5WmSO4bRESTAg3790wmQFWGoIMGyEKQLQfnPlFh/hCV30'
'P2v1FE7HH+jIRj/l+/ZSJqGuK+YbpqfUeoJzcEiYZxidwuoXrU0GDEhzUJULT/WZJQqGL9GRLq'
'bgOURdC9oH/8Sa/Aep2/xnoj+a/1uot6k0xtXSZFDEuvjRbmpM1YqYXiYycSxaNTDlpj8Pctv7'
'mBEi9Jf9tshUxX8VdluyJpA4Sc8fi2FX/fSf7qcnPq6jDEPQEYtOLzBamStqqyrSaLkWFBb1x3'
'8izEDgMeah+Kh6Chca9aVdegqs0iG9CU9CeRvg46M+8Os3Vgm6A62dwgjxjX1+o1bYwpptSDrP'
'BdO20NW9B/ld1lUIoxGyoXJhmu7EV12KDoKlj8peIGyX5PHiCvFAxJUXjVAPlNdAvRoBp8wWuT'
'goI82oRl1fBX5YKi2a6jX2Sh6BaCaBXvOm6roLkUoILtvjDhojSGcQHsHNCmyXoTq1SFmDH44h'
'pv59HFZws0h2a0+u4orBeMFQOczjwOrH+fXgu9sGIIOmyALATdC6ePGJRF0FHQ/P5XpeX0Od+b'
'gXqP5L+e6dhheIkk3VfpGkn8Sww+JbaoeG2RJckFA8PkO2NlYYeJsV2QqjyrPOMFbn9cuht/fT'
'OMWjhhb4y/LlLkjfET468H1TeQHm7CNdYN17BAxHZg6gyrF12piQ4j+nIsosvo9LyAcg709lGU'
'FtBYjL4nhZjBcA2nsaf7CHImFW0rPQZz4vpg4ojUMAMjekr61ATsN0AWghxj++2DiQPQw6AL/y'
'9ZgfU7P4L1TuT/x+xuE5fwYSbNkS9DG+hvWCfrIyhG7w7X6PavhfbkqeeLLns1F10YxVilNtai'
'nXaSblx4NwgbKPiSfIHXcea+FG8tQUOJ0VbTJ7EArGJ6Tqt5YbmqL6T+I7FZAw/W40ybBJMxnZ'
'DbsARUfqNgE03pPol/vTnfUMXV42nRf2u2SeBUkFM+TTIwDU2zCepD0JAY1hlkIWjYOWqAsgga'
'dY7bv6S0iAHnQxkytn/Mcpe9je7bInqGd932jHtoKmSb+iZez51zd62b8NLiOwcqq/aFUG5p1L'
'alhzEA4/9QcvwDMP4P4fgPGyALQXeDJhCDsghCE9MvqRUz6HwcmyrlP5Z1F7TepNT8+LGHRkvd'
'r8Q7J163pzV/2uZIFQBiNHHSk+cmFOq3tY3bMQ6dTcR7+VvZxHWDRTmw0yZuv5VdHMa3Tq4/rO'
'cUTUJRZXbT4tWlDnzkblH1hTzxsGBzDJrsFUT30mvktKe8eICEE3Q+1dM4CPP/8eT8D8L8fxzn'
'/wEDZCHoQUOuDsL8A+iEM2a/W0C280ls6e78S8rv4jW/GSZUaOWjjWcBV73vMcxEiQOJKl2KC2'
'phbwPe1BvgUNJI2aCfErTfAFkIGjAOADbaXTNwADhMdjUEDTmfwmoPk58AiJYx8eI03hUJW7SZ'
'ugYqQ4AK1YY2Y+V5CKhI0EMGyELQYUOtGAJUPoWEdckuhKA9zs9nvtN2IWoX0KSWob+HNQJ7AE'
'2C3meALAQpuxCDsghCu1BRQHudT2fo3Hxfx+aonkQZne+Fzqk8tDKqm90LnRN00ABZCLLFwsag'
'LIIOgzx+RUD7nM8wpz1ryJn4qKTpFB+l2DCI/jjtNSYNKIpBo7oqTrAGrvsAV2r+06a2tw9wJe'
'geA2QhaK/BWvsA188wa6Htt9/5XMa5y/nvMjex/RpPMgzbL+44n8uQ7XeMvqK57POkx+TvFwOM'
'PjTSjS01I8PoF/sPVYBm2NzbL/afz8crpF/sP5/PkM9aDMoiCJ3WnhGQ5fxqhsw/Y7FPS8fMp9'
'+qGeig2YZa+LxSxfrFbEPQAQNEXQ2K2aZfzDa/miGzzQsCyjhf+I6vkn6xzlDL0N+DGgHU4r8Q'
'r5J+sc58IV4l/WKd+QKvki+jSjDg/AbO/r/D2f9vLJh+nqzYOwhltzJHhnVfzWlssxa9TdwwkO'
'bnYrfopPaJipCNKqOoi1FJWSGN6wBsiXYT5SwfbNRD04qF7/NQXVDKpXAj7v+/kaHHLKfpK3Lj'
'l3n9F/TA1IajDtjKfsbUHRCepGq/oZbWgFxBEHTIAFkIUravAeHJL2fI9jUhIMv5KknYvJvEgZ'
'RUfuDDQSliDJANqdKXTQzQiP/VmA0HhA2/imy43wBlEZQDpF4WUMb5Gp8RZhUGx0QQlWa7HBGS'
'XtWTc3My5eg9qw4Rujtkxa/FW/OA8OHXYtV0QPjwa7FqOiB8+DVWTf8HS2BZ5+vY1PH8F6wOTB'
'f4ziN13CBUt+iGl0ipTqfkXtHBqUqsoqamXOLUYYVuW1KnxaDaBQhHyO5Q+Bby86R4JtEESGOC'
'oZ7QY0cT4Nfj496AmAC/jse9IwaI6IGeAU8JqMf5febnEx38rDVtce+Lrwo0Mj2qPrR6XHeDl3'
'+/n2QrNLP9fkYb2wfEzPb7zNhVAfU63+CDxFIXxk6wDWr58nSjMDn/UvKHgnokg+YOUKjLi1Mm'
'g6Gl4xtJBkMzxzdi3X9AzBzfiHX/ATFzfIN1/5qA+pw/yZA/1StdMF7z/bqmolyUJN8qNfns51'
'4I0Q0bX9V5pOjjiTVsKMcITW486lN/gMVDGq0+hcWQAbIQtMdYMHjUBxA6Sb1PQP3ONzPkFFJH'
'ibkOO4XvjsixZbSoLzoUP+Cbsl0Nvbhy4CxGKrphspV3cclqejx4CiUU/kTt1QztZWifAbIQ1C'
'8b0YAcRL+ZIceNL6nVPuD8Oda7J/9L1s2lsyY8aL1e0Oz0OhTnML1lqUtT89Uvbz27OpWhO+HT'
'E8otuC2Xa3Bcqyb9Fs2LngE5mtIwYHSuHi6eTv881sMG5HT656it5AxQFkGHYcN6p4AGnb9gnf'
'FSFw71WNKhJOuw6+92N6IRxTMUtQ09xnyGx6i/iJXbATlG/UVGX0YMyDHqL1hh/B2cvUHnWxl5'
'8pr/ouVe9mrrY2HDr6emix5ekGknSB/ypaQ4Gmo9uOXfaBkWqpv6IpL3YmqOAzlbo9+XWOlhAs'
'2H3VSk7oZ0nFeTidoDDvtbqD3k7LP0FbWHb5M6lT8CR2P1cAC2EqzJq3+n5C4kF/6gKBBU81vq'
'YDUoSu23Y6V2UBSIb7PGEoOyCMLrDIWG5fx1hl46HHFn9PsFOCrcCg3UIqjmt5VKOCjK7F8n0b'
'C4iwEDWdQiAHSPM7zWR7c1p+2PP23fMhJXbn/KBaLwsD2oIzrkDtm9ZKGnwD2DZf5y4R9Z9kHY'
't9ORIC7s0xUXEbRovfykFNkIa8BW9Eh7w69ThW44nY8//kwme2nxwj/NPHiJW1hU4SZe9Gu15+'
'pwCljeafjRs7983h4EKXWX8x8sx7J/ew8cdB+k59xf3OMuivcHrDny/gAlgFsDHYVeo5EfdkWM'
'mWETFqudeAN+8gmpgOxUcnd5+v0WXVDoHqQaoNPBWptMAMjybX5wKk/HEbJGMRIIr0jccEDc47'
'9hG6/vw6o2IhRpR4OeQZVq8fXKNqgTVeV5b6pUsGirgXYDxOe5rXM2P0A8nkKMjILmY3aSbE10'
'vWIB4K2F2/iTUIweGwYV8imAVavetpg9inSO0YH+KjUP9i689eqOBHRm0EIhAWOstit+jIcdI/'
'J3wsPWb+ESz8WhyjjQn62gwCmw4cKOHpNaWf7QGhhjrwc17wdsP4WG6+Qxtp7grXoY/0Z0D1pk'
'qq1zUyFwrygAaqsDIQlQlLeIxBY+tGCatFB2NwO8Z0M3flsFJVhvXUc2EQ5SalKFDWbu9SbyTp'
'25KBLjhbt8eXbJXVq4uPziZHnGhc+L5YUXZtGF5sJL8OOMO7Ww+FJ59tLlZffywtz0TBlUy/lp'
'gM4vl2cvrCwvlJdsUCGXoGqBfkFtcuadi+WZpSV3oezOXlmcm4XWoPny5Pzy7MxS0Z2dn5pbmZ'
'6dv1R0oQV3fmHZdudmr8wuQ7nlhSJ121nPXbjoXpkpT12Gr5MXZudml1+iDi/OLs9jZxcXyqiP'
'LE6Wl2enVuYmy+7iSnlxYWnGxZFNzy5NzU3OXpmZLkH/0Kc788LM/LK7dBn14sRAQS17cX6mjN'
'ibw3QvzACWkxfmZrArGuf0bHlmahkHFH+aAuIBgnNFUG0WZ6Zm4RPQYwaGM1l+qSiNLs08vwKl'
'4Ed3evLK5CUY3citqAITM7VSnrmCWAMpllYuLC3PLq8sz7iXFhamidhLM+UXZqdmls67cwtLRL'
'CVpRlAZHpyeZK6hjaAXPA7fL6wsjRLhJudX54pl1cWl2cX5kdhll8EygCWk1B3mii8MI+jRV6Z'
'WSi/hM0iHWgGiu6Ll2cAXkaiErUmkQxLQLWpZbMYdAhEhCHF43TnZy7NzV6amZ+awZ8XsJkXZ5'
'dmRmHCZpewwCx1DDwAna7QqHGiAC+bPxusW6T5dGcvupPTL8wi5lIaOGBpVtiFyDZ1WWguwTZc'
'eviJwTYKsK+cp2AbR+QzQh+BT0WCWvIZoY/Cp3GCqs/46Qh8KhDUls8IPQqfHiboo/IZocfg06'
'KE9uDPv45GUlDiLNnt8r9QdK/qLfcqyUS6IESdSq42op2ttbAGi5yP0bR/F/l2jJ/kK9nPF/Fu'
'Yb3kFdKQtVIVsHYv+yA9rq6nOuKGaetCZ7Mw1HZIwM67SqL26tpVW1udWLjGtkqJdQINF7lw9a'
'pu1oyEEkeSoNKltasooGj89PCSj3VKOrJ020FKtNeEGMqRlO7xlAefnXh06WEQBhc3cZb6QCje'
'aOXnuvigxkVKKRzQHijKfwV2qnDLfXYJVohfhx0UN5oRvI4jT7VRxP8RqX2FasPIFtlAgduULZ'
'5uNKakzs2ecg1dFo+THc70IexlMPoxTTe8jmUnOh2LBqO0hOzYik3zzh5G8eUuxmXRB2o1eag/'
'JgM7yKtOfgFbTxzBbFXK1S8AaYq6YOxFsiVHii/X4+gLnnvOPXVKfVuLf3DdKvw0EX+9gSXV1z'
'fUhx0sdNo2ga+dc5+w9YNzvE6kmzC8ZFLU0NukOBLi2hHa3Ciyf8FrILb4vUbyNg3ZTrwQ5djP'
'HB6Kmgatk3bo4oza8vR1FEb+tof+hhqdTdcCDbaNyBI1rh0B6N+o+I2WK/phjXxXxRpGzGXL21'
'p2jiUvK2Q5LW/EdUM1a/gmpNaQOCLFa8p4BBqHpHFHvIgjsxg9rBsFQM/2qmzz0fxIKyh+Yqqk'
'BjGhvmet++gj6zUDYHHsekfmxEaHiLEaKu0djFjCR8sVj2M6eS5oK8tdB2X0p0ZVDZo+PWXseO'
'NKnlLpZtUT8ETDahlFLR/GbNCLXBqMB0GCrh3fTYgoa8rTb2q922hENoH2vxlWi+pFIZ35RUc2'
'1M0SyqGmH6OmUBJOB0W15ntNijnCOjOtcKAYPVOkl/uklYKKLUwgCIP8gXPVbN2GhbFDOIpDMp'
'yVKi1le+gY1rj6kLjFwCfCIHpRm8VVCm1HXUUsv4uAE7jQPKLlYsxl0CnmY6/bSOKsCN+GEpqm'
'5TU36EGvJj1uRxsh3UuHRElqEoi5LII3gbqWwnwcNVyXlVVcCeTu8tgm0QLTsM2VaUIxblErbK'
'NneInXr8YOH5R7kdpZQpseosJZg2sLroJCUe8VfLMTpdCDVar3d5ad2+YmQ9uFEiCdUikeDs5a'
'ly026aNux02kfLtpBuIRKjGMyKjTjWCE3hzqJUtCgHRiR5OihKb01Ckt3askLUHNScnL1P7bMX'
'UdTempZlZkktjdaVJPOu8n+upOkwDkqEGPknsJzomJotJAl/33re+4lbgQbUn1qjG9b6Wjk102'
'OBp/0EGwgFUHQ3stqMKt1Da/9vY67xjXZeCNbT8ObaEGiOhc9zrwYW26AyPRR98aSrdJ+dm6mG'
'XpVQXqJbJXHlOOu2k9xaCqCr5gCBuSNZ3qjboOVvqfmEYU95XwDFFHu0zQiU+sCYSmnCqqZgAm'
'vdi0M6JjglovscgVtIzWDKyLqM5XQ1sUFtUsDyjSArDK6rlewgnmSig7YV0/kyOR20XFsbvqOE'
'aTCSWHQh926Dhm2DitWke+EADf/Wx5rr8d1tpKMgS8fW/5Xp3q2fHZR0de0QrStXp4nSiHneOu'
'wQ+C8Sc4LtRIVuFADE8044VQzAVBU8esYZ+3IOLIedc9VAji3RkPK1qtVkEfEV/ZDfHoxDiSik'
'hxn+Q8ZyeIVxSLFwXNq4v7W0BOYuQFIZ5uCc1NNrNaKP5XaRaizT3eDaMu22GsbSX9bqqwedQp'
'mk1a5b0GOhtpYd21nC5CzNaqgWgPmyH5VBgHN9JtHnGnYEzAyEpNwQ3j8vLyImuXfLah74gDcV'
'vnBisaTTvyoyRz6rUvs7w4uTx1WSum0NjiynJiMauANdSjDlIE5yMsSJZb2k+VGZMOU21gJcYo'
'kiMxnZhn1IkZujEUOJZmWKLYsTbpkM0xVfRze3ncd50UWD7SkN1bXV/LsR/9ELyS8vohC60K00'
'lPPFGZk/iWrK2NU4gUulwaq+CTBKwkBvZtvMmnN/rEhOQrKufkisxWyvwcs5BWkfURWP222Awx'
'CGy8GazA2kE6Nt2n3YnzCrq4iUfKBv39tHvqfOLAq9qiqrohoVE1iBo1b2eVRm82Kb971WoT3X'
'nMVjm8E51ivbTeclUwvkqLm8IZSpRVPTQsqtFQeyQOqWQiU0iXoNEVEjgwR2jXW8US2lbFbEGS'
'XfWuO8bCqX7noZFiupvkSQJF5kLdV+wZpaxRLfRMMHahEItG7rvbHKyh6W9gPCwVkVUvYzlYpj'
'kjzQxLxE9X5JseCXXitjArAn80NAeZRZndM+fjX5baa6ol0CFXVR9Pu0+eT2kWb4hEMWaab+Bv'
'PqPGJL5BgXBuXtxAwqgV74Iyyp2Gui8ZKcRDLmhfBBS0o8abJbGjiWVDxGc8o+4LflNfqck4dU'
'hXvVmgBMeTEJ+cRXJuksSJLReEmraMJh5iieFxG/tiUS6OW+rMITZa5C4Ju4r6/9XZ+Rcm52an'
'VyfLl1bQyH813u4QJRJSeGuvnivjXT3svVugimB+ALk0xwvsv7EobNT4gIpl+Lccs+hBGrFpK+'
'XpUdQ6MBCHLPzbOISBilj4txjC4IABshCUE98FFfPibymQkL6j/vVH7Zvmguq8n/4Jy+7jPBe5'
'8zo3AeeVeWSXhBicoiCaqbcw5jxXyT9vDxngnGNnr/k7kkcEP+aKdi9t95Lk5O6OxslNtcyFzm'
'WesAqfydi9BATMbHzztsoNWBQFvzNzwjwUofKX7yoP1tWX3CP2Hg6NuBr3b2ESGIbqQryepRAm'
'LhnEQgzlQg/Z9loYKjQwgcEAdoUwLvAUtQIkkiK9N00sIs3DJz1KNC9J3b5d8kPMQRE9ypr6cq'
'HP7kGNpHDeHtQlMOMDq1gyo7sRXUodv88e1ETM7bPt+ZW5uVVYJyszzl0XvmcXN4UhHo3yUZjY'
'AB2xvUaX9eynELNiAxdyJBx5nv9prP2NZd2ue8LXH7QHyCFh9Y53wh3vhDveCXe8E+54J9zxTv'
'j79U54iKAPyWflkXBZfAv4s/JOUJ4MR7Qnw1ENVZ+Vd4LyZDimPRlGDE8G/vwrFGHZOQ1fzjpW'
'/s2se5V337S7AgHbaOCgvY/2dzkts3Fb39/bonODZkvhdHbw3M2RMnG31iGq8FBIPgm4pbcxdU'
'lRd47Rb1ASr+kcI8oOgLHPt/0YOb7jT911gz6O1qEGIaabB1F+zYdzqB5Ol/MnqvJ8ccrXQIk4'
'Zl4r1S9tnLFZUt2o2fEzUz55m3lS+FKBUSrJKYbsKamm6YCu5gIQpTKCWhz2/DQlI3lKHxUeI4'
'f2ortSDyWYKU4DYL/rNCRDnWP90xKPWAXNe4yeM5qhzh+j14xmqPPHyBP9X/NLh2ccDONu5T+X'
'ca+S6pdipl1wEb4Rj3He8WyKTYGWJFauizx59GDWc1FR9j1xrqi0m+TIK3MrHIrXM3EYb8V6k7'
'LZcZjg+JHUDdjVWuKg4huvs1C12/Zgz6b0ON6aNu56FDyYfqB73wpnXRBb5y3nV6ijppcQMWJR'
'P0PR2I7SNwyC+F0wE1ecbP4QtaoMhqqWo8pBze+Cmnvs4wMqivUFp8d5IJ8H7ciYByRtqjbPOJ'
'bem4hJfcHZZ4TxvYtSHWA0wOKACkk9DXXuz9+f7KEatuFs29EHukxjeTsR4XmaXjiZEZ6n6b2G'
'6gNTGXTpQ8wm6T7wOc3FRB8ZSo9g9pGhZAjYx9iACsh8GeoM5x9I9iGM1tEJxjPGCsl4xpc74h'
'lfpnc+JwXS4zxLA3E7BqJkbLofjNzzbGIwPZRawRxMDyVSwMGofnqdOZr2VD/6DlXYz+gHHwLN'
'JSYf3wHNJSYfnwHN0eT/PMwj8NYyLPcXYbn/lOVe1ae9q/x0gI28LXyiDgtYmbPpVkld0pDeqj'
'kxDr2vFgfZatp1USlvvp6S3VOxq9j2VVpUvcTKy04viMwH6BsujRdgUR3I73Xnk6uhV0u7F+jZ'
'SQzJAGQ/yMj3U1Rp57v59Jjfdq/qg7KM/nrTo1cFXpP85Dqur2OJdCtBkWyZinnNprdjhLr+br'
'IaPT2gIl2/i4xG+G433edN9gJnIA5G/S4yIcWQPoAMyd6gQlG/iyJRx5AsQEz70bdde7ds4Tk7'
'zoubvzdtZALBKlnC19GeRPkjc3YPHiTJXtNbps+5YbtfLJGSIVN9zZXsftm9Jc3koQ5jxWR9p6'
'wKXXiXvc+wRgBSaIjAfpUh4vGbPJbgn7xGEBnjPM///EwmW16cevbP0NSwD5jlFceyf71nYA99'
'yZ36TE8yr93j5skO+GKOU9DBIb+u7iUkQ538UkTDKIWBwIx16OnmFuSnwuh5290J23T6QysrR3'
'hhR0Nf+Zih01OI2eHozlbrLtIGHEJfkhbCNTq8e3RWVtdiUsyFPZKPq2jRODc+fv369ZJHiHLy'
'Py4WjUvCvDFAFiqs8AtkfZm2xk5gwJ24cdQ8iurhbTR93pWhdzxqSqx1PpMaZ+YElRRiaHU1Ct'
'BpXJ8qL0wuzS7BKebF2eXLeAwwj4R0mpqexaMLnXfwuPAcHMeKopygvkAXLmjm3SK/SQlqYXav'
'lD59YlbanyQepEBj+tgsL7prwVagokF3jEjOFg69reKEgjmnX3T8Q6h8ydmBPyP0sHEeOKzPA3'
'cbZ4e79dnhHvh0TM4Z/Bmhw8bZgT//9wfp7LDALJ3/FwdJhF3lRSNyOw7QUgs3UOSIVXwL1m5N'
'rszxyimgJLkc2jNYp/CvLYr1ttH0tujazq9vB82wvsUZCtkmj3DylkNXMjLLYDIr8itzZ1sq0w'
'vwlO2+sgE/vTqirG2mTREW7Chr+yZu9MIuCjbq6oYX7Vxj7hLdOFCw24gtbTQ96O/A16VbmPYQ'
'b6oovOzFmn8jwJH59bC9sYn1tnzQLNt1rWtShhW6vMKgFtuBf102A03JOD0NubzilTesB7cRoO'
'ckCXQ4kp0zossU1UjE0Zt9zxgm4s4cMFnntMM1aa+wWRvxIl8xMphPQeFXOwAqzhyJCa/CrqtV'
'NpjpScd+yMECh4web4iDncTVxMPVNtGxdY+cCGZAmwiiTV2YGGjTr2Es39iA6h6P5+Q4Df04ug'
'/Utv3jfK1Pujk7j6CfQS14zScbquonjRM9lkSUi26j3dL+CVwv5dGVoDK5eKuiFHyhbrh48Byo'
'h86pmkhI5ePsNdeCVtNr7qB1kU3NdPpcCwUbGdEyXaKT6oFvd+WBqIqiZDRPizPStkqQ0tdwCF'
'fjab3qGilPY2cAvIYP63o+lUmU3QLmlGDDSy0k4y58HERCQpYIKX1HthVjLSrmspV/j+kyTBbc'
'Cqajgp2rqSzxJfbMb3XvHj3eG6H4Vmp5o5xaasFaU96hYyRz/TM1rwzyEcXSUOEUccC+Uba2kz'
'ZJiEsOmiDYv0Kq4EkTjSC8FZOzKyL1rLft8bC30NdHqqXrmKuq7k7xLNCjdPIVEfqbUk2Zrjtl'
'izHRtMXjqdZv7dD6T8pe2fxSdwokdouIm/bcspV9iNyZ1PKUF/J0gvYqzTCKDBqbPeFwZuRlAr'
'm+qPy9CTHNl7MsnBcxjZVaSZGscOXAxJmsdC4gznmlCytPOeYByQcp8szfIuuOSTZZOHVkNsM9'
'nTQUSazDC4v64NYEK8L0xbB5bR3jiylUJ3FJ0nK4rn7CS2sdw4sClpc4pinlNgXERN3ixywdM0'
'qPiIhSsMDCZku8Xsckf0PscCV0Eu4nSnMOEHUlTvdJAlHO8nREwz8d/cYCnJhJ+9sH5D/R0RDa'
'WNZRTBKJcHiMNDrLxjmNEO/JaKeO+dPqGDYgjX6dLv3xTMPThY7Y3YpzP5L0B6cxaEXJYUn0GO'
'Y2fGlj+pjHr5K4mdh4qMfZjlSUuDRpeBhz4cYGZxNa52WMiAsTUl7cFvmRAWogIMUbTtFWt1hB'
'PBiJDkJ7FFcOLVOo9JIklu2LeCJCS1nQ2oGzVrDtVdCrz4tYhiur4gJZFTe0VfF5OiK/yP4HTB'
'1WM9hit7vqcBuaQ5dciwsJA+RuuRb3pHIt4pGcshRwno4XKDr2H2IkjA41IqkddYwCSC+KRgmz'
'g9mmarCrthIrBOzkWm9p1xJzzDyBJdnlX73JT3xeJwtm3LZ4F4r+kMwCiSN+XvKN3SVWthd0si'
'2VkeQFZ9AgL1rZXqCg2O8dUFkgX6bQRjUgnDKbymgjpRM0mzudug6rc0r/EEWBdY/4aSDpHfr1'
'h6jRpThtJhrHXtZ2B5UC5WVniPIlxxkjX3Yeoly4ccbIlzHykbY7/HHR3t2k0Om0ctbOwkTn7r'
'UHEMHVdrMmnib9+H2lWcNIC7G3xx7xKrkQdvdgGIDGlNXgyK3dFwCx2/VX+H+OSziFn7wTTuGO'
'w8Idh4U7Dgt3HBbuOCz8PTssPCXmRf6sHBaUa8Kj2pCITgqjYl7kz8pJQZkij2pT5DHDvMif/y'
'3t7s6ObHb533Pcq7CvXo3NYJ5hGcFjXiBKktqDXN6D4tfrtbAuQcvhbLpSnuMNI/mUhMyWoU7v'
'ECStPCgk1N4shoIdJRQjfc+Ph2Cvcm28Xcd/4gyz+mUKSnYbe8GUzKCLu+vtekX2oKZpONvw6z'
'6/gmB/Zv3aFNtEVM1D8sQ5OAFXrolVkrr2TPPU1IkTJdn7LoYhIIEPEEqlkjiRY5Ogisg31Jaw'
'tYsgU0eg6KjASyUR8sG6O4KFVqin5XDkKJYajb3Fdck3DBxP3QJHNHzcBpIAm0R1DurfFLkgGo'
'FWSrDXRZGJGzeLJRgJo5TxdEJjffoWWC/uwOTUFd7cOLQ4AviManIyziOjSfoS+p3Y48+zjPz0'
'zNJUeXYRBMboOR1wQBPeqM8tGGifuQXal0LBmFA+97R7tLFWgi5fh3bUU0XoiczJ+DPrq6UrXj'
'Pa9Go4GKNzjX26MdVSsJ5qZ6W+FbdE/dBUUqmHn3brQc14HhE3r9440HjUstCKydqO20gvUH4N'
'p1O2o05hu8do+cQXd6S+4mOnEv2Amskx1zOEAgoMpUkxNe14Vdbp5M/6Gef7ek8bRAc9JdYPIe'
'Ro3lLvio+NH9Pql+oiEf3FLQBFSmtek5C7Mb5Teq3Ao9mhy1ysZHPTBfythLEp8HbYdp+GP7tf'
'KvM7M5anfFgnuw+/mqOHL/YudtmqH0vFIttaqrF8FM0d3agMKcY3z1e/C9G9qtx84suMePAlxb'
'px2Bu2TMcnLLy4PJ96eAXnDEAw/VxqPWhGrV0fS+EEqB/N51L4R7dUIJQL59xCN2ZJIlViNApF'
'XZkQwIdK0MBT3O0z8a+IQOpH48GUGMFT5GVCgXp7HY6FY/gwtK5fhibi2aRnr9jVfU2nFVDdSI'
'oDeSlmu1e32edAXtGE+omqETkHrcUy1cqIKjMtr1r1pLoj64bp5JX0bfy0BNl/9ZVXRzuegN3u'
'TCSbMiaDRoL1J0qnJk5F8bslbfXaAdVnyP5ZW5u9PmRR5NAfRUUamFMHteAFRwTl9IJo3aTowB'
'R+Ynctwt5NNeGbIJIJkb9BuSKkMvR7LOLXQ/okSDNodxU16qylcaAnzfg+CZvYlU5X5QKUqicM'
'YR7eDHCqc9FaRoD6pSJGsiBeKZQKOnQN3ftJ2Bd6dojZjelg6ntbeBmr8+SgvwF6IODLZUJWjr'
'xov2UbkTrL79jilicp7cVyQFwYx1pCZSB+50/JVctzUfxulo+q9BQb2BNNGFeL/G90tchheuVX'
'tkjjdYi6mMPcjXhl2wBisMCVtF+E4pbXiLT8iviCV70VjYO1JAMnHedMGQqfIN6+NFJkRYsiOJ'
'GjzeU4DJDf7F6aWVZRV3A/Ip7gvcDrXFFoM4LVhKyuX/4btgWvVZTjNJkWYq9F7I+dMOJ81CrE'
'BiUdqND1XXgNaKJM6WZSO+yQtjJoXaKy+1vGtPOLdfSrY2Tw83ZIIYXpUoOblvtMfOxeVCWxAU'
'BqjbVmfaWzJk/u0USFDWDeDom9jvxF2oY7sgJ8IIkfzF2ZJ82rU6Ae111r+t41Mh1JbPZRWyfi'
'Ic8ZUdU9QkFYv6KT0YL+GtTImUG4NFxfDyp0CaT1kqaP0f99fkQY6BhN+pY1Zqc1fyOok4sI7q'
'u220X4UUonYqRI5QDaxL09yeTuiFhcOK0Mc96odrO2uXMOBZR8U2lEApDn4qhlHDRs9iQndyRv'
'sDLafyhON6qs9h/CdKMHEs8NP4RJlA9TYH022/8EJzqeda/oPDaUQ+dm5zuRd2yeiyO4yPHokG'
'Eip9ahz7s1Emgk/4k4g6iykv9EnAhZmcl/ghIhawvzj6zYe9jILUblfWa06O1T+QfT9ueqSiJD'
'kPxD6d8x2V3Ugp1SHN/Ktj1DSeCRm3IPQ3eYuUQ0IrFPDyFskUG6iPC4OMRREfEQK/yJZe/le4'
'ZpttrnPPug2tdW/RubHpAd62bpteDJUnJQpURdTOBOFWd0vXKu2QHLvcPux7GF7Ra9cRw6deTm'
'zS5z4bKqlT9k5zq7yg/a/VLyWfSGzTyL3pjZwkU7OxduoLMgLm6hE31G+z7615B9X7wF8Tva9x'
'07i9Asvy+Fj4WGvcfMpoYNYooC1SB+Ru9DifGn2pOvubvtPpYf1GS2LN9yeXtA5UEjYmTL+nvh'
'dXtvIvveW+xyn50JqjIC+IRDAulJvcCQ4CN1LqGM6Cnp3rL+Xjhj20ukhS56QbPLc9vEBcigXI'
'AU3rAHcQ7KNLYnbZvDh+O8Ud1u702XFZuXB6k0fs89Zg9gUHGqmLllxX4oi98K15Ex6JLar06r'
'bFi3izziGyfOEo6/t6NbpSOVjcLHP2Bpr9S77dzS8uTyytLqyjzZOC/Ozkw7d+X22oNLU5dnpl'
'fm4KuVG7L7oVh5Gb5k8CksJbpevTK59JzTQz+uTE3NLC05e/DLxcnZuZXyjHMod8DeOzt/sTy5'
'qkCP5vbYA1OT81Mz2O708SOwCpq0OeYG7V5AYWYZOu+3sy/NLEG3fXZmfsHJXDj78pmNsIT34V'
'tBe4s9MtuVYNxYiCyJzhuQxtqz/+Vz9iA5qv42Xjj9q9ht9Z+n3FafIAVybmVq1p1sw6GYPR3+'
'f+e8esd79e/Re/VuMv3mgJ+eE5Oy+ow+rYecfvJDxScVh+EzZiHD9wbofXrGsfIPukvaU8PT+Q'
'/oIYXfMBz+h51eONdNa4f/PMX+PwOTF+sKlAGp5F7xPRpRMlJQnEgh8S4gL++i4ncBeYzvr3vC'
'jOMwTujpQpwRXiWBZ+euakDZ5jjrAiIPvbo7vtmTJe0MGRBMQLXP2W+PCgTzgGScffl7uadxch'
'BCmwC1S+kCVWVE60FKTxFDsPoe0HZ+0xJQ1nmEPD4+h94T9PRChcFq4VSjSYfdQCg+31TSdYxQ'
'WNSpQ1QuJmgJyFhp+irFI2Mp0T86czclmiXzxlrQongXeALZ9CvXXIkJaLQG5EuMNktBn83nG1'
'kY7SPq+QaDepyjlGBtQ2UIMdrCaDjoa0jnbMrAwUGzvDQlyOFYCapW06tH2iIC52dab2pkhSH3'
'Cfe9Lm0AMaqowhxNTAyqM0dhYvbZf6ZQ7XWOE6pfsrrh2q6b2LpVDhuHDk5GbiqQFniApxg0Ee'
'WqRGbEg8j8wrKqo5YTpUHR75KoNPz7naLDHnfibCch8MXR8QQheoEQx4kQX8oIqM85Sf49P5u5'
'fUJ4Cn/00aMFHttZ0AyLxGHa6ISVijw4GPHgruCugCc/EYycs5oWd7uOYt9mH0NP8tyQuxvFou'
'KkYCXbjZUkdfLTGwiLW3MZfIcoffpUJ6UxTdJJSpoVQzIAyYFY+7BiuX7nNEmxHZ0/B4fKeQdr'
'FKcY97sAjr5CvO3Awy2qvDj1ncP+7JlO7Pvp+Su7CikIPl7dC+P5FvlFORXYKWqwU3wzh/YTXx'
'+maIbaGFbNN5Pn0HNgr0r2CJoj9SzL8JtI3BXGB1G/dd1X6XpYd9JXBXG3ksHHwIPSauO6Y1PP'
'eBNNibTF77Q22Y3Q0+aBmNJmyG8dCzHVtA5yzVtArBnFgyGWpzRtbBQQIwZ5oLKTCHoUhs0dd2'
'TqxWkMdDgGzVWD+jkxx4ypgG6p4xzvd/rKFKo9unxlcXq2XIQPM1cWi/T9HHr78utZQDhyG2Eg'
'IQXrgpBGoGgYlih8GGmA0Q6s+S3cHAA97mZu4dL0wqXVqYWF8vTs/OTyQnn18sLSclH/srRcnp'
'm8srpYXniWXBQ64DMXZ99JVmP1y/zklZmlxcmpmY7C6GAwU15dnFy+nB6L8Am9JXDR5XM63FBq'
'RJHcgrUGyr9daLdqflNzFN11pZll8iVJp4aeoVWoFGEMMzKsisspGrgUDtREGeUQuZDjQwn1+C'
'WshhV+v9btSMCNj7OPIRwQELFasDa+FoYt6NFrkBcrKPqr6OIw887lVwqwmDghVOHVVwpkHlyF'
'uSu8eg4WPh0JlKmWzZA00xx4GZEkMYqJCWKGw4xkSgjj0b3KRkceFBIC4wZLOrld+cFN+uDrnN'
'N0/kChpE11QJzCUxrvZ8alaEE3QoOYkkEA9Shrty3HBaSpJiMfHyWgKt5N0B4qlIW/xho74yfG'
'L89MTisKm6QsbZH9UmcunUKcKAJWqZPqxqpDuquc60j2C4YgUnDiahLRhYqkOpYlC2eWqBG2EN'
'1CF1FCUgoPLnA261gVTCsxWQlHxgxpK/4WVmXDOZB796UkUlJaor7Fxf0tCB1yeMS3Y1FRojEQ'
'B6J8QdtqwYvN2zQrJ/jsq6o/3a1t5Abyv19L9KQy7JGZssiBanWecz49cD73OJ8AcmtEhy0kxx'
'j7xfPeKlJbBTTVUltlN0BFgu+PW0ZoU84Hzk2QHrSk/d4qmwBs+jqiuyIMrSIh7BIemGCieKnZ'
'MMvmpGJG2RZPRCyi1bzq5mS+COHkRNk3nSn2TjRnx37708OMA+Nv4QM5TazIR2fIGFeiMr6l8Z'
'oc8XKNnleQ/ydRT2kuzTYKBwkWrENMoKoTlOC46OEtmNTpRl5cwrYicpKoKMIFYSje2FTP+Axa'
'RkxzsoHjeFSnlR0QHPQ0GB10jUmHJt12s6ZCvrspOqvWQMngICoofbvuf+4I+TPIlCJTjLJXOx'
'1TaYo75IDxfKBCD1P/jaUvUjdBTcvnf8Oi4U7NLk7re33xAlAvJhK60kLdnQvq7RvjV7xKMfWz'
'HgxvDsYGxBVfBBU/vM6PtHYriinDC6jGG6A1r4V3xipCJcfNBXLi3ZVfxTdLqfcKOLRKIjxKL8'
'EGEu8VNp1B53DivcKmM+zca1/Q9x7XiESnYgqJEZ+SZvpwhii50+y9QtdVV3n9X009AcBWNp18'
'4gnAtY4nANcS2ODdxjXC5g06hDZAaf4HoDSHLtAdlFB/DJViIroRDceLTyiX21tenQqZZchF20'
'w6EgVrNY6Cn8wWLk4CcdSTBrDPYfsSfRt03uMMOXsLT8J+vdrlngLzBDdqXoWlbZcCtr2PGwI2'
'xKb2SByLQZqXNCQjkFPSeZM6fxQ7l7sIo0OWzQwuxf1YVCtu1eoCyQiE66Cxq4WJf6VElnBrOb'
'Zzv/0RS8dwuQFFxvL/yKKHQz5mU9YBZ2LHdznh4hlNH3u130JMFnfcfU87bHn4bLYeUDLbydhw'
'CfMl5mZa9gkDtGH5IdazZCEgdi09BI4XcUNHDlJxZG44h52RRByZG84Jp6gJYTmvGXWyRLzXgB'
'AHmDlpqbxBXhlbb5kMeqr+LkPFVYYIvGYMDPOIvyEWiziczRuEthnO5g2665wgw873YJawf4J+'
'rQ/Dzs2iCeW3LC2Q5mEynSqO/HssclH5UUsH+Xg/X5f+Z5artu+iS++evWbVHSnAaQ1GXCi6+M'
'lvNguj2Kx474ywLvjuKKyXOLdRAc94K+TSYiZsNoS0UnpGEiiOyjWrihxCWAGyfIGaEbH4/vhG'
'WIUPeT/eCJulsghygG7PCchyPsBeOOfI34HIEwscmsKGfgkusaEjoqSoFJGBG04eNQed5HSvKC'
'Q/kMTN4n7VbXVGJvADfFv9vRmBZZwfschQ+u8thZ08bFa9lyjAOMDxhmTs5JNjJ89KLgLaYHVs'
's9gTpMDHL9Jm+EUoehe1yJzLDjhSAg4dT6Fq/8z4U3IDSJ9A97zxDJwwnuLdm1zcnimIZV8Ukr'
'd2DqRL7nFyonhk0WtGPgw1pinaT4kKH1DX6gztZeiAAbIQNCgpnDPybgtAaI19lOIX/Riuip/C'
'VXEIVgVfv7rqflQWAgqJH7Moue4cfdWuWrn8eVVHn3nUgUdGN4Y5z/zrHae0gnBJ1nRp+DE1om'
'ynT0PW9GnYa4DIpwE5+HEBWc6HLbJVHtUDUjnuk7iNR82KiQdy64fFYSGne0Bu/XASD4v7GJQE'
'01nhVgDhw74xAWXQjT7jHMzf7/KNtg6pRmhMnDp95jGjd5xXqgDNHNbt4rz+ZOwukZV5/UmLXo'
'zGoCyCDgDaRQFlnY8yFe5zF8kkii5VSQSMzrPQOZWHVg7qZjHa1keTnSMzfBQ7dwwQ9YVDfwJA'
'Pc7HkKl+EZlqBN80emQivoSv9+mWnXiFb9bk9l0YDZWhj1mkjFykr8hoH2dGe0yV3ZXTdmWxHm'
'Exagna5/jbPcJiH4+ntkdY7OMxi/UIi32cWWxaQJbzCSbuaYz2hDf6aENhX0AW4YjlzfitR/iN'
'2vm44rce4bdPJJGyuEPFbz3Cb59goj8ioIzzSYssxzmX3RncyzPvxBdGE0afyGVU7BOKy3qEyz'
'6Z7BMH+Unsc8gAZRGEF2GRgLLOp1ggr6k+QR6qccPHaHwTdo1ofMuDKSfzDhlf3bWmVweeRIqd'
'Qz91bV4t8vHeOPhwMybdkFmpW0Ampggy66eSY0Bm/VQsAXuEWT/FEvCvLIH1OJ/mLe+bFur8Ej'
'+nBdpaLT6QMO9GcQa29ZJb5lsEPKmuK63a2EQmxshHD4YU1uEIWld7C17KeRFdSuOuFWy08cU8'
'PhbdwOeBhjXD0xKs1t4IQPnegHbM158bQWuMFzU9cFxemF4YQW2kGQSj59wt7xoFQfHcRnsNk5'
'7i9hPTsUeNHAhyQFOoB+j46SQdcWF+Ot6dGZRFEO7OJaryGVz0v2LRNfGke83fGRPToxc0+ZyP'
'vi5qqeOd02csOrbup6+41D9LrETo9cqaJRAU5LzuvbJmPxuj1ytr9rPxNPfKmv0sT7Nq33J+mc'
'Sxbh+XH4E+q4bfK8vvl5PtW1x3UARuryy/X2aBe2wAgwV+Hof/6zj8e9zLXm19LGzgVQdeOTVR'
'9su48Qbo85xu4Cx9xXH/txY9AD+CLzVrbYqpSWlU3DWMYgdHt5K7IK6xMnt9Qh6q+XklsvvkFE'
'DQ+wyQhaD7naMGKIugUee4RsNyfg2rjQIaMzcUGm2KpndTNJCKVBPaO6E7QA3915JoWNzF/c6j'
'BiiLoGNwQjlDLPEvkIr/Cqn4KJrXJAikvHAgzwZeGipq4F6sxfV6Yfb301ck6RctuoQ+oABQ5o'
'tqI1OgDIJskG+qmuX8S5XFoVefKgjUa4AyCBqAs4eqlnG+ZNFhRJXBlgjUY4CoVD9w1Ssw+H7n'
'Ny1x8ck/R5lJxacKNGpxqirBASO8JjF0Y0cxdmOQpLqSSUc5SQmP4T3dbyKKeeL9fiLIl+O11S'
'/MQyAoeD/NR7+srS/HvN8vlPtyvLb6hXm+HK+tfhrvV+K11S9cQaAvq7XVL2vrK8n2La6r1la/'
'cMVXeG3NCCjjfBWrHc+fiQ+hlHiRrUaaaPq9DyX+0FRRWKGop4a+opYMQ/sYmjdAFoLuc44YoC'
'yCRpxR7ZH6s0ftW/icdgY+OGcPKP829CxkQ2lErnPZsvqK7nN1rx5G5D7XW+YvF97bPfjBXtWg'
'ioBw/NYREBSabyFzw6cflcwNrTuBEO4EQrgTCOFOIIQ7gRDuBEL4TyFzw6SRuWHyFpkbSkbmht'
'JbytzwcQ6PsM47YP5HHWBytfOmcjdQENMinAhv+NWxml/fAIkVNTx+0ktHgjiOmc2+lhXQsUl4'
'igpAknOd3vOp/UH9AFIX1QH6atNdA+c5VSFYU852nGETsKji5Stf/aIjt6iWhaq3U7DpVg6Oqa'
'3NgmqGbi1ZAuqHACyxZatTQQRhbxB3LJCA183SKgYDDlCTSp5gSfxDj57Bcvi5Fg4XKUL3yF7c'
'EJy0SXXAhhoge28EuC/AUfrE2MTJ4smTJ90d32tKUEx5+xol4zRMhVuNNkhujQbfVZvosr9A5L'
'erIe3CKrpAPB62SpjREeLf0JHY/EV3pFQt9Sv/rPVENa1PYwv625j4w8n386lKxABShT+rCvRN'
'dYJBGTo6eso96R49mm7rGfekEa2ho9IJ80F3quoY/HZS/VEPu12/hoqWgUCkG3umKwZP3RyDsZ'
'tgcKIbBskYGGr+4wkjBoi/nohn7K2zwa6TvTuTSPxFY86fTs45YJQmwvm4kuIAY9bNCh1sENdJ'
'0jnBdCaJ4wpdqRvPb1zwGbPgLn2c6N5HVx4yZvD0bisYvXZQpJbwr6pfo8NGKjxIC4neWXAEJF'
'/09OmiuxXUoeHo6YmTo8l1BtVUbyOpn0oYpmVZN9WqjsaJeq/oaLyShlU//kWxqfFXoQWSSXo5'
'WAIU0Q9L4xQ5Rcnay0EzmxylO07pHcHBJ7jhFqKCOyJ3tHT/z6Qf5aci9PaZfCu0vxQbEI09Rk'
'5B8TYTqbcrhKatNyb0LuYCqfC7p/V2RS2dTLRlBPVUYw8ShEJSFE5HBYoUibOf2BUnjMakLUrJ'
'HKPYrbWS4q4JbBfbSbWKKZAwOq7Z7u02C23iG13tkLNOFyR/HDvkoE/IofzXLHeJA5yrnlXEAk'
'M5KOmXuMTgY6cnHis+9vhZ3Obwf/J2P5ECclReNNmV9Mtp9E0ly10kuZpp+fBR5Zztnj2JSIwD'
'78OhCb7Ah/HNJnw+dcbdbI7D2oDPp88+Vjr1mIsLZRy3WADRMuX9NumYg0Ncd3IJx5xrHYFErw'
'Fl9iccc645Oeeg/f3ZAeWZE9GFz7czilYJLchTvmUJNcjQgkyS2jFN1aoDrQefYNHCwrgDqrVm'
'QikTk6ML1L4qU3VVgqp4lFeEX3Fu06Gw7m+we9dVQuiqCsKBC0JvfoSK0SHFU8Z39h6cSOtjr/'
'nNMJmPJdmacnq09fAocAQQSEXMSeGZ5qInn3yyKP8zBxkAg3uS3k04E9cMXyu030WJKbVovgYk'
'f43ybooSD7e/OW7f6ul1p53sPL9zpTJv2VD2D7obyvbpFpWl7MStLWUa07dgKvvfxyRo6PffCR'
'p6x1Z2x1Z2x1Z2x1Z2x1b2n4St7IJhK7twC1vZuGErG39LtrLP3Ue2stdkC8y/eR9weXwMTljL'
'+IlDIJflXYxXBH8NdCeyUilTVjGdSfQ2DGf2LiojdL6yPOXONMLKpuiLbANTxxPjeQ3s3zVMHF'
'JxLzX9jRAzUbpTyrzG4a/8Gy06lqHojAspxOGs4lWugdTjSEqkVsPg0l3SA0WKDQf7u5xQaUc9'
'e9LWQ8L4qvJKAeMTxUOFcoVoC5r2qwWQsrzn1kOzFIhgdsiNzGQFpH00cDtlk6I2vLHUxrPOGP'
'23fPLkOfrvZRwFaJFPjk2cgsPK8qnT5x57Ev4rPan+vAwbzgVyFod9oyLv0CQ+HjYPigTma4ra'
'TXErvc7xwADLbZ+iu9oyq6DJuuWLU+7p06efpBN97FZC7pavKLUHo0kEfmudnCub6xX8HyuVWj'
'dacVKqm5UavR1rYsqaBOt29p3uVeSgkdGrnYYjrUuKhSPWgiO/tSqTN0LV51fm5kZHu5YjHh45'
'OXr+tixcgtOG3yJ37HU4yxm4Se5W/AlOH25rW3pMFD/a2i66hND5tzuk7VJrG7/dbERcCDSJih'
'wzEyM8vesIXwzqp0+5Vy/5rSV6j4g/T0YXg5q/nJyIi7NzM8uwnbrrLUFjtzpH11sK0xXYas6e'
'AYQr19DeNjIywpDR9Vapev0yaHbTwIdYa9R96in39KlR970u/TYXXlc/KbqNj4MclKcy1CSuLB'
'iqaR4p6QI+yaOJs51LTreG1SfOnjlz5vHTZ0+e1Ot/jYK4oWf5DdXKk4+fTLdSenuTOcLjB1Iw'
'Uca16W4UDjMGOrfgYGwHyaXaOWK0QwwwmmCAM7syAIYwdq/yRJbkTTUWuRLUQM02GIDCUW8RFK'
'Zy9wo3YXOoF19L1P3rF/jh6sgoDmxJKCRdMGF0rGDXxTLzPHaQszhyKclDH1XmFRw6R+wiXGIa'
'PLYrDdRTctlE0xbQbugrk2Y8N7AcpmJqjNy2SdOg001smjo7i5LgtymIdYaxZX6bL7mQxAqHHv'
'yv4yb6xtjrdIEF/4LQemP5dThQNN849zrsnfA3MO8br5ReR8UAGfmNV18u2GJN5dp0uVS7jo8Z'
'Yosf7/vr0BAcoDbQ/ZMNktJT0aWuQFvlzuA79sbmReqSdmI07ow1+KYLN77roWoN0y9xINDYom'
'obVlidn3sjxHCYuNGqqiO06zNworteM4p5CJtxwjvuqfByQVmKzYc8PvEBqlnuSAG0ocLo+QTU'
'5jtBDpshCb1b4WlmBh0rsKkMp0JKtCCgasVRUbk3zg8JaIxyPAg46snj9E5WEmub0VUDH0Q0TR'
'uyCjAhWRDX4DRMfWJdic8oY4g68HDJ7roO65L2+4QNu4C5YVFmTjy2fHLi3OmT5yYeK52cAPIx'
'd4Poxe9a6Da8CDRMKkn9hxhcvd5GG8HEY0XKNCtxWlFgLVWaQaMVBz9Vyo7n4qYhBn/Wk4JIMb'
'uhh+pHR6+0wtmlBfYQHBmN15S2/JS2wtdAzHi0uPz62MrSeDWsROMv+mvjMSbjZV9uc8cv1cI1'
'r7a6QChE44jPuNHJqyoKdwnHwoKG08QLRlf1XYv6cFWNR259ZbB419t1hK9cBZmxTjWNAQHSpQ'
'bLNRzKqXEJN042utJma6v2CH1SdUft+Dk3yUXpA40M7rEjL40d2Ro7Ul0+cvnckSvnjiyVjqy/'
'fKzkzgXX/OsBBkrTaQGJ/RF1FXD82bDK19PHIsAVKKM2+ossqqryFfaeV0dsM8TZu6EmYY8fxh'
'Crca8R0HwoKA1nnHEd72ybxqk6GBuz3VGkoQ62xvVaFHahQUsDjkCS1YCYJ3HdbZA+cYHxGrko'
'fyy+wHgfXWD8sGVmNjcuMZDficQwfRVT8bC7ax5J6/Qupwq727Hi5a7WaryAeB+9EjyYuIB4X8'
'cFxPs6LiDeRxcQ/1ucyez7+MnJ71nufFgf0+b9t3YPUXLnpaIS6ModgngybkzeO+ALeXq4Xjf7'
'5NtRrmjz4Yg9RWDO8Aipzs1pgsqZLDb5m0Q7aJj4aazvE/96ZeP/vs74rN/XGZ/1+5LxWf/ov7'
'LsIePF/Xc8Pmv+/nQBPr3Ir4ngsHmbYtLx530qvJSEAvibIbuX9DaJ4Ml3ChjB87TdL5FIJCjm'
'venAqaLvzU6XVUmMPMo3qBTRsrcs33IP2DZHj6muru1IVNBBgVzYyT1kD3FQJ/79HvrdViAocB'
'4KUGmO0tl3yyid0h3F90yGBu1/u6FBB247NCiiy0E0uObgrdHl4lS5ZPfx29/hPVBv36m7uwes'
'LUup3KjtpJ+1Dx8iCu4X+BUBw5wOVDAGZMWrDR+mxu9JNy7xPMu6YG7a3sc9raqU9nfTeB64aS'
'Dd8t4oEep3wu6lkE/D+6nyfV2ZiaPElblk7ozdx0+Dhx2qc3/3OgtUpixlc8ftXgqoMXzAzUKl'
'Q51Y+o0yF8mdRKRA/AznZIa6dTCLJcpcECanp+VtRMMHqfGOCvHjhDKVyxXtrH/DHx7u3n4cYr'
'mMxShwr4fkH74XKgyU5Vvusp2T4I3QuooGMJy/VdDYA3ElCVacu2gf4EAW8Ltu6L5bNeToOtJO'
'/i8tu5cmK/e4bUMF0K1agR9J4N17OppaIvFUNooiX23wO8ZVflAmcqaDrxJxict7NxJhiqegFX'
'oXtioh00HyZLsxixlPGRoxvkW5gr2HEwPTdVKN4hMPlBOw/K9Zdh+z2ndyyNm3MeRjdg9mZwUs'
'caAH03Xnwo0yFeCY1M/i07beZ/FdWh/8vdfZB3/vc/YX/s9B244ZHHhsyGhIRnV09xVRMsI8lc'
'2quXfYA2qXkSl95CbNqFBbZV0JBHYfv/8W8jx8k+ocCqgsFbAqB0WUaN83q1qmgmWpkP/HGXvI'
'GFPurH2PpG+GyaqvBxur+H7bCG5+WH6eol/L8mPuWfuQfsO0avBJ78355KCutBgzzIrZln7aE8'
'EGiFNfSA+uMx610ayGRRiLWz0dpv1wsKy/M7vkf7XHHlAzkyhuJYvn7rH7W150bRV0B6ZKH36d'
'rYJsPUQ/KBrCkRS1NYkRnsPflvinSf6FIoTjVWfQYhWht6y/556z91NrBg16b5sG+7CqMfxJex'
'+cjDvpeTNxvhdqGE1cREmNQeGAfli1dBscXooDqpWldu4cKDYc4QLFjOgYwyWtpW1PlKbi38tm'
'4fyHLduOm+wadB5gmHtFpoY+w0I/eN0DeQlq8yp2s0qo3DoEuYO14OD1ItShbnHu/fr26rbXFJ'
'WuD76+4DXzL9h9vC5vyjo3DWDPwSSEW+Rb/h12Hy/a28lIoOiRielReNEe1Fqr2b/V0b+IQeFp'
'EQnDsU7MiKmvbzvE+S980JIY5x/N3olxfifG+f+3Mc5tClF9N3w+Qp+zzj3w+TB97nGG4fNB+t'
'zr3Auf77H/WQ/dbeNF97/Hu+2f7nEn0eyBXjPE58U4N5UyMQb11tkzAMaLUB9vISIj6DN6GdkY'
'vvccxrjkIHKRK1sI0YpDztHpTrlmNX2YPbyzQ/JiUnuOPUe+f3z/i+citsD7+J67Kp43aKJohM'
'h2RoI+jEIUCfqr0obsoeJ/iBF6abZ2VBTwhF8i2xc4Frh7Idh4vu03dyR/T5wNNAoluAHHZd/y'
'2JKiysuFNAdbaNT8loo5ydHMdbFmeJ3fuUQ+x/IJ9ctfppRP1nOJGIilk89zWmRgXbd11G2yql'
'CP5JeNdiykcTX0JdkQe+Opx8UeRRmq1Uxqm+Ofx5ilQfWce+oJI0jfCBlQPsB+rsiKGAl60cnm'
'/6+MjjpxnVzjQgmwjQHniJ7+preN7lBG3P1UmFp2msCFTAcUiSAJP2C4DfLK26G45GuqKZ09Sk'
'c+hhMOxcWETlJtUGxtNEsRXI26FarwT8SjVX+tvbFBDTU5H2sUjbVgb8AY3mjLA37dgBNIPKem'
'tZ3WcgFPjwXFWOxYhs3ItYmqL+Eto6KKCF6gY2q6It4qiFNDjFuj3WyEHGDMURNBQbltZ7/9VU'
'uD0OR51ulxjlNIfp3zlm4g2JRdcifNaLY8KxjjmsxxakFf0EusqHYZnwNf77IoseYLEy7K/m10'
'GGmdA9Zs8s9M9EKsXhcwzgZuAIWG14StHtgzWsXwYuiNfrc5GJBkOJyTEgIghvcRPJ+CWgDFx+'
'9JaBagI86o/WbGAFvOO6CBI/kPZmivVlF6JNSPxP32NjDzJIsZnZJORSqEzV1NKMYoAqlEG/6S'
'ZG5AH0zK3tZSlYpGCHGcbUpOgN6DN404UwMCoddlFwpHbXSQ9fFKp6DOOwVX6LxDsoBj/2LSIP'
'SbkTjhZDMTg26BRzwux9bx9BSgsRUpddY5niArRspA+IEUFOmac9wUNAvQR5xH7U+YU5BxLkED'
'Y/kfzqQjeEW3NQGgSL87XIvJP/V8URoquhhgplIb43jIFMZPks6FDbpYSJzsU5F86PINr1JgTT'
'R0kkTQW24+BYT86nE35vPbnAGqOM5WDh0wSBMJ6I9k2pOC9gF0r3NvCmoBNA/KQRKaBSgGT/xo'
'1gBnnQVcQfnvz1IS0WbbN/lT7yHe+jpegKE3LN2bSRoDjI5VxCkisRFEq6b9heLm4hPPVrtZp6'
'Zl78eq+h0Ntk6RfrDtMeRelEkky33qNGL/Ky2IEl0wVWkPo8Qo7eY6BdsMKKMAMMVie22pvcZu'
'1Os6Q+FIm7VUTqaHgY3Qq220KAhKlD1oZAsVlQo6QyS3+cgWLc7TD0lb5GhsRM5Tu2qSKPH+E9'
'S1lqKC7qZWHapwOD2XnLHEXKI6h/CBFNQCKMZqTUJxijFe62mBWs6Sg5FLs/mH3TKlY1Tcnd6c'
'jX3Gomq249hPasigs4zpEwrHOoK0FimU5la4zZvfBQlMmYurwg6FlfdpXFWM1W7QjEAnjK5XqO'
'uHXGX0vo0uLaqUbNzaBZoR6L/LajDuqlXaVX8/231XVbkx9ds0FZEzXjLAknhmQ2P2KufCLJk5'
'dkuG2ECl1W/xW6iibeZ5poOZ0QTmJm8EJYSMvHLMr2yGx14dTZXZrZtXjq2HUByaOLbmNeFw4V'
'5B5yu80z331sLVywCNiPXQyHgIOjmMuLm60YY1XdqqPrKE3nn1jbEYh7ejNdi42eHA1B2HUh2Q'
'1tAa3y0kxLAIiUbYaPPjcJRbNUPrNpafilOLE74C2tWhBLyP4PkU1AKoqXeomLVV0jt+MmOALW'
'eL9I73Z+ToQ1oAbk5tTgXCi1ClSA2USkLZO1lwxiSK+Z1V3+S2BhS5opRK3jT9bYzvvn6TOiqH'
'CMUqMhmPbAvNsK7Oft00Pn4bEe+I6AK8Eba0LTY1NTspqqOqgcSpGqqGCpu7lVA1VJCrrYSqoc'
'LnbpGq8ZIBzTgN2utmMPpt1H2xupF4trR0IDwVFpf3cYmMa+zPluzPjcT+bMn+3AAx4qSgGLT6'
'gCGnLdmfGySnhwk66EQUTdp2VQ5Kjhl9F8nOSEeIvkvkZhqSEciUtNaCb/sKj3GOplWJeLyKof'
'wws7cWnZRpN3l5WIo7tqiZvbobqwskI5CnpOM2dVySHEBvtccM1Y/bR+KlIapMoP09dijt1Uvu'
'rDJlNBNbXFEScaOZhtV1OIHWwp0tDsE7Oy02iETgw6qvAh+mPDewszZsFabnxk6H5wamC7cTnh'
's7tMH8bOy5ge4ew/kfs1Rkxiq7vKlEJLSumEnXtBmGXhS1KVu6MsfoeLEKwKbLUW294DYmp+Zs'
'8dHjY4hhJeASGKJNpUgzH1a+j/L37jceTfYRbCjxsPJ9zh7DncUiV5W7nXvsD/UKKON8kCPM/s'
'PedIzmoPu8dctKFAt4Sk3U7ghHLVTAH71dY1nKK3IstBVGmJg5jm1JwDbGoN1EvVDetQdVG5PO'
'0RXrOWNPGdGPa7nJUZC064I+Q0DhR8dNn9J/VfXbOJ1vhu6mdLyRVuwAFq6nbGklE4ULiS7UZr'
'fRhrN1vSXWVnZek3FxPgmytdH2sIGpyavtpphTRB/dpQP1uH334KDVEBEAzdvH1GpeM6jhI4r1'
'GupJrECM0QtIilGaeoAPW05FKBLRu5mmL5o++rFhbnF0gYSmrxfdzWBjE6MCa8QwxKBYxmCvQZ'
'qOkfNRfQyVEp0pQT1FZl0m9itCUf5B9isa1h5DGGL2g0m/IlyxH4wDGasz1gc59p9K1ZB1ftyi'
'ZT2BVn22qpLEYRZviUFSHGuMNW6kosZjALXyQRX4Tx0CfjyZNRt1+x/HcIRmKcIAF96UgCRMNK'
'aPuClK4svTHace1Qw0HpOpJxltWiUqpGjThwwQRZu+B/a6koB6nY9gtWL+AffFZJY1Sh/H1DG6'
'BxnCNaCdvG64FwTRR+K4nSo94EcwbucxA5RF0HHnhL0qoD7npzmK6Xy6e50SUwtlWs+8X6GaJF'
'l6i65k5EVulYQABr59gC91AR0XNSZ9quP7DJCFIBXulEFZBGG4U7XD9XPg59H8O9P4irx2F5WB'
'JIGsyqqHOqHKJbfmqwcXscuVgXm/ij390ypCKkP7GHqfAaLY0ypCKoMo9jRGSP0htc0NOG/yTL'
'/ebaZR+OJJCVAHcSFpjbQ9Uvxvn3GVhxcMT8I1gyhAu4rIVq/VwnuqKt9TaHu83cHGAzC6NyWI'
'9ajGewBG92ZydPi+8M0kHw3A6N5kPvqqGt0gBx8+nP/nlpFu1jgKak9Hw9JEpn4U1p2JofXQy/'
'reg9wsiU7q1YRKrWLeXVw0kicmUcBMqdgZ4KATTiPTCiMbxBlUEZbfNJl2EIjz6eQaH1QRlvcb'
'IIqwjLkhf1ERx3Y+h/Ueyv+0ld7wxYCQ3O1hY7yis75ILAHtT430K7HTJFod6EryZOnUE+OYlG'
'LmRqOGJsM468wKPTU44z53IUmnlCE7vB4ZBLCBAITyp1W4cYb2MnTAAFkIGgSJFoOyCLrfedD+'
'LzICG+KowsP5H8qgtW1+gWxtKXGDE7Z0eWFlblotT/U8HS8ToogPfpVWnS4q122qTe53aXsmTG'
'rS0aqUeiMQrNPzBH8saq9hhVjtU5lMp56nOxR28K2pN/5Tc91bAqa8SVNzEeckRAWk3mbjIWvT'
'it5DKhozEO4hTckhFY150ABRNGZbAs8ziKIxH4aN7nsVw+3B0MoZ59H8e26anIiSlqTWUgeD8s'
'TI6SWRDRBjTKTsX8aY9sCYCItfMzfKPTAmgu43QBQJ2gGGiUFZBD3sPCKmr8zAXo7NfDDv8j0X'
'v0DoarpTGOwFDKgSNBXL5r2AwZeSGs1ejvxsekrvBQy+hEjl7I8oLt7H8YYP538g47J73e72Q8'
'XJ+oaPE5YG8VWcziXMj13YPKr2J2T+OhrBgvXkAsGNVwmvv8t1FcgFyZxH2mwyuWu7Tnf59HgX'
'xWnwGhVM77emxNinYjR/yVTV9qkYzQMGiGI0myJzH8doRpH5DcXB+53fpX0o/1uWK4/bdPCNOJ'
'WbORBPne8AfdoT0PjTQgO7vLa6ckEOea/VgjXaG9HlgQNr/P0QXY94P1Drd1XQcgXqQ9CQQZf9'
'QKrfRR48bICyCBoG3a8lIMf5A5atFTmopJJRkxBNm2Xf9gjUfMPxlvsFbGJlwYFB/IGK4a5AFo'
'L2GhLLgUH8AUusX1fzfcD5I6zn5n/evLilDVuuLpT3idInFuo+WUTxEMeqAQwkQIMAv7tQ7/AC'
'vJ/f4QGKdQ+Ow17jKX7fWOR/niH7aRWWsCYWJ+GkDKZ8rUHkvRjUgIdKqHIZEwr/8gBMUB+Cho'
'zZOwC0+CM8k+QNUBZBD4DY/y4B5Zw/5bwW46Aoevoy9jaSMKvJyQE21Aa07OqucoDQnyYnJwcI'
'/alFhroYlEXQQUBbIXTQ+SaL3vH4qkw9qWKv765p0A2E4IjJbfypShXE0F6G9hkgC0H9hiQ+CA'
'h9kyXx5xW3HHL+HOudzr9pwTK/EWy1twQFlLbqvRTfQCsrAhqYmWioIBeTpIy9brplNuM4BvRC'
'MWWve91Id9eRU4+fkXJGJkUJ+Jdx/6YpJw/B1Px5HMueQRaC7nPGDFAWQSedU/ZvKUocdv4S65'
'3Kfz5NCe0Hr2gxH7ZiU8g6iQUyK3W663+HCZeimdFmkjaHgTY0GhjkaT3qw0Cbv0zS5jDQ5i+R'
'NkUDlEXQuDNB2a4s51uYqeFHso6Vf6q7XOSQFV7EAhKli0cJp4NKu+Ylc8pZ3B6mPfnuAZWJ8K'
'8Qp3+ccbL5y2Ya4THtHQf9ke5OQbUqXgN7rSq7Vso3TM6eBwbivIZ/hWreYfsdGjTo/LVFd39F'
'EdQFXnyFLheARsLbg3EDgDU1sc8+bAKhs67gjAK/T4PRxvwfLKfHOZZ/t/bWxGAtKgk1ui/DaR'
'uUY9iUS8pDylbGJ1I7rwUt4KQ6eSnQX+71JmaZBVnbkGep2+qD38LUN8MmAsAlhAIgt9/AmO3P'
'9IudAlsIHnIKKXAWwUeco/Y/yRhwy/neDLRxJv+3lrtoXEpyhJOmcYBFBVi5uxDVxZ8bZ0ESkO'
'OuEx++8YYuZjE6ZGDaRN6b0DELGq76KOl1fC+DT+QFdhTC8oG9jyO5Smjl/7e9N4GP7KruhP2q'
'tJSeenmtXtxd3bbLskFqW1K323sbA+qWsAVtSUhqDGEcUZJK6sJSlahSdVs2TvgIAZIA4YNxWB'
'JDJiSEhBBnIzFM8oUJBMYGglmyMDDEgSEGJgQTkpDFfJ7zP8u995VK3W2SyZfv95v8CLROvXff'
'veeee865Z12tlfE5UDHCQNgyUF4LbUjehIwahHHoH+Sl0VzOsHutwWGZIcZh+macELr6UjiEAZ'
'x/yTeBGYn7k0NN4CzAR+hwLwTgTPJqDHFjfqLgo9b/VfAdfhzmP/7OliZwB8BbSf6mwRHAheSa'
'JnAW4OuTG+KJAJxNfgwj781v0lD8vLqJhxiH4ZOHpI/dmJoBjJ/8S9wEjgDuJpmSBvPUYAS9Jm'
'edQ1+fIcb1DTCugmse35JrBQwpkvdw73xjl4OBDb4Hk/lEJmnPfzdHxx1exDluxCXt432rerhF'
'y9ZBrghpyifCx0b6bjr68oBqGOlxUldQc8jGogM1VodMcslrOADm/dUWYPThAS2EbkNqpgIq5H'
'IUnGuIvQrXLbp8QQGrahF1nrl3ULFxDqrYIWn9rvfS5mnLPQud1vh6UhgO/E1mp6tvumKHPP1y'
'BbGqGvEq98cUDuu+2KgMxuE9jYrrgyY8wKFCls3tvsXiMcfaHR0+CxhKo5SwgpLvAzEmdkrcJy'
'UNLb0TNVznS2oxqHOBBujqc1WpI3BnqbTqdXXJLTGpyDN2gbzgciGKeWmxf9cHIMkoHG4sk4Su'
'n4rmRhQt10rEwCuQNmojRZ6JEqJSR3gFrsodn1c04CYLJ5ZgACzpxNRJGrFSgRdNDcprtkUyfQ'
'tlwOAS+sa2OPN7cWaBAZR3cBT+fHlN/K8kLRCciQcR3XJPL2eIHC30PuNUsX6qXx2fhzR5Qx1q'
'B5+JhrZYH55UYO+91rN9pVQ0NotybewIMVcc96iid4nquFq039be4BgNNa0BiKsuovBjsbBUrV'
'ofdMWJdNMt382FQ9kotTSUjg+S8ItiGJ+L6J6QZ+oEJQ9oVvfOsTxTnYgnbaF7Bgdtezh0l1+j'
'ywbdrL6ZaeEhl30IfNAnpAUEl/1cKWtThasQHe6M2ep1DT2ZATGtVqvLKe2IkMaX0PqpIuuCK1'
'zZyOiZt0D3UrMn3F8cO+XPs4iV4FDqobWBURG77t7GlhOtn66WFzSg0cw7cyX+F8PkoAg3sTcH'
'CpLOJMVL/Lf15lBFRRF1RuN5J0ZM5J2+YRCy7UDzRpB8462gndob79vwa7v8uqXFTxF+2prsaP'
'FTFj+hteGbM02/RckHZeP/ETEGy1KkAdQTVGZXEq4Yd0B4RkANsW46NkKKs2lZkn6r6VBfprNY'
'AsMDVYujolhXwnYBpSjaZ48yhS8vt4qTA+Fjgi/u43n13TH00mq50g8QFw5ZIh5RnOOqFSXHw8'
'5Olc2bALWO0UJY27MBnSgo8cHWmxAJPlttAlS8D8omfKX59GWSj2G86/MPZYNq+tUCcvX0/gUG'
'qTW8cWR0T5CWgKJrsOOBDHHHGDC2skgI5KNVpCckO8cPMl8l7tuPHIjSXRCfqN7l2C3kqNq/cC'
'YPm8EoYIyVKtO/tkuxQzZPil2pIlOO/eeYxfuyyhq9JoqMFESyR9O/woIVB+Mi4pvUCeUjNAW8'
'dqpYD18BduT884VLJPed5VV5hjGKT0ogcnllpbRQltYrixryaw5CFq4YUcWrIUaKmluakLEovr'
'5d5wrWtSAqqNm8zR9sRVTQtvnXi1v8FOGnS5IjLX7K4qdrk+viatNP2eTjGG5f/gWF0crpcq3K'
'6aY0e7oPQYT4jVGupWyy5PwHzAVCnS32mleL9UEz50/ShK7fMFNo5x9vfWigeX8ch2ZXi594GR'
'cSK6y7nyCtHpFbxQ86NX3zft0sYpquFIVJF5xzVqvD3vCjtL5HTKUPBKtw5UfSNw+L/nwkffOw'
'8M9H5OaxEICj5HMY4kK6602nTAacZDfeKm5n9akvApyNv/MIxMvu1C/t8kvcBOaJddP1Jg3OAr'
'ybKPnHowCeST6PMS7PNwozmLwl/WneeKG0QioMeiTBwWD6VrnurMrhs976RKRZbMAtuqZ1zpwy'
'LwZpf68N14oDx7OhiV6Ymj2CSz6/ca3YhM9jrZc0gbMA9yaXxUsBOJv8d9mwqcKkJri7lBBaug'
'TqSdtbwNAcAbprtbZW5JDCFxSXy66EEhTnFx85PDR05Npr72haBw4Wf4omcXlqZjhU/EtXEzgC'
'OG7as6xMGXt2QwBuSx4Vq85lnvB8Ur1zLvCa0vhqs3e3NIE7AN7ahEaYHh+F4eBQEzgLMEwftw'
'bg9uQrGPnK/NUtZ1X1dxJHJpvMEnFBX9k4SwQGfQWzvLAJHAG8N3l6EzgLsPUXNnBH8pcY+bL8'
'pYXjcgPyV/W59XSYVDgcYn/+cuOcOmTA5jkhAOgvMaeLm8BZgC9NeuNnBuDO5OsYeSB/sImRBF'
'UHmvEVkhuie3gEGvuy1AcR4cO/7G8CRwAfCCxgAs4CjFiYIwrOJN+EheUV2SSbv7ggxQXOZV/B'
'ofxmhtNMhh0IIuBbIgIGdJwNAgDFU1K5/KEpKaMMnQeh4f1JyShD/1aaP2SUoX8rzdAzytC/JQ'
'y9GICj5DvCH07YDF0UblpC9aZ6rK8g54sISSLxmwJ4m1YAbs4f+VbIzTPKzb+zcQWRzKq7ab3g'
'5t8RzvC1bADPJE9ijD35z2SREg7VfLBeQmom97+AfqCXIL1dITkcUSGIpWfFrkYXySJuaXxgAw'
'skSbRCaJ6BHYN9lc7YISMPIdq0Eoa8oIcEkurdV9AwYrlaXACAiRxp0RKmmnqRftLq0CenTkjq'
'8SrSuFmf1cxfhAchsv+QxPTzU1AU2aUepCtzxpv0VBt87vRRmB5eNiuvFm4uvOTye3TD7j2Ef2'
'Ih9x668tAVVxySZ15yk3sDXznXG/SMvNGoLeNhC3i6/B4Q/b2HTh96Vv3my++hX1/WqK6V+m0q'
'B+99+kY4DXbwXhouRUkQA7zX3wllZUZl5ZMbKQn0/SQoaUcTOAswbjl26LPJj2Tp0P+YHHqp/H'
'GuQw8JRm/h0L/AgXDofzRLMzmQHy4cH5scKWjdENFglb1Z3VVNR7TN50TmkB3vDcdF3UCMTN/0'
'JyOrnIB/iZvAEcDdAa6yygkInCcGORyAo+S1GGIX8apxbUryVGeIk86D0PAHUt/ESX/txhlG8t'
'XuwBGU1ZNOYNSH3M5gIPUnsD8Xs7PRFN2fsC3TLZEMJ4ZuDUARQNs0xM2UWwIdSC5y40fJ6/Da'
'Xjc+1sIgevBi9yYcJgyNAxC/263uYNM7CQRea+NnktfjtV1ufNAyg+jBve5NCGuG5gJQBJAF3J'
'iu9/o0frLJG9LjQx9jED24y72ZpfHfkB4fVPyG9PhZGQ7jr+RQ2evNWa0ak7/DG97qYSUNsDGJ'
'pne99Eph4t7Rwpdf8TOee9wj9kD+B49w75dfcb96a7Fg+iAqoU7yn9j9+7IcufUsuPQ4Y3FsZK'
'OEQgHUIImiuDFFhtFjYpXHpC/JxplIvc+jx8TpfUBPEoCyAO0kvFYUFCVvZfQTfo6JATgU8mu1'
'9Q2TE/eEysvCTHVJfIZ84XUi2BJfSEQMI3bRTR/UyV+keex2E8Mpe2t6+pFMzXbX5OhbZXdPKC'
'iTvF3we5OzCKeUlHKlcddgrbTcchVq2vWTA2nzeG810jMW/fb05IC3t6dxC0p+O+PWVTj92L44'
'KCu6ob7pOeuXpiqU9r4xE7ehLmPLGlbp8p2Z77d8Z/b8y3f6Cpxt51WB83wL8rUs1dnZslTn91'
'1Q6t174hxXkHr+/ykn9X/KSf1bl5PqD8pJ7Y7HiH9cQDoNEyOxseEgyhTUNsz/8sGG86fo1xqC'
'WBAeyKqZNHD0lYPyNI0t8e9a9PAFSS/Xp35vJqUfYdiBzbL34Erw4tIex3wmoVTN030Evbdrhf'
'7el/celCgt6SUoSfRyiUHMCa7D3EX8lK1K2sM1x7OL3be3+PK53qDPAD/fSyBBufxZ7LU4sbI2'
'C/eDD0gwFb7KobVsfucJ8J/aylYzpjEa8tqxPovx1ejeVL4p8JdvyjftZYEQ5pv2kjzYmso37U'
'XB6/ghn2/azylPD0bMV3yTDRdhyTP6lyRAcY2jst9gmO9jRzNNSUgSlOzlxiGX5KOdtMw/6V5P'
'JaViMb1BA1eomP2cwhMmpfZzZlWYlNrPdp5fN6xkkiFOrPrZs2PlXyvNatPfn8LqoSdg0v3JlW'
'5lUICHUqvHhg9xdpaHZAmC5KwH23KWrTjM6tcvtjVlL8l5+z6Sl8bSu+/mL9Up+BMhCoL48qYH'
'8KNQGtfpcbMDbuTAaVSzR/4m3+idK6HDQK/k8coDrkGk52h4W9yi7oUa+0wl/0N0Shcl7ROSOZ'
'+V3+b4GY4AgAm/uHBUBKCeE+3qJCdF/kjFjgro+PD48VHCB/5Cb+SC9h91M6i3mB6nWUCuwDPt'
'VsgofQ5Lm2rdg5sXqWjU9WlIfK2BQBN13mIcCOH1gAhxTwLtDCUHHYHhljScYkwQNMOsRXsIaA'
'5K9Etzlp56C8fx/0DrKg1KifSbYBrzlcyGUC6cI8HLvt6mXwshKKnUHbDXNi6ntD3IZW/jUkq4'
'lv4nk2rtySSu1fk3Zs6R2cYHwXU4DdYE4istLw7ELdLezi/rrUURlZU5vfcshAU5JMCmutYnQm'
'd2DSE6EqFM86Bbyp1q0EO0P70HNwbSsCVBRzt0EFHTa+eBa6MQmOaBpFvonp44mCAuF0AigiCR'
'zkOyBDmQXORuMl//62zc1BWhpzuov3vOXg1nb8VwrptQ759GcXKsujZ6GrZHDb7s2R13oEKxNm'
'Tommqnv8YWevbHXbA7ztbLd5e0zXMOgGn6Gz0W+Me16p2litan5cdnAGi6RrV9v9eo9vO+RvW+'
'NN4RLEzSXXoG444SQ2hluC3tTtU6tuen9KGep8fbEfM1G6xMSvJuBXjSVtd7TRz7Ys09SZy9s7'
'SuuMM/0RybtTB9W/7oPRpvk7dOkIa0yZt74g7xtNGrWdQClr96fzMTZ2m6m20V7UZdMqF8aewu'
'hdDPNB8EMkm1enqJ/+g50nTxzDcjR+QoOmu7y+dFvK/IBVipL/H2dPHeEeS2+hIQqNmIs1anu0'
'MQaM3epFz3TXEc1MOWotb7U59PY2oqeLynP26DNVbLV+9qnvUY/TbFT/R+Jxt3KoQ+uQX8YLmk'
'VffPUUg/9TBKMWv4uxWP1j/RTARdR2sktWfLq3oQYgONreIWnvIJzxbrWr96ewo+jA3pmi/WZ4'
'FOK+CeJtjjw9PYk/pUjp7jf/U8N+7BLWBB4uns5Q7tShC+jCvLAnv/ZIyk4gA61h3xvrDMdV1j'
'9GTITi2AnprP2OSIFsAOBt4T1MWue3jPobgNbgDdt/R2T55ar8N0OMpVHKb4QeIEnQuI0qTT0N'
'WCRJresWd7XxZvS//U0tDTTA+Zp0IPaF9jm03/6v2hOGd7g2LhUs9jtrxWWqlrk5tugY0BhPk4'
'ppqd4n+jiwsKftTXzteAFMvjzPzW4+1N27tZgfYN331mvHW5SF8lfep8v9yNF07WuYFM79uieH'
'dLOmg5g80Pks0te7a5nVuQpOb2O1GcMyYPkcRsXkaKzi2S+GkWSb1xlviskkjSzHCm8CNRdzu/'
'wAvYdmRfS0nDvFSeg3iV+YCTCkfIMYAYae/tcScR1EytxG1aJI9REal/Ab5QphPmysbLXz0H4i'
'62CyDW1wSzA/Quxd3BboFY9bTPBnvVrbDxs28ZrYAJlov+y5dyAEwiYPlYSJKb9wxIjZFpGuP3'
'MjRbHw3Q87R4GweqzLrODRH3TtnKUItu6ZmOkyCGYBb9DHnobUf6N+t1EP6bN2n7fBrQ0xdvZ8'
'sVmmygKhD3fgGpbmPwpEF7nhUfkAfXqsQqtKABGP9KaaVak2YT2al9/MwMHjluT9zGD/TeGm9v'
'mk3Pjngr3QBn3fU0uaAnF7eNT4yPJhH+NXxyZiLJEFuKnztxbHbi2HNHj88k2d4/6Ii3QeIGPT'
'6ujmPIGM5it24yu5qFDOhuCrKI8/TrdHS414G9lWFmvHczUTAV42F99VnEDQP55PrltJZM0q6i'
'O5BMID+gkJR6why0F/sTtFvTSODZ+TMLqo10G+z4GdaLSBTXirPF2pJIRToMDBkmAEaoozTa2u'
'zc+hp314j6t0x1C+wYQKRsJKcgk8PHckx02wg+HTyZ1mlaCayz6DQH42ypcnpvzG9d2OItfgPP'
'9NxAfKNymg9MfW/3uT9D2vRpnKYN7T+2PIX2H61bOW19yq2caPZxueoG2HauAbrKVXvzGfGWpR'
'qxsFlUpK0uaGuvs7zbzY9P8tM9F9N3F0orq1VEXXCLr9xUAAGZSW1HaeVFZKZ/9v5ZFHfhEE3D'
'zI9d3tCNKY3+9IFLdWSCCLprtSzzc43uNl1A8HDPFfEO10cF/SXnwf2yvIzt2irluILBqvxHZ5'
'FfovJlmwffStDe92XjbkzXLoLXx93SRWeZlS65M+3ZsDrGxFS8Zv+sp5rp2E3RuDHNpnVXnm31'
'dEeeprZ755b1Yds9ky3taXWH26fJuZcWaQRDSVd1hPG/w65CuVRXocvjbWICd7eZLv59i0D1Mt'
'Mbb9Wnag2+f3WbEAVwqoEb2Ei8c7UxV2/MzYbVnOm8R+zDS6m1XPZ5qkeeHw8e731R3CG/4ka3'
'Vl0tz9s1kP8Au8OdInV17QJELuW0S1jvQnGtaOLa/u79rc44FlJAYVq6inRqhKDS+N4NVKBEM2'
'UPNu/fuX2nm7dNPLcOGhgMbo63FOdIIChjOh8lUZ//F9gb8JqZZrRD5FlOsnu0ZwANA2nRTIDb'
'Wh0u/DolD/WMSjdEkm/0X0vQHnL82sWtXzuuT0k7RPcn6ILk6qw2y+ziA9pFkHHplzkQ96Ru7X'
'z8mTbbp5Lg4i4cUJXh7rMpw3rmSVOzrHGSNlk786WaunXrYGtmtbYzViexgke32w9yzFhaL5QW'
'GqukTMBIx8KDjpnCnkOg8CRvT53k3XGHHs5EDkyNjyVu3HzjLVfA91vcuKHW0I9048allv5Fl5'
'Ju4qFsS6wQmqSZ44GNIsA/MxW+wHr/XShsVV0o7d1Jb+8gEU2A4/R3z5AXQ7vOops54US3r5zN'
'cdObwrPibfPLZWyvNbDKtBL9gQa3VZ63W8I1xN71Pq8K3Oavuid7H43i7U2Y6NkX5+arfKMTC1'
'YG6hwubAs9z463sZl2Fq5jtJI4t4Dcyi9M6PM0zfZ6aa2xquxj4yGxJ8VOIQ+TVpRbKxVrHB7R'
'dl4vuud7fyWKd2z4PcUaovNnDYfjNmTz6LIPNG88dOOymmqm+EmiluypqrV1PPsLeLB3lW4WaT'
'iokdhCylSRI4DYKeh0yiWG9V59SIwH2/kH1nzlWZJI8nsHK9HyxxV/HsVbU5ZEEkJ7juEqs8Hl'
'Rnea7rjztrHp6bHxW+hasy/e/fyTwxyxMD46MnvsRbPTo1MvGJ2ie86euKfpJxoyyRKHuGiCnk'
'Ct1NnbhsfGZ0bH4YOaHX3hzOjU+PCJpA2jukcwDzzEP7WTarDt1onpmdmp0WMTEzOYRAfuVsdO'
'Tr8o6ezZEuemRnkGI0kO8LGRE6NJ1xX/bybeEl7vSdPMY+DRF4yOz8zOvGhytGmRO+Pt+H189H'
'ZaEC12YpwWuzfeFc7GHGm0VpoVfrl1YuJ5s6NTUxNTtM6EPmmwExO30LIII4C4qc9iJbQmfXL6'
'1pMzIxO3j9OKtsfdgIyMnhidofl04m45Nj49M3XyOH0fa4qJje50IPZczs4MTz8v6cY03Q+EDf'
'4JqN/ScyDe6345OTkyPDPK6D0+MTKabMWmu19phbeNjesDyTYsEKPTo7dNypx2YZcYtgEfuzF/'
'/ul5YydO0LN7rnhEz2BKAvZcFl/Cj4HORmeP03/dMjH1oo1bETx0G9a4nQg5cY9Pjo6PgBCSFH'
'Tq5Pg4oAU6PRc66AzR4/QYNn0E9/LDqR9HXzh6/CSibeTHZ6d+HCdamZql1+XHySs+ndUrBysB'
'tLfBLMfGXzB8YkxPS2p++gdt7cjJSUB34BE/WdoE/WPWHYHp0ZmTk8mlpCHs2/DbzOjwFBNNLy'
'Ndtw1jXQZLg+4X/j4EwsD8N+zXYbw6cnLGAa4y+h8Zmx6enKRP0E4c6dkad01OjY7eNon9vxp/'
'enJ4Nv6cGbuNTvrEyZlkmIT5Dvfn7PTYiVE648mxnjjuUKI4DqJSTkJzmjw5M52MABmMGnpgEn'
'+MvnByDF9/Ps62OauTKcx4fALkPXFyigaexudPTABZt9LvM9gP+5H29dbhk9OY5UkcgennjdGa'
'Rjbi4QVHpuMOYhPDk2M9Y3GHuKl6LmppJDS/XP7izX4W71bvBccO/cBgy7g9e08cgYeKq+Wb6P'
'9X5577F+8n0kq6kwuSh7qSKP4iQva6OWTvl6Nzhuxx6zV+wEcpaQM7TZvQktQFro5Nf0oY2PlG'
'+MUFq96MWIdFuhQuWAa0RtZxcN9QfOTns5KRx8srsDjTzAn63pJEYcWuBcgS/dWY454fIoYJF/'
'Xwn3PL1TntW6XgQwul02vwkh2qlVaqa6Uz1dqdNNFDp69Cb7gjh2LUy+LK6kVrxVOHLQduc5pI'
'P7cGop8PDqGXG6JH6iWaWnGZFnRKKl4slHmupBXVeTSfIcqlG3jwM2WgDaGNyENnz7b86gtnLB'
'KWitqgZ4XQviztVrRQBSo4pQaWCC2LwMO+SfMh9pRzYQcX5aL+lKMF/qI1WpTqGu5vX6OsUUEt'
'qEZluXxnqTB1bFRD+bYmuWQnh/IhNHREQ/ns3wjlS4LOkDtcN8hM0sMhfrtzHfTMhfT8gSQ60l'
'mgE1TnEL0OjhW7kKuYP4P/QoTeviST3JwfKMhR0Q5O9ULJ/oRtEJkXValNwiVsEnubxtvHqb8e'
'kiHI1uSiAJIlSH9yY3wFxxteQhO7PonyBwpWygjYl5M+JJMIAgovodnu1fYSmO2lHLI0EFSsoD'
'cLYyOcZyGNJpF/gSu51LRpiqfD+5ck+VQ83aUb4ukuTYWtYAWXctjKL1gIiMTT7cn/x0xhYhWb'
'WVwe4mMd1JRM9aFExrtUqpM7JejIpsk5v9Y7q7kWoyWny18cjLnYqHF0hqQ9FfXAbz6+GxgBh4'
'4O4bJ3lYNc2IJWBfDfl/xxrdLFNV5s/Di9APeZFrF6lya7gji8doZ1NsXqIXchHauHXKMvpWP1'
'9uY/GZ0V49Zkq+hDbDkVTCNyONRSp81tliyQq+jrHs1reQquWB4EvzkcDQRRfdK/x8L3uHYA5/'
'khunQelWw4iTk1dh+SgpcamlyxMbBvTxC0186wXFNgX1cQnySBfYhP+klDVTY5wsGePxxgarRY'
'owtqXQsoyWFBpzZwJz7qyHubX27Uice6KgxrTcsvV3xEHJiBtFRCXqByCyQ7tyADxIsd4Xixva'
'l4sSOpoEVwsyOpkM0sLwUhmz/k4sWu44jNSrC023wZ+bOsbvSus65Oi2LUXVkErb+rmfY9OR9D'
'hhkcCcIv27gzZbiSNu5KGYZftnFHSoRfXsG/HiUmOKxMUHQTUGgrJhjx07lkX7zq0rpu5sLqLz'
'GWLU1dac0wHSFqtFatVJerS5xZJBGKYa19nJaaRiYvVYn4SXAh/AwcntNYSvb9JMgbu9lFzlnO'
'2M2cv+UhEUF2KWu1jLGbk4uSi+NfiFzG2LNpmIvy/zESbUSObdDyVnmRERqbZVOs3VWZ0lPtmJ'
'BjZRLNTYP39nIVGiltUqk6tumkg8bi+yogPUEeGyZ6c3KJWw3Y1rPdWbQktmfTWdwbQLIE2Z8c'
'iN+a4RywE7TNE7TNf4cSPEFfX5Tb4fDRRcv84tIk8m/H8bl0lz7+jDtL6xqp/swBS9bQ8FC0ly'
'H9EcNwpKgExRKnK88HUbneK8aVwjgAEaiXSonlCndhtMpqWkpzwDHN2NVFqUu/RBJSqERsVeLp'
'33X3iET2k9bZUzjO4Ythz8Mtmq52gmuLbnPZarcRcnfwFliuGSAnVHJYptltbgss0ew22oItAS'
'RLkO1J4kaOknEU8nUjY3MBuU3ljSWBjadGjvi9Lg0NtRSwcW738jNynl9Im/uDtLmvz5zX5voo'
'f8DqZ9lltykSynb2/Y7PseHm1fw323TeYPDyF3JKyjb+Cxv8IrfBlpkLyAv1/FhO7ovcNlg67o'
'vcBlsm7ot4g98WKShK7mCu8tqI71uOCdgRp5XTTIcMmF7TkMTwFiyGV/aFUzR477RzKXctrXMO'
'D+8m3EB21eLM9Lt4c/voQ33KOS279w7HOS2r9w7OrvYQLKAnwANI7Q7mI5+LWOQsEam9Ax3cfy'
'1ifXchqDnpLitaXfJYab7I1bRSv5zlChLcQFzZrkrfGuf8xJzgVWHqo+WRmCAqkGuOlaZZqbIm'
'owX2fHMx1K+pMQ/wM17VbHCQCMTgEpFId/yPvEimkRVW778WqVbPd8E0gVmdN6ZwW+DicqlEU7'
'ilVMGNkbQu1zOc1S8rIBFUPgNKUBxSOkjhYPVZnXRNP5NVaRIEq3CWBVBmxaI+NL+45KjFSZ2w'
'GSw/2qLiCs5QX3mhT0VOmx4HLH5J83ja9DisuOPQpsdhxd1N2vQ4rPDd5POGxSipsab8DsGiBr'
'UyNofp/rpaBBZ5vsqDalLFUOtV+ue5Y94cN7PmUB2AtBIiMDoU9PRBngB4y2BjdalWXNACcOKt'
'BOGhY0968KG4t5+LH3GW/Dq2zzUgrc01ltj4cP0N111/9bUeSzhOWNyKSoU2PVC1FJYiRoEpyW'
'16oGqsJD+UUVAmuZteOpB/MFOYhNkiyLWY44Y5rukezbK2wlRVpMkvgQJOl4ueBIDCmOseet4S'
'kAr/YLTihxJ0nCpycS58S1UAvZzRS9rcjH9UKWI1yeRzQm6I5FoYcvWWltwZOFVaXl1sLAvV1U'
'gJKuP0WUnGfwHl4tvG5Nr06nK3Y3JtenG52zE5k8N3E51eGECyBEFFhVWFZJMf4vYXL3HdL8wm'
'04ApLfhLuRmsNZwFyjXsglKmWqZuvlSTJNUVLjuFO6BX59v0YoJv3k2sNnGwDoZtDyARQRK9mL'
'XpxeSHkn2k7P6Mnbq25JXSkuLHo8J0EPka1sGQetfCut10bX0bxRCSR1OuILcN0GlVxVgLZBU3'
'by3OW8V+LYTepvcWnt8PaecKAbYLMBeAIoCsqVybXl1eKU3l3maLbU9eHbHIfY3vVMKViKS2Jg'
'mAheWgAwdK7kpmoGM1Wv/ROPiApDE5+peyRmLWdKfOVTrU8q1LZfS2DVqYtWnmCU/uldaCp01z'
'T16dXiqST14dsf7uQVmAIHgfbFdYh/S4O5h/dzuzU9cw3J+OofCkMGXeQ+fkaOHFonDccS/9Ps'
'+31dq6HNTY1uZK30i9W6twYFXK1WoBbovsStv+kZMzhf4RlsJImsadbYZuMAfVaAx9pFZd9jzB'
'F9KSym1oy6gdJWkPWO5bQisMFPTwpLXrlYqRajudL0lFN8I/vUQC0x7jVcdKhkeNGpe5+o6tPK'
'VbMNtqVnQxn6fUhBr/Nbi6jn7Gh640czQqmYJhlrxNH62pR7gTQ2lhcKQEckJvaiKlQb9zcRxu'
'o6UhphTfuhS15COmyiRr9U0aZVwYxRNuNGnZ0qwm+5KTWl1ZitSeRsVKIo6JKeQT84ZBqRKjUp'
'Ab6gT4Br5h1NxhXRtDEJNzt9Z1ERC3bMxrtygBcctGWCsesRPfKb0Ld+b/nyh9DAL2pj3oSkp4'
'G0g8YHyg6IHYs2y5mNi9IqDvzcmbL02uOoATZ/QfpH5uTvLGKDqtS+NbrBugQLVLY1cA4i6Nsf'
'b+FBB3acRl8OeyxDYuSN6D1hvfgIr+f2ebmLbqVi4nkFUiW7kiqVyRhPtQxHERgbqy1HLFOD3/'
'KKVjW8s37qgaW3o7PXaqWr3TVffGm+NWzHRAkImURXTRCJzyvpFQca7aWENTE7ELaVGt4I6An7'
'yPxpLvIL+QkBpQeLpQomTk8s2znuoIK/3lVJJXqmfigjomWR8yciKkBRENA2nTjxR019e4RE47'
'6860S+20Z0f4T9w3flnE2KXEB09z1Ua/AdbUVptD7LB3aBh+a28AygAEsfHhSGFR8ht4aFv+XV'
'FovsddfV56v1plz6prwA6rgRW50ufpwANrKCGDO5rlZVo6r/Jtv25USObU+qK1seErId0MfRKa'
'ap3aFpLb0RzZUjhWXFDa9GuNbBldASgD0Ba6ptxva81Ih8QD+TlmDSiW/zKpaFpu6c5oOR1VPM'
'PFu8zZmrKX0vldGq69/sarr/KryEi/RpQ/8yCeM5TPX7ZVZKXv4L78W+3eRNcZx+bQM4dovLi8'
'dmpdnANF3je1E6OHtEhOerAvWH2fGQlQdRjG3fNZv965NizezT8rLREzXCrIQNx1EAV135FVWF'
'vysLRE/PPMZksqqv+Va5QRq3RdDSwhjxs9c1I2EZ8GbWERo3JY6zZxbsjNVRRKK6QTFLnfkPK8'
'zfg5d4CpclPnAcf85yTnXEs6G8uax30HZmp8hvUhdkKwhQarml9WlRh+Z2thoMP11Z3M6F8l1R'
'ytBur4hvLEg9Zy5/vfmSNXFiyiTU/RQEGpRbhWRVLbId/9NkLb5h26JABlAEJJ2g9lFNaefFbO'
'16ue+jYKisoV2cxz7+CUignIDG6GftwQ21iFCnoaTaTXecw10n6qcDFoCelBnpzIqJrbuKLrwa'
'2mgc3QvFCuF1fmyksN6WlD/w8xRLvIO4rgOHY6nRcLuOH6w9cfudojGrr+Z9MsoJ0Q/VlhAV83'
'FqCN/Hbn3xfZWtgzwhELdUd2tdJcteraCXDOx7ywveVq3Tq8GLWYpLTHuBB5vUr4KFWqDRA+eh'
'ZZlfUqW2RRd8ULr3Oz7+8DIx3SojDDZcQMlAEIJdpuV1Bn8iie6c5f4wijaJVlz0OqJIVb9Qw0'
'SxboVI9aGzwDZQDqInX1M7YhueSr3Dc1/1NuQ/osjqaPyavKvEYqBzfW2JzNERowswQ/plRS+I'
'yWS6eLWi13Gtxper0OCgu4078W5m+86uobr7/BLx3Npb9qarmBMgCh7diLFdQlvfG688+1hRs7'
'4Rr3AWfk435ek3Xf65I+eyH2u3Lcgg7Y/1TEloHHodG+PUMa7f/vVX5USAQLeBz97LbHP8qmD9'
'b/uKHdFfnvRIUwW9k1R5JqL7USW15WCs+dnhgvIPlD1Dflm3VhrzRd9qWG9h7Wnd2SU5rzYpmL'
'rQU3OumGywa/M3wbqKxLo5E11TUadWbzc+tBKyFuMEMM1vwUrKM3xAZecg3yYjXBir/VvrDL0E'
'CKAiOC8LOD7zjt6uL9e1N0DcQt+vYlTwtAWYD66Qr1OkNrlDwhbPSfmHBI/KoI2MQAHWqwzqSP'
'jhBcg0557CJJBu2fIf2F2TwPuuSqb9o9nnBVA1c1QwqbrY0MUU3/f6OFAedvZeEymYnHLmzWjA'
'9C0xUOb7BaP+ENUu2qbT8RucqO7Wq3fkJY8usihWXQTw566j0kwVWsj01a5X0ckdNlCatrEUeE'
'M8p0ArtaYXx4po/LjhKVcgU7N1xQZFnL241Nnr4GCgD973UB7WSkT14G09zt5g2dhaG5AMT98b'
'q0umW7WoBfKT0gbldQNnlVhmvrPIdjKLXWA8eccE24tfWmm5NveFWstyIvmycsvTw0fXCfmwHK'
'7L8qPU+o1q/KuO7u7WrtfVWGS9/OK6gtea1swhTXznKarhaptt0oWaeRoIHURqucJAlrI7Zgzm'
'32mVdltJyuQDsEGgegCKDuALew2r5WcPugUU67tME7gg6yqVlz9rB1+uq35snE3mrGEFnIHGy1'
'jLiJ70q7uFKltFjWCLxwINrAgQJ7G7nWH8ut6VNGkd7YhhkN+QXCqPv69JpR5f/1WHM+AHHDPm'
'sf264WXQIdTq6Kf97Q0JG8CUM9K/+mJjSElafrZ9843TdiUcvct2vAdfvk8ukyxGaLC4tbs+Gi'
'4hikXx+sd29KL7lDpt6t/Y3bVY17E+LrjgagLEA3J8+MhxTUmdyHkfL5iwpjLaXTKdFbjfBgGu'
'M3aJxnuYFhGmNoEoAigHYERx+msfvQ0mBf/BrDeC55G957ev70Jt/XwFkcn4raR6rhxFLaxdnU'
'BWiEzK6Q8rx6Cp6weoDUHC3sbWmk5mhVb8s4k2i7amhvQ9nFSwNQFqDLSfL9mhzBd2VIPfp9qE'
'fvzBTS1VNCxaB5tSiGr9Z8WXSr1fETrXzVfJ9S93taMxISjoMWk+GbLtKD1I/5O/U1UnAQGSaW'
'/2CiZvOTAYMn0eeJTjfCzFL2y+ocixg97GuQ5vTOEryYZY5xhP4FSiWcISTkTyL+OyOdNFEf9C'
'OR1Adl5pEKB0PIuiK3IAIAy3sOI4I1SlyfzI3qkMfsQ58SdOlzdhg90cVBe4kyVxikF4vlZVO8'
'BoKObzw9RILUfSgIRkIwiDHuDlWoeGXvMu2pQ9357/HCpkMVqvdkuHinB2UBQvXO12YUFiW/mm'
'E99e//t+upSlz/HlXVDlWmGBfvQWePHQ7aIdC9AYiRZqpqhypTv5phVfVJw2wmeZA5GndHPJsS'
'FYZB9KOg2Arh/aDeYtSRxgpAsV4K3Xap633gBvAfg3E88HCW14Y2HayJJwQ9O/V8y+kOvxk3fS'
'31Ld7wJoaw8eupI6QdnYIBdX7uGTlD56M/dqj+yFtAO3OF2yroKg+mjwpOwYM4Kt0BKAvQNrrS'
'PZtAnckfgCd/FDz5MN8M1Dy1iU6WblABHgVx9gfgUQn3COhkFvVhVvR4wp16thn0B0aBnXq2Gd'
'oZgCKAcqqUderZ/rAoZTZ+lHwkw3FxNj4onEEfNkW1U68LH0mPH8m7qLfsQVmAtgfzzyR/iNcG'
'3PhAOIM+ktHLnkA7BLo/AEUAofmPB2UBQuMftFbOJQ8B4Z8Ewi8PEB6oky2QDOn6EOYtjR5yjO'
'SHWQ7wJHOKZAY9ZEjIKZIf9lSRUyQ/7BloTpH8sDBQG5+70BmSc4pkBj1sm5hTJH/cIzmnSP64'
'R3JOkfxxj+QcI/kTeG3IjQ8kM+jjhuScIvkTHsk5RfIngOSDASgL0ABpsTcRqAsN3S5I/hhIvj'
'JAcqpNyma47pJGdTlt2tHFuP60x3WX4ppBj4gd2qDtAs0FoAggw3WX4vrTHtddvJ7P4LXdbnzg'
'mkGfNlx3Ka4/kx4/knft/tuluCYQ7r82fib5rN/LLsU1gz6TUTW0SznIZ/1edimuP+v3sktx/V'
'm/l10M+Jzfyy69OTLos7aXAu0Q6P4AxA31bC+79Ob4OdnL5/H15M+wlx/Mcrn1lLvV6vtKQDq6'
'SBOzX3fPhF3t1Z8ZyXDtNKnr+E/s7X/DlC7NPy3lz1S/LX2mhU/TYvf5zQMBKAPQJXTXeGeksC'
'j5Czy0J/98pkOvrEs52aK4I1h6a5yfmiV5CUOudlsrU/FN4tmwnB3NJ5FOzXAkqW2X7Rnz1VUV'
'TGqKtklHNsUdASgDEFKJ/srWkUke453L/zcxbJ4qLogPW80fi6T+wXvFeQewebqCw+KXDAzNMo'
'XYQoD1jijrIxVxgcNJwyGKCxKpXSuuVV0X5tDWFftuiZxEelYUqtSWmPBarepiuAOkYN94vXsC'
'EKMA8WsPGlKyyV/xuYVxIEAKxw7I0Jug46xrjs+96H/xAnHu/srupQbKAAS+8aaswtqSv81wBM'
'srsrJArGy5urSUtmhLxNOcPiDXMrQfY667vG5rFyZMP4jZxvroqlVb73K04hLLQvVHooaOXEuY'
'0It8Blyxonrs6f0Moe+U/LpuIQEpLOq9TY52k9m9VizXRfGjU14Yabh7ynxVUhDEKUgvTU0eL7'
'y0UV9zgfI6rL8oYcwYbdAR0rW8flPhdN2vfUmzVc5r+4DEEqrVBVsHwxXvyrYAlAEIoTWfMNps'
'T/5RrG0fVDdQpbxWlvaooheLR47tfy6IpK5BJBL9Ijk+GkYrj8der3YxE7Bg6fE/74UpuwYKvW'
'cw0P9NIbYVQg7wenYFoAxAUAzvUVBH8j0h15c6t+qpxhqPDZ+kRMYX1yXxMu1xrag1/CmuIJgi'
'LuvfS+8LvIPfk335tu1LZ/LKLN/f3x8VnlepnlkuLfhUS46iDi3QLMMWSsslkWFyzWDDR11zTs'
'RXTzAicL7x8x3WxfPcqQ1qwtD/WAen90aOqcN5vVSsWVx/KsCGyPoM32P5HnV+3rvD1x6+4XqP'
'GVwRXmnN0gyUAQhK0P0ZheWko9ru/H9Jhf+UtU+hj/9CkvAabWN5YbnkLYzWf0LCZStVlTJwez'
'i3n0aqDxVuB244fJZZThXe42AAFp8uaggWHc7PqJ+/bL6VKXC1Kk3HNX3uJuEvPLzWl2SriJMO'
'ujpSOIrz4oP1WMxJf7kMd9gyELeN66FT8Z+MvrqSN2ZZUP/keWGxtpb2Tp9zaf4hILKpDrc8Hq'
'cOCvMH5C64aUPD5knuDkAZgPaSdP0tW0ksLdIO5H/u3CvR4+y6pmtexQLn4XsagXA1Rrgxw2IT'
'FJhg8r3AXRBLC04VSx+3UHWIc9w/bR/pvb9vi+uWHmUFhE2cc5vcYs7OntKTtBAS+lPeRV5GnN'
'4+yPAhezBlGBO24nKwQ1u7LkB6qmXYl2Ag7u92EV2JfsHWuSV5J/dTzL/5fMixyl0ezGj0b0iS'
'W2g178y6CEUBZQDaT/fA79pqtibv4a3Nn1FtHmJTZOq/zRk6siKdxJfLi6X59XnkpWk+s8NaKo'
'dgSA1T9VJwo0EWPxyvm348Hf3AC4fRNZvSF7cSft6DDnq74+8aC9+W/HqWg0O/mjnrLaG/VRmf'
'g5tpyzEUxWXnmw4iap7y1SHecHdocXWY8VpiauugtHKSj7fypcv/fL9q+XnQwznodxvtD6P+wg'
'CUAShPF9Q32/5sTz6QZQXp/8qYhkRycb48R4LwzvLysmlhRsyhk0UjGrnBnB1jvtJZ+Yh+qYs0'
'UHA1lti3hOdjYbNsbiUlntlNv6uuNFDYUGjp4L/h2d9OuPtAWj3ZTrj7ANSTnvgDcLzEyYfQmf'
'SPYIR4T+QtDIHbxcV6B95Pw6ButHNg02+nTCkbnhyTXjLQva1TDM0SdvQgAVTi02UB+KkP9nQ5'
'+xwuC60PzL/Pwiv61JwFefShrLPPxmzy+C9Y7pVsronVnMWgD2XV3CTQDoHuD0ARQKg34UFZgF'
'Bw4r9GCouSj2U5kPu3o8J0pbhKOqSLLT1mQT3gDf4+xsGV3rlxptSHiC3RjFhPq9s4a+nkGnag'
'SeSKyKpYBjRlFpqhz0ZmNfj5JwpEsVzOiesfcYy9dHCcV2ORYQaGOF4LLfFKt2b4ThjaHoB40R'
'2aAxGrIY5ACPt+roIyyUNZvpTdWJjRSE1JyNP8336e+kGN16iV6aKtWqu/wtvUcDvn0egb29xH'
'cQliaAiKANquRvVYbXgEwt3pftu0bPJJZiD5n0gl44l78xz5eFovZNN0PG+ze8rZeLZcmBR5gj'
'TvfW4hCEb5pG/2Gqtp45No9toTgHhx6Oj+o0hG6U6+hOP8NzjOj2YLI9b9tnBc3R7Dzu0xTXID'
'R7e/6AzGRGnQzudLB9VfWCtxeKRS5IBGUnJqnoNKIt5QuV5d5ku4lBiLW4YA8kscTy5uIBF2fX'
'UNehGPtoK0RKvlRqU/YN2xgm5s/9rRXPo9TRezLDF8fFA+HuPSzYYF9rDytJAAxlJcKw/Iuvh+'
'y8+eBgv0xcckb+748LSuxwxH3JytSKRUu7O0zNsAcrLipy426EiP6Ex12r6FxrLYX8AaocJ+Kc'
'vhjR+O+G/wxq9Ig+Vfj5yuarlILlzfIVmfoK1aLCHXrJ+2ZrVRP3XQOdC4nJZkxKsUQhB9r21B'
'LzazVxu+9hak1f2QfecW3pfC8eVqY8E1pOakfJhJTFgTeoAcPSvdys95GbQ6Mb93q3viK/6sdC'
's//4rv2tyt/Pwr0rX5AUNKlDwmSLlflPhTpbuknEaJK8WdkjaDHi3F2vwpVA3S7SzdteoCVDiG'
'bvrW4cGr4kL/NYd9a9I6Kwv45ci11xX6r7sm/GkgVZyg4BqNSMhanHJ5dyvb5il/xTpDd6v/5L'
'E0AiJZW4gAsO3HBAG/m1VYJvm28MZfznLQhXxcZ6PLTpt2fY5L6KUAstgAU1xeQt36UysaqrnC'
'fXA5rMD9AvmVGmm5Wq2X3IgLkv0mJFPoJVWHEDfY63t5O2TqV4fA6FzBJt6DIW5Y21jkEQbpTS'
'xEaHJw6e7yajia/uonOGT1vphf+aItSo9Oyx5o2jOX+VkSa7dN/Zbjk71iSaZ/uVG0skTdrQYI'
'sygLrSjIlSYMiUgzJrYcUAMkJe/fYyE1wET67TQ1gNK/7UVHt0rKb4vo+F0chy3JP0B0vKaNRM'
'd7Iyc6wCBS3kVutcyhbgXuUaCJJpVmDTsdPwfDkI+Jw0qHj5/oC8LjvLUbAlRt0K4UQ8y/sMlP'
'rHpSI0GSvcHxcM39hyzXSvw4r4Y53vf4Gp//QORykXijTVSn1lVxIUccLMG5jBK+VCtD6LP6RY'
'cSfMDauos0kUKXuGU11srLhy6/B18BQ7+3l6XXxMhE/+niQnmF2OfRwoly5U4xhc03vB7i8njl'
'Yxoyo1u9RTkfL+cfrF/9FuV83/NbvUU53/ew1eFTWYAQhXdcQVHypJgkjxRewK5BQ4PZTJrqYf'
'mdCuYEZsTD0OB59zUwoyfTc4rke+bM3aLM6EluUx+/IaOwTPLqNmZGT0QFtFxhNNERmSdVZb1g'
'jWnsBqx6Qq2KvJpybSCIh7HFqGFsrmQkq7bmxQKRAoJPw4ctLsc9ajVt+Au6QSjnBB+ImxZXGm'
'PZz3zALj82aNHXYDVuOr9MXMtF5SD5Xwr0eE+D4RfHmzHyZLjnXHChLYVf7CeB7Hhv0eNNIBzv'
'P4VmuDW5rw11g3C8P9pGx9vXDAojNPgAeIdPrdQIpJPe/IYLTR2HOB5MEy20iJ6roKL3Utx4pP'
'CqbafWuURr0nJ9LfURV9XHkllmTllUg3+/7Fzg2KdnoD0B/fjMQy9HZ5+XuwJw8hqcDdy+Vm+1'
'4ceUi3mKcgWRBvRKFnyS6YYN6rFfHFtoOeDc1Tviioi68X4ANV/xS4hlDNbuc2HE/eSeM1NcXS'
'nX6nPxjxzOqWvh2mE8QGXBjm3d/xLs8gBfa2KYeIWeRbx5vRQo9HFjSyXZSMuqUA1QOS8MaERa'
'iOB5Pv8JxvuWNvYEPbtwUso0VYJ+7zyFoaBm6wnSuNc4u5wr68p0rjl8o0WGbVX2x4PSp+SStF'
'XZ31v8Udiq7O8tbS4uZauyPwLBJfPPkcKi5H5hNV//F7KaVHgTnLbVDexmmPGJu5CZd0j/A9mu'
'QIj5kEiJuCdaZLlk8eTqLaFR5CBhR8AqJDEenOcQTYn9A+nNJn0GF9Z1aT1fD5AJvs3LJ6z0OD'
'SBb9+fRmYkeOpKPZUFCHzlAfCVbcn7wFc+Br7yjjanNjDW1BAQ9MFim72Eyq42AvZAZ5BbJash'
'NNYQRic+6cdGaZmkZ4tcxsM33HD1dUH2FxKISZecmHYBJHy9OYEed3opWystobdSYZnOAbxlhd'
'vLlQX0LG6ZsXfdtTdchxGeW50rTMzxVeXsr1x/9ZEbrroBr4xIPCU9TmK/cRc291yfugbvzS/V'
'qkix1fdafuS6a6658WpOygUbVNsxGnJxBQcuSeenyUxhpTg/Mb3ph69lLKE8+ZkSVzE/hYoN/l'
'AgW7jGkS5u1AFdlh8cd8AWe3TVVddcdZXwCxh039fGmloP/4ky0L8KuvudtiTLdMYwhOy2JZ1E'
'Zy92ILCWX2tL2pI9+ecUbvWeYb4sDaSruA4VgmZs7ByvSsFkPYQQbERfu8PB6Zs8/I4mcAZgRA'
'3dFICj5Dfw7Nb80wvj1ZDIVWzIlYQVqQrf4cIxI3s91wTOANydbIlvCcCZ5LfkU0e0yHEpdaiI'
'h9eqdIQ4e0X1D1VBmz6LWf/Wxs/qF/DZ2wNwNvltPLszf4w/uwn93x5yJFMXNO9CZ1FvmgYsWT'
'z0tiZwBmBEFXwmYjg2/HfbWIn/UCRHWMvCans0EyrKRSQkm/3dGoJGd0O9Uul9Kw5Rp3da1I7y'
'DrsiC3GxbNUbtdMQBOZCDwU+kb1UQaAflzlN0VfbtjLwzndl/HebCrPfbaJ4EWYM7QhAEUCdqv'
'1tU2FGIOjyNygoSn4fr/Xl+87JfpXobSIQBPwujZh3n4ARmKE7AhB/pCfpDUBZgJ6WPD1+tW1W'
'JvlIGzu2rybGsFJW9SPASrVipZSLXP0gRM+RZ4dFkNjwYXsGQYdUho3CBCHlMOHOp9YFxZlnQh'
'PsczOG4szQzgAUAZRj75KBsgChFMAJBWWTj+K1g/mbwmVxB5+g2o70J+ViVmxs5+SuFrsPey+P'
'R1854D4Le+9H05PDKfkoJnd5AOK5oBjTfUD69uSTkL4faCfpe3dq9/uQnNOnl3PRQkgilOmud3'
'dw40y7cGBO0ds5/8QWMA5AsTwOye2L5RRw5zA2OvZ2FcbM0gkeD8fTJ9s4vee3eZp8kD/Xxh6C'
'n48KY2ZPU7svihkWV01fRokHjmxydyya/ggcUozXo+FtSs3BzN61BjhcKlppv2jBKA4PhQkwpj'
'NlBBn16Y993nDjhIds13Y9rDxzWpBklGxXFxJDuwJQBFCslpjtelgJBG/EH2YUFiVflNP6m5nU'
'hsFoRHuSNpucr7Flhh3hKWuLmFHkiNSri2tnpKKc7GSlVIKJs7/o0qfn1w8SwQaXGqk0BaRpua'
'E0yjWb0zfDHXAmldhPRAOMuRFn4NHyScjc/Kq5sPBqaRP7jCbvbVfWxYgMQR0AdXOAioEY3Rcq'
'39qufOuLwrf+2Wgzk3wZQw3gNjDtbrCpRFhHonol2EikugnyfAz/xXJZa5QDoWbbh0tkDdLEX6'
'9xLeQyLVwlt+Cv3BLUpZGg5o1udaM7JyrDa0oTJsEsv5zGJHxtX25z6bTblVMSaL/y0+3KKQmE'
'BJB3GCazyVfbOCrmdRFX+w7OnojaUxzPYcKbq1q5Q+qOX5PJZME8HoUJOEw1ccofXzoXfb5PcZ'
'+LawpOs80ZzPer6cWC8361jeuTelAEUI9eNbcr5yUQ0r5/zhbblnxNdJM3oqK7Xhk3EIaPR2O+'
'7GZdrmxsNFDjSFS9lAeqd83xYFdvqAlFGnjsoraMfbXZLGnyF7vVoKTH1/xdT0ARQGY3FFAWIO'
'gaX7U1tyd/3cZZBZ+NUHGgVqyUEEZlPSx4JexEczll57m7Q5wiZpGLfAZY6QrdL8zrvYnJUY4n'
'jPgpU0YgWQI6QY75X6fppF3WHtIJgnf/GnRyIABlAUKqxB0K6kgeF8l3oiANrwvcD5AvwbKboZ'
'2BFGo6GyXGxEBgNLCmBwtcNCjYYeSG8wfos5e6eXSgaIvXJwQUAZQLBBTSwx8XAXWrgjqTvxEd'
'7nrzgYTfHSg0t/O2SnPcSMpVWeCRaGI81uNtwUHqpIn9jVdzBRQB1KlamICyAEEL+0sjvVzyhK'
'hhH4kKI744J0fIhxoK+/GlCr5VLrTkZK6IlZabZ6m6OTw+MqBx40EvFylP5T6vdkypMFMybpf6'
'SivOTPrutsJo5XS5Vq1o8xdbO/LSn0jTHvLSn2hzeekCigCyUp0CygIE7fAHFdSVvKI9QX+Q28'
'KPeaeGRGGuyVFrpVm0nrv7ZBfNlb8QgjoA6g4YCeJiCbQnmH4XzZVAB4gpfbdTYXHy0+28wV/t'
'FMc2G9ycMY5btsupKRmDKWrd31KLxbkoBK4vK64sLLe4iP1dqS5ARfSYYB1ncnjm1oHC5Itmbp'
'0Yl39bEYGYfxvkp1p9rS4ePu6wwtXxfS2qcv1oXLiiMIMmNHVY/yX7mnQjXtigXZPtRzx8mwX1'
'sO2wOEcssIF47SsKk7r6sosFtDuV1M3RGzHNkaZGJ8J2tFoflO9pN4cCbfxqUeIH+19y00tSdp'
'2XHOW/Jyemx154MNYatqhci1PEvjFnt2zeFsbDunOnKFQbTq05FNVLtG0SeR+HBT9ZhtHkZ2Vc'
'xl2h8OJCP3395l7sQa+2Pqnf/OLexWqV/uydK9Z67zg4gCcL+uTxk9MzE7c1Py9553i2cIeefz'
'GrsseD5sRfw1s3H8IEDq1VfV0fOh8kkw/RN49u+iPN5OjleB/j+DlsPprMiOYyyevlk1ZcPlNc'
'r6ckFaNKrLxEKjTGwqBSixRL5PfYxVFcgxWQiaNUny+uehXUTNj95SESt/aGCyJ8ydDQSw6Gdb'
'S4uhZ9evg5M6NT5v0EOb2E9uclm+qcUh5ZwtSkV4ndyyqSBeIKPBsziImL/HSai8TERX66PcXx'
'EPBHoHxyWQDKAvR04nivNTHRnbzDWN6IFtJm8UBa2UKzBcEFBbsIpYB1h9bo8yurp6ZoE4DdtC'
'yeC03xoJtzN62MoVsDUATQtmCx3bSydwh//Clb2Zbk5/HeEbrq37ahv5sW8Q0cRyrv5kqLXGrX'
'h5+H0bF0JFlSFtfczV/ft85IRBkuipWkVldhprwioUa2zi20Tp4ZTfgSt4IttE6G5gNQBND+ZC'
'AAZQE6lFwl5aUB25q8B+9diVpz57FQTXyqk3JIW9pfqWrsmCjeC8iaqMBoQf8u1WoHPULYh+iL'
'LJ5qaBHH8poPHg6wUyrWWC9YiM+OJAv1DTC0FTVAsCZa6hG39q2EofekMcTh6MDQ0wJQFqD+5I'
'r4DYahbcl78d5g/t7CeBNisOolu3iyGdJtuyW2TY/dgna0hoi6JhIVY/yCWOchDm8cGjpE/wmK'
'mWkR4UGu6I9oH7e+bbQ+nhFN9Eo38220vvem1wfr/3vbU9fIbbQ+Al1BRPFJM5JsT36jnd1z/z'
'lTmKk1StowQO1O1ZJ2Z5mXZVXWXax7WPGtUlrjaqG8pXWOpRJFVowiVi9b1HCEgPurtRKQ9hQw'
'KBs6kHgQqyPTguXrDZ4J+mvIVd+llxh9mDTUGz8tZ7Va55oiJLSWS8UFDTLSDrshcblQDLERSI'
'9XFlc8WBAe7CMniRuFmvh22h5GKOF50CF+O2niDO0IQBFAncEdZzttD4F2JXviexSUJA8Kh32p'
'Cq3NDHesoUnInJQ6ldxwEF2R5gsdb4rXOBREe55D40xQIyQtKxIUCGlP3cwSFAhpT93gExQIEY'
'76VzhHSfKh9uSC5POwnX4hCmJlbWdkE3x7pEUXnuptMdLrslFx3YWCkoWLtaLkAmn+6O2pPD6O'
'zeaq59yqehnUm7oiC3b4O9aCE+c7Dp6vVqAI1H20vp/7Igk2oNxMskAIrRd9Nb+Y4b9hkv1YO6'
'dqPZxhTWjVGjiEThW+160HhuSxoHcH8VpXyxLyRko/rWlvYW1cMRTcltSPUq5rEmOd14HAigqt'
'tIHQKuHL5ZWV0gI8WhwWWCm7rjZsP9MA8rKLXE1Nx3zh9VJ4UfPhhndWkPnZH3SpPjikFYvT56'
'sRzFUuiBq3TId5pSwJO0QUQYVvPXSJGo4Zv4R2aaCSqOGYoUkAigDaoekIiRqOCXQxidNVBUXJ'
'w3jtivys7IBNy2GQvZ8SCQxRzrzeSsPy44iZ05BFblAGa4nX0jXFKlgATKz8TZpJwU0NVtaHPV'
'NP1Mr6sBdaiVpZH27n8kqPGbllkkfaOYfqjzNyFoouEiaYt6ggmqrq90y2lo7n6WJ5OShw7WWx'
'LM0fDhCK7hctULu7832OWzJV+Jh5KhvSAyq/BHjyI6bIlKWKHoU+aSlp/WODIzqQchqulQJrSU'
'B+Lt2Lyc5F2YtdgR9+TnFZm82drpbRD7xSwn7B0697HGwcLJOMatqBK9yWwHb3iGf3iRp1HwG7'
'3xuAsgChfcF3I4Vlkz+TjfsqDC9LJakbUHQOJfEKsXsOC5/0Jz+4TLjCkCbigEDdlVmGoakD8w'
'fSx1ZWqxxlDipnqxK8+ayoFYImB8W6VfiULAttHKQRJkRTJHKIg3KlSR+ED5QiAArbl/7UIrAc'
'IBLWYl46YeQihyIYjBmaC0ARQF0BIrOCNiDyR3ACdiR/DnHzax0kbn4lap0+hUlzTJum8ArfQR'
'x1sbLRBmidIGOVTIv0xhnYDgcZC1KWTVx/gSxR5trgbCtpzOZsjBbhhR5/vsBM70hh2mUYHJXL'
'lBy+AeXn+ocUIRSLm0s+nJ+Hg1Sl0A7C05+3c/DH44wUlkJfBzKflv9SpnBC22IG56fIob7G0s'
'u1INuhAPPFCkx9ntJcWLfr/VhjRewpCfU4LdU3Fepi6QCj8imqJLm1y65cJWLuwyYSomS9PlA5'
'kMtvuBpw9VJLXlwYXoN6qI9u8FPKR4ot2n3f4LMbpYebqlA7VDJ93atQO1Qsfb3dFY3doWKJQL'
'uV9+9QsUSgy5LL45KCouSb7RxIPCMmO/PtaN9QF3SVbveClUnccWB9nSN9HUahq3hRR669Vk/i'
'DpVF/CH6/NPcfBCyxtDOAMQzyql83aGyiEAINX5Hh8IyyZPC0l7X4TQ/JuwJVBo+AuLnYsLF5e'
'CiyWk5TPGo/sLCn8WL0joT4aTRmV8Ym5QGC30V2sK+o0HRJatNnjopdW5otrzsu3Cpk2hIh0EC'
'+1F+XVLZuYqFvDvAKnhduCBxN5GYvnSyGJDESlbQpdqwpRUSrDowf1nHLPRDynHrZVdcQ6c8a1'
'NGQ0M6LxwkL/8nLb3KaweH2GqC9kWcdafGE25+uFbyQcJNSJAARvBqpxEHdZbMOKNSZiBWrQF9'
'ksRJ5g57VYyVdBHU6xMzEh/p2ox8GouzUxYEY8th/zUOh+ivV/ko6uniGIjSwsEhp5RxEeRKvV'
'Fe00D+Erp7mpS5mQU5O6ask0hxk8VX4vDjaqtlRWXTnAKu6CpI9U1hHfOa1s8M21o1jrm8lg4R'
'3aEqBJ8QOji73UmCCvGkl3w7VIV40ku+HapCPCmSb0lB2eRVHexgP+DbhGpceFDJ7MgNwkPCpE'
'xi70VkvRz13eLYSeNcPnIrsXlzZW986kmT2ALtEOj+ABQBZAUNd6jEJhD82T8bKawt+bEODmV+'
'QyRz472B0Fgo11eXi+uF1UaNyK1kJH001E9UKy+TXLwLFxiikfVqw6X/ImVMqfRlnBztOjjEPi'
'2M0woGlF0s+TKrtuI2myNNfcCtBQ5ehuYCUASQRUbvUAcvgRAZ/RETx+3JmxlT+d+gSyE+CNaM'
'TsZHeU191rK6mczOpOL9Xdng/jLxKKixK7ghzVt67EGvjoH5SnJoox6YYLh5svcywPZUt7w7IN'
'Ve4+SJmH0TFt3CvUtZoSpKzoHeAYecxV+XpRszILwjTFbFTtRLsF0IxyjdhXYt7AAsnctKsUP9'
'x4zHEMSoNSvFDvUfE6hHI1Z2qP+YQKjGNqagjuQ+ocEbEIZZ8wV/DNOsfJUXXSMT4blc8i/I+t'
'6hvmIejD7hzwJ8xfeliQW+4vvSxAJf8X1CLO+w49GZ3N/B0r/sXSmmRnEpAgYbhA47DnAvzbe3'
'gPayroIs7/jYCPaL/2yscDh4v1omRWr1He4jsjnSKwfxVLlUw/5ID5R06TS3Xm7rhxnSxHvcSu'
'CCvj+9Xm7r1+EylHaoC5pAUBt+MquwXPLuDnaOfy9TmCyyBHHsVYS5zhzNkRc1UlWMr2HRMds8'
'p7VybWwXBRLYxazeQ4gJQUQgQBGHGlxkUu1SG2tVcFIpVyf3B/bQ+F6roq/DiC72xpjNeSQw/T'
'SasgtsUpCUXpQiA6e84JYkJZu5QVJxQW4knOBId0ZcYDgWbJVROKtv4nC7u0gciP6irYkD3ash'
'v4evnLeEdsrLqRxt8LvTGwx3+buxwRcGoCxAiDF4wgi6K3mfbPA3otYbjFvBU9xchwkO8at4wz'
'zig1UrEXNI+lPaMDv2D6d+r5+begSxTcRzVd9Aoe8IqyBQyg8G2IQ3n9dPaDng8NRF2HxfGptw'
'6L8P2NwTgLIAoRbUtILi5AG81ps/RrpH0yGV2WLyjbl6Y47+XCXpANYml9C6C1BVXNoc4SvkYd'
'8XzhHuwgfSc4S78AHMMXwqC9AlyaXxO3ET6Ek+1kF38k/jTv6GjsJkY26apsI5/Zqa3pg7NO1m'
'x7ES0DT5MqDTRO0pcWDLbKusgw+k7o2kMg+vro5yYYEN2h7LEGipcaFWJRX6kGBkiP5nuVzH/Q'
'JgI5wW87LvF1v0y+l9zsTEseGpoeLqan0VdWSqK70DLTVuuI1wR40LwYc5D4RWpcM8+9k2zpK+'
'btd7jBtae8IuO95TirlrTQZ2mMpK4WKqHxJnzqC1K8ZYOqMWy9q4quZZy3EAeliNRtZC/dDk1M'
'RzDzGwfmhmYpK1qqMx7ixLPDdcQYspiqyjUPhgubgyuFpdLs+vD86V5Sz9B7vp0Dj+j8FB/VYB'
'nwrhKyW+mSvaVAU/qmg9C1bDMZgOWtOIViQ5P0yne+ksKiNINQFQe00PTNMddJveFi/wn1yeG+'
'dsZ3668JwGjCx8NCR7uoqb11po7vSnRHKm40Kv241nlBeeabuBf/fqKe9REwV/h74uIlmg7QLN'
'BaAIoC4tC9OjVgoCIY3kjZHCouSP8N7e/A9bDJ7WZdFMNA7H6QVmZvmu24swCiLghjSEUc6g9j'
'rPeZ2afyaI2UuZ1FqG7fWoRYPnRFPd6eYOi8YfpVcYyeS7Uk9lAUIX9nEFZZJH8Nqe/M2F6RYr'
'QzoYrlHnXJdNj23IGJG+s9d9mG3I6emxDRnT2xGAsgAhYeoJEtC5nck3wGY/00ls9n/mQr2ibD'
'Wu+S82KoemyqK70IcV4IwdmHoZjBeUrcIl3UJh+tWWx7F8B6GHHFKfKt0i6JJvik+/qm+kXaWv'
'qRVuc0LMqNC/GPQmHn3h5NjU6MjBAecGwFd1ELEkVHwCRuXfjZJ2E7+qXWTgBtIpi7EYE1iXdp'
'eVkqkmrhiumlCKMk++v0rRtxRmRGNvWQxvSIUlMuHLNKc+SYRelchL+d5ytcqe5KL2nXEXAlK9'
'mjXhwMVyxgrTmGMBh2Dq5Pg4/qmepUosrmj6RR9yvlktl0qYdeXKgOWimY1nBffQV2U0D6vP6h'
'QHYsLo4FLVaWl60eRJzs6OnzxxYnYWiu9amblvvQx5RmuaxQ2XPQEVqVAobs0+KCO1Mi0OpZob'
'WsmLpMAZmAKkdoCfbT3so8KhLXVQWyzYqpWW2F2MANnUSIj3WINpooYQ6zUp/FNeqlQ1ydnmHQ'
'cTR3iZZF6BlcJG7743L6q+DpBGB4eWxJsLqrnyEttCDkm5bHpvEHxrEGxs0H68DKDgc7ETYi1k'
'2E5iUN+ADOvhOnE7WYb9zw72YO4yAHE7BtGDYgHfqUZxhm4NQBFA2/SysVPFDYEQJf8cBUXJ42'
'LuuhjBUqVCytzlKmrQXXa3BlM1TdumBRnBI9H4efdBeGAf99asnSojHvfWrJ0qIx4Xa9bPRArL'
'JH/bwYFVPx41TcwK9rusH6mGSOxywMKE9BzNTrxgdOrW0eGR2enRmZPcG3CEt7eXxxAyHtTLC/'
'/VG+RXiulWj7DWb7HVclV2zO9xs2QJtEOg+wNQBJAV69upIodAKNb3cEZh2eSfOzhI6sGMrPbM'
'BtNjEUXYuSQNVEv0a/b9YrXwgrMbcfIKMhvqhRSnC/gjhrRakXIbkAj6oIUVSrFjLkXlfkGBlT'
'DkgqQCylbCNqW15Ou2D8YrSJeaNmOkiz0TtWvzr8vHHSI4uwiFKxe0CMRcteateMHewK7K2CQk'
'X+mwDrvqP6f3BnbVf8be9Acg3okraVM/Y5TYlryik0O9P9RMiUHpWbDYEpdKQJHzAY1p2ECHM6'
'PDUyMTt48HpOj46WDBU2WveNigmCHiyu7LYtqTyDJtKkpMXPiyoBEDLdbEKK/Ns6x1dj3AUZut'
'ihY76FaP9nAM3R+AIoAOaOj+TrXEEgih+8MKak9eidf684cLI9bsNAjM0ZiB0l1nmRGskDwIDX'
'3QfQvNEhmaD0ARQPs1tHanGiIJ9HRiKXcpqCN5TSfrmLcVjmvIeT1smcg47OfSSC7cQndroHB8'
'4rbJE6MzOBqqNnGjdVj09hZGnT+2NSOE3ZK/TTPyhNVhM+oOQBFAW1Qb3al2SwJBG33YyK8z+Y'
'lOTsJ8MNLiMmulJc60WkxF1PubjC+1LbUqyhJnwCF/VZALimwurntZrIU4N+uFbPkp5cUgDGZB'
'MpgQ2KRV7oPwVGE6iqcAN7Bx8mpe06nmGIF2CHRnAIoA2qXJkDvVxkkgJEP+nbHNXPJTnRx7+Z'
'WMN+pCIxTnsjfcH1bnufrANAhHDg6K1LAkKVaG3ONhEKfV8Y9DLcys+xr/clXonZfKRCqmLFOF'
'S92gBk7RuPaiNaEzXzixNNIqr2LHQ1iVKBQDonC38DXMw7QKN19D61y6wM61mqniIr5dASSJ8r'
'GmaUJWceH48MzoLRNTL5qdIZ14emx0fGZ2ZGJ8NNhGWDIZ8bQfXorDksnQzgAUAZRTg/ZOtWQS'
'CEVZPm/b2JXc38nBfR/LFMbg57BJBRcsMS9zBFm90B8EOqmdxeq1nirX3b3Dy5OgB6rGjagsbB'
'kx5dJAx6xFie/T3KgQKbSIlCq7CC53QB0iR184evzkzNjEOCOST437bXyUZMMsbh+KZHdP1NAw'
'KwhfbApYMmm7EIYEpJyMO9VMysj9KUtZ26lm0vvTWwUz6f3YqnwAygKEUvI/YdwoTn4W723P37'
'2hqLCLrmRtJdWtz8UlqgedU0yt82cgIZzMmvNN8Fp0CLPFwb7K06FZFty0YV9laHsAigCywsA7'
'1b76s+C+2+K/tcV1J+8WVvvliD3QtJlaY85OY7WBiJ604Xw4uLNaSTpFiZYmxV3YKlOZgwns4F'
'QxzI2K0816W/Bu5sWkADAjlm5V4SRhP5Ho8AU0o5xrCNMBYyIqpgsfxyE5H7BhAqkfvO4Q1A5Q'
'dyCakPdBoB7N2xRQFqDLiSP/ZzvKW5IHRFn6XuQipFIOhMLYSD04qsqxOUaWI2pdF8aUFitRIu'
'n2jrUSuvIUVfQht6jJKSHVGLWzkX5Dg5lTDw5JypgLbvLFqzggo08MEX0S4Chuv3Kt0BcyJ/UO'
'qSGjjxSFKzQ+wLp/8347P2Cang2hSE55IL0TW2gnHkjvBDJTHsBOXBaAsgAht+geBW1NfpM1uf'
'xLZSJjI8ovXJg/3++8B5kZmQY8huZxJtqn4Jzhj9NC+PMPhMrcVloLQ3MBKAKoK7ijIoeEQOhC'
'Mq6gbcn7O81gqNYVJaWNzkYNevJ1piw4MZgeUkB4RPqOV3W30fTen54eUkDe3+lcrQLKAoSOCw'
'8Y0W9Pfq+TXcv3ZwrD8zBpqPajmgZRQTPJB6Jpg4FWSq2X+JwTdyASH7B4gWYDkuWQ+OC82It1'
'J2NUsRVmZca2jdmRusvEuqvz0qWK27ryN9eDA7Em7ZVJ7ayvVvV6XFufVSXGNU0vaW89NY5dFW'
'wAkjwYZe8P9UAkefxeegOQ5PF7na40sICyAKHfzoO2AUnyUb4k5B8TruOKmsNnxPuxWq5AGqFM'
'x2SqjGsYMbfg65MAO6dBaK4QCGfU+HIh5UUPdYILfsPKoJ352EsPuWlwKXJJ3u1FlYD6Wu/BtN'
'LtQ8Y5rlWD7tYaq7EkgOl9k/jLzXKsLeZI+7rzZPpBXJAD5bkywonZigzDJfMc6VxnO4EUFcbd'
'73UGZzAhEfrRNCNCUsZHO13UpYCyAF2Y7ItfbyJ0R/IQ3rss/3JuUY+9mLf4Grv2WbMvVysh1A'
'DgIl5taAnrutcQCktV3jKhd8bJQBAbaxdwbjHml7cD9fkxIZqnV2x2oD4/oDsCUARQj0ZiCSgL'
'UIHuIC9XUE/yqU52zTwflcDZnV66Cz6U6kIpHdVymyT6udw8jY0fvIoVRZ9pGrM4q/PNckZyor'
'yjnh3jfjE9tBj+Ps3K8/4eOjWfSp8auMI+1elqIQooC9BuUgIHFLQz+bRcnvb7Ikea29RUnIOf'
'RrdVPP8pC8EXaIdAuwJQBFAcIHcnuq12cl4W/O27kj/uTC5I/gI+luMupDU4oU2Vls9wyowrdb'
'vQqDVl/sN0uos++8edHEz7aMR/w3b6BcxtV/6P5OrMY1tPN1cWM/1F/hgXktfIj+bC8ecqG5+K'
'pMChpBNZb8xp53FuaqZ7DyRb6T2ehJrZXGj4Qmw6j06aJrFYtvSJXWoM5jXS0mWzd6nv8QueJH'
'apMfgLnpHuUmPwFzq5xvoPKChK/jteuzQ/JklWsiVW1ujKZvU2nLTX/OfVph8UcNulBmIenb65'
'y00CBmKGbg1API1tGgyxSw3EBEKlknUFZZJH8drl+VNOzzw36YTtZyQ/vSFuLKL/00B3WMFJCl'
'EaC1c1bZcafx/13HGXWn4f7XT1nHap5fdRcMdLAlAWoF46vajOuDv5HzgH3+7kDsljLdvaa55k'
'mnmD3nfT8P+jk8vB3cN/gtwfEzWEVD4u+GZtM32xDBeJVylcfnJ6xDpTOZ06zOkUy2zcbJXVKn'
'J10gRse3crJfLnaVKSyLhbKfExf9HcrZT4WKeLet+tlPhYJ4evVRUUJd/Aa0P5HyxMmNiwmEn2'
'JIUNtlwLCCsZJgm3mKdqt/JsrEHwzfbH3Uqe/MnHTBDuVvL8hrc/7lby/AbsjwcDUBYgdEueV1'
'Am+SZeuzg/JXI6LMPBwstNYiilaXujQry5F8PmDELkz9DHh9xsQIvf9Oa03UqL3wQD3heAsgCh'
'SFRZQdnkcTn9L9w4Z5Ot38+0A6O3zRw2ev4YTeFiNyfY6B9Pzxw2+scx8wMBiCcKZnAdgfYkf4'
'tT9CM5OkVPb3GKFNu1UD3AAdoDzwzocF88yH/iAP29mLEPNJmxy95mrYvYoyTPL/ytXSME2iHQ'
'fACKADKT9R4l+b8Xk/W6gqLkn+TGdop7LuByW07xsQpXAJfkWdebFjYSbuC6ghhLsZaIbWHBso'
'PqUsRS6pT7ZPNgJSB+/vjfm816jxL/P3k1aY8S/z91uijgPUr8BEIU8B0KyiRPiDp+YpOVuJu8'
'zZaDmhfKC4i5lTsG+5Vwva+jr0UpxDvInj/wTyHeQfZPpGcLnD6B2e4OQFmA4AFFrd4Lk1fniH'
'jeAOJ5XqixSti8mnXQ8DhciE1femeDRMYOTQQGYKWwC1HqPscsejv/CQp7TQ43WV7LhUpDDHp1'
'Tg/nhco2GdoZgCKAcrq8C5WGCARvgY0fJa/Faxe78bGzDHpNTvdMoO0C7QxA/G44C+wsgcAiOC'
'3wQt7a1+c4wPvbmhc41wCS7HaivT+4Xy53KulDGO3yWnHQWtUgyS1lkGW+vKwi3DUjWiutFOrl'
'u0t2VwCCXZmisD5hnQ6AI6irDh++M8aFeak0VJhWy6NerZaLtaXSpRx6Bl8+lJZDaIJRP8S/DK'
'2uu+oFrM7j8HD1iJKYhZQAL1QCZDQQdi526IIb7PVppGJDXg+kbg1AWYAQmv470j/5PlDgr4AC'
'350Rwy/46HGzIjeJ32sDa6NamssageodQBZlKWEq2t3MP9xvLPq24enn8flLm/oZflDT2+NUit'
'JStdCHmJS+IChFhjGz902hy3nAfA2opcix39aFkgNxBlwZF5XgsOmxDT4QK3KurGIIR/5sBeIQ'
'9p9L2omsL+U/uZg/8P/0fEJy4LTP2RuSPoMZPUb80KUBKAPQ5cnT4uMKipK345l9+SOFY2XkUu'
'g0XNa2mlg4y4tbpdnqg09FNsyOAJQBaBfdoEYVlEnuzzGvvMZ7sEJXhFzdxcrCFZ/FQlDWPGEb'
'mdsE8MkMQDz2bjrRfQrKJu+Uj+1JfUz1w2A4iN13podDxet3ynBfyyisLflFxmX+T1D7hHg1Xw'
'U4FouJTu47jYrVLxHgTYV6daXknG6u4bW5bXCLKFdQgHm5UUcLK7HKrKfqKTTluipVSTzYSglV'
'scv1laGNvbrpAmdewt6UTYu9fByQMBeW+7dwLs14RbUS7wbE8Kw6+5CApmjKhYFWeeKcvD0gQQ'
'+iZMvIdW6TgIXYOTbsww/PuN4XgDIAHSA96LCC2pNfkv24JMhxgflQHQ3qtw3GhTf9l9Ljgif9'
'kox7tYI6kvfKuL1+XJLWrJFwRg/GP1Ml6R0MDf/2e9NDoxf7e2Xo10Xstv91ML+Huoj5vTw4WU'
'08L+j2YaxIyUt4w4x6YkataaF55LxZtDBXXlvRm88GHqscBUT/6+Ao2+Kn85/gKL8pjGC34ygp'
'NrvDnoMdG0/uCkAZgFCv8kttCouSP2CGkP+VttQB9LUj9Kg7M0LgCtaKIbVScTWMwOECX2Pa/0'
'RaslcCco7VY+naGNoYnsNyoQkJL1ktq9pbWqYnSmzA4LJj1TVHpNZvdyAsBlRsrhHRFD8VDA1y'
'DxJm6t7ZOeSf4mBPKQlctMohPidOK2ktVZ1B0Jf40w96D6pYEdetsjNWhAjy+p1lKV0r/vJg+o'
'X+eqkkJhiJH+Q8xoOemdTLaw3NDHHmSSE7iYTALjD9WWin/FiUuiY6IY2X1Pr9TMix76ReXA0M'
'XEcu1uaJQvR+YJa8Rz0ZRkZhXQEoA9AWum5/PaOwTPKQiIH3Zc5OhsILrY2PBRTAnOmKoFhgQq'
'yuLt5U0d4t2KK4BAf2mmesmw90itPu4iCySlOwN/DxFBVwlCxHjxvGy4upL/ks6diHB4M5w0AX'
'lO/hMcwIIu4XLphV4qoETXX1Blq2J7n26muO+B3hvq9ekGZVLj8kgvTzkcKyyaeFMfxitMmOeB'
'nNqp15teo+957dRoHLQ0J6azj+KyWJZK4uL4Rvxd7hvtHWMDM8NcN8fcNPhOJWNBkqlQFNgqt+'
'Ok2TUCU+LTT584aBtuRzOY4BKacQEMYRFM1BLkYfyFHYaUUBgHNGir1LP/G1Na5u01g1ByaHxp'
'XCKrbnsZ3X33jd9dde7xcDMczz3BuAMgDtTy6JfyGrsPbkizn2gnxdNPqmSEHNIXC+m3JQK2io'
'MCE2eH5Sy7kGLrnlqiYEC6uRINjG6nK1uBBURQxNNxyOycgIIhTFFxjabpl8UGy/rrLA/Bo4Y/'
'OnqlKopr5WXXWlmlQljlWKFAOlnaUREtDYzBoE1DUqy6V6yACtHb2Y9WNzQeia7Kp3RqvISSoG'
'3QebQvXPazdvuOrqa64KDieUH96o/QEoA9DFSS/aIAisI/lyju2qP5Zml4L7FuUX0z49FTBOAN'
'/JoWQ1K5dGLAqJH2xrqkkAJ6FgSLKk/nX7BLt6rkGhxKVacR7hJYM2fZrIbPHO4iDNdpDOHsCD'
'RFb4N2Y+uAB5dT4lRtGp6brDHtvQBxmRWwNQBqCE1Kb3GbY7k6+zIsUWhtTZGdhwcCyzwRENDk'
'ZwlDQtITa5DO3FeSF1+0jIcysRdssW19aKwlHMlaehSEt1F6XjE2YtLUTql5bWUDRZ8hCLZ+jQ'
'KKmnC/EQ4dQbkI2EpiVmXTS9yvkLmGYaRqQkI2xLAMoAtD3ZGf+Osddc8i02DOV/OHUzcCIZq4'
'TJsFjxlyk9mTizXNi65OKQ8bpF1gRRckbqC5wDUnNVLWG84WXDz/m0ZrnRwvYQiA+EEH5LbGYe'
'lAFoX3JR/Gajmq7k7+SM/mmUXqC19hXua/GaoovIljHbDPidebLiDQ0M0gF44HcQTsVlCe8k/N'
'UWuHWJsTS9eoycnIkL/SMlzmE9yYWnZujoc12JYTgH5081qTSVklOi9Bp5fsrH4SM3HA6kFWL6'
'/i594roId38nJ+4LRhtx8o9yyXlngDuXZREQiChNmNidpdKq2UAryIayLGmz5KC7MnefbsEJdT'
'ji/uigXZivSWvkgZBkbNzTHEdhxHOeXOfaqz0OEPr3j94KI6AMQLDCvM4kdnfyii44HvKPZDYe'
'EBgkJMXBnE567sU2WbpLD4wpszUJsiUeoP1Qyq5RqUltlzrHdrGgTaYYuDzaJAKucLpcVLG84V'
'syieHJMaDEqd7SnRprgUFUHM7aZZbXQlcqNK1SzyD+7VqdiDSH4K5yQl691li1RuexLZ21LESS'
'r3Mi7Wo1VY6fVQNuEXP+m3aj3zQEG/J+dAegDEDbSJF+ix36LcnrZNN+6bwOfXgt3RC9oZWZDx'
'dQftl2xKyHTQFavmhLOk2HHfepO08qDFvuclfToVcrt/JA6R8muXdqsg2MQHBSbmSa6ZDiZqaJ'
'EMHXpfG3hfD3OsHfV+3gb03eJPj7bBP+ePfoqio+VVkhl8nC6S9rxac0NxxysTizWnt5gw1DmQ'
'jy8MTvrmdqrXlXimFSfllv2aaoFAJFpcDKiGteG7apDowzCDJ8UxobWwkbbxJsPGjUtC15axf7'
'354IsIG1cFiMMS8uXao2Bp8xbsW7OZASpbv5+FgXAUJRubqwsWRugL1y9d8L2s73uF7nEYwwSc'
'bdzgCUAWhPko8/YeS2PbkfDyX5D0ateexKsSKhI7pCY3p985jvch+Y3P/X6HErRGgirycXgDIA'
'dSfb47caVSXJu7o4ZOyLwaKhaYXUIJZFCd/znrHFaqMitBeEOtB1aPlssQHp/hQa26F9z0Ce8+'
'XVBfuzVlrkJpVVVyEYfjcUltvQUT3sROBt3NCVS6dbaCfnI5kRUci4SQJQBqCdyYXxF41qdiS/'
'3MWmkXdFKeNz6CpmG7SlGDexXEa2Bj1rfk9cWGxU5l2rBzRupB3QNo3BXVXi9ESEbyyTXoIV0a'
'uQ9cFyS/U2neDRzKkRd8jr6wpAGYBgHfmaIaEneUCQ8Cdp+xCHBFhN/Mo64rBvgrbhiwAo21qG'
'hQyx+MXV4ryWmG02MIvMQ09iNvYF2kCdh7K8UzEyFKUjR6kC+2ss9RvZud9c9jgIO+L8zxbW3u'
'BYIXbxgTRCegghDwhCVhS0M/mNLnZD/wePDrdmdUeEDEUYyGY8RdNVubyghMXDxB5MCjGN/MEQ'
'lAFoK+mVH2hX2K7k97v4EvKL7UG7FvRz56lpDKiZKG2Sof2RTiSO0IALfVW/qEvCc4Hk6TLitq'
'0bUC7cJHaJFHZI9D3agmqd3U71BioblFw2T5DrKwfOipO7dH8XsxzYwyWqshhE7DaXOAjt7OXF'
'2M+8HxR0UKpwrVQluIxz/T3BwO40Szs1Gy6Xey6sVUUChL7k4JpiNnr2BziXBNdVDuzKQ4VbSp'
'VSzdpyFu8sWxFyZ9KKw6IFdZLx89yO3WWJBqZ9qzcvG1JmwyXSAovlBa3BEYsdz5BXV+yJn04K'
'v8vtoSKWCzpug2vV6iBp5ChMS0yMDQpFzGwh1nLypFpiq4saDWeFeluex4DCET7LxLs1AGUAwl'
'XxS8aHdicfFo3x56I0hYsEM0GuJctdKqLv9e12rt6ozZ/iwAuxpHATcL0kwYfLCrRrA7JQWi6u'
'8wWR3lss6kY3MzWO2VjTwc5bJl0XWFMQVfnhtKq4m9DwYVEV71LQnuQP8cyB/M0pZnw2HBDvbC'
'wuluf57vSyRnWteP4yM7jRI2iNv70rAGUAujDZHz9k23Rh8l/xUG/+9VFL/hickvM2+ugZ2tTo'
'U7QRkXe2ifHn+9ASEETFi8kHoAxAFyWXznWQcrdWvTr+1va4JyzZNsTwnm1zDToac435O+ljp4'
'/kLxHj6SH+da6xeIjTZ9dIWZIXet8SxVvGg3F6boi73DN7o0LU330kbzVAbJShGXtiyj/cszvu'
'oIM4W17Ym6HXuqba6a+xhZ59cY4nhR+y9EN2qpP/pp/2x10g3FmYNve20W9bpnIAjKAY0kzcE0'
'7sOEeC91wab5FyWbNcnIpn2DXVLbAZgNKjZtKjHrvuB65Zqg4523K1tiSG5ABvgqybAsjq3HO/'
'uyXOJWjnPJhE8a9GdBPHHz1H3oVWqqvrYgQ9cviqG5j8XJwGqjhXa6SG4Qastc/g6K8xu+fO7R'
'ZvKQZuzayokywh0qp4A8zwKscOnj4ydLgArl6pq+HO8nVFeVYd/sTY8dHx6VFp3hnHMCB2cMvn'
'XA7RWRckzwMw1+3+nc1dkMQJgiERG3oBifkLkgNJlL8EydphaUCJMUWyJaGHht6Cx4lkt3Lvym'
'tz0i3+gmR7gn4YT7NaTaK6+WFStYXjHnst18YvbqWZJg7WwbD9ASQiyIHkaQEkSxA0tPhQpKAo'
'2cVG9QciK8unxd98gchjfod99TpJZRcnUdOk6ZrAWU2987XBgDp6ZSNxpyovBhWjl7kAjjShJE'
'2TGxlpmc3T7nscyBYeWo8MRDJiDduTK9xCEce4i69gHoKVIs3BQ7IEQZbDhELoNsZa2rNSS+av'
'FspGh7eU1vjXQm2VqynCGeWjPiVvz6YG7yWG3EVbnjhYO8M6A0hEEMS9e0iWIAh7P6mQbLKfEx'
'1HuXzuoJrxrHPE3HJ1Lm1F2MgShtxhDyaIcGsMvDfZ7T6O1hD7UxOEp3Y/R5h6CCaEANND7Pq8'
'RE58/tKCfCwIkk5VtdWDEPErSCl/lF2nfBL6OfLgU3CM1pAU7apEBkUiafWiYiFsUAK6g2J890'
'gBRP37XqvLd48v73dvrzlo0lXjCs7dmsKcb25BvKNcKUp5a/ooRnlGQI6DxMMHywvPPGsVxKBI'
'Zq/WPpQZWgnEXhiE+MZd5dRmUTHxyICogK0/Kq03BC9NZ26oV/Y6UqYBHF+SHOB9FFg7w3IBJC'
'JIl9JDpEyjnwOjn6+QKBlgYhwu3G5apLvaNdFeSHXog1eg2/TS2qmjhWsO33jdkJ8cDjEG7Sea'
'SBysnWGdAQSfNkKM9BAPgBBN4v8vAOHTZA==')))
# Index the generated FileDescriptorSet by proto file name so that a file's
# descriptor and its services can be looked up by name without scanning.
_INDEX = {
    f.name: {
        'descriptor': f,
        'services': {s.name: s for s in f.service},
    }
    for f in FILE_DESCRIPTOR_SET.file
}

# Description consumed by the RPC plumbing: the full descriptor set plus
# direct handles to the 'rpc.proto' file and its 'Builds' service.
BuildsServiceDescription = {
    'file_descriptor_set': FILE_DESCRIPTOR_SET,
    'file_descriptor': _INDEX[u'rpc.proto']['descriptor'],
    'service_descriptor': _INDEX[u'rpc.proto']['services'][u'Builds'],
}
| 79.514113 | 80 | 0.883783 |
2248bca54a31a5b38410001e034b4c337a0df547 | 1,358 | py | Python | 18-05-05-Apache-Spark-with-Python-Big-Data-with-PySpark-and-Spark/4_Pair_RDD/4_AirportsUppercaseProblem.py | maraboinavamshi/courses | 48f255ffb1903ba20865c2b91b488758d5cb1a09 | [
"Apache-2.0"
] | 15 | 2017-09-19T08:09:01.000Z | 2019-04-29T00:37:51.000Z | 18-05-05-Apache-Spark-with-Python-Big-Data-with-PySpark-and-Spark/4_Pair_RDD/4_AirportsUppercaseProblem.py | chitrita/Courses-1 | 7713267ee5c92e488086588ac41490c44b4f7350 | [
"Apache-2.0"
] | null | null | null | 18-05-05-Apache-Spark-with-Python-Big-Data-with-PySpark-and-Spark/4_Pair_RDD/4_AirportsUppercaseProblem.py | chitrita/Courses-1 | 7713267ee5c92e488086588ac41490c44b4f7350 | [
"Apache-2.0"
] | 17 | 2018-02-27T03:15:54.000Z | 2019-04-24T09:26:46.000Z | '''
Create a Spark program to read the airport data from in/airports.text, generate a pair RDD with airport name
being the key and country name being the value. Then convert the country name to uppercase and
output the pair RDD to out/airports_uppercase.text
Each row of the input file contains the following columns:
Airport ID, Name of airport, Main city served by airport, Country where airport is located, IATA/FAA code,
ICAO Code, Latitude, Longitude, Altitude, Timezone, DST, Timezone in Olson format
Sample output:
("Kamloops", "CANADA")
("Wewak Intl", "PAPUA NEW GUINEA")
...
'''
from pyspark import SparkContext, SparkConf
import re
class Utils():
    # Split on commas that are *outside* double-quoted fields: a comma is a
    # delimiter only when it is followed by an even number of remaining
    # double quotes. This lets quoted CSV values contain commas.
    COMMA_DELIMITER = re.compile(''',(?=(?:[^"]*"[^"]*")*[^"]*$)''')
if __name__ == "__main__":
    conf = SparkConf().setAppName("airports").setMaster("local[*]")
    sc = SparkContext(conf=conf)

    def airport_country_pair(line):
        """Return (airport name, country) from one CSV row of airports.text."""
        # Split once per line; the original split the same line twice, doing
        # the (relatively expensive) regex work redundantly.
        fields = Utils.COMMA_DELIMITER.split(line)
        return fields[1], fields[3]

    airportsRDD = sc.textFile("data/airports.text")
    airportPairRDD = airportsRDD.map(airport_country_pair)

    # mapValues modifies the values, without changing the keys.
    # In this scenario, it will change all the letters to uppercase letters.
    upperCase = airportPairRDD.mapValues(lambda countryName: countryName.upper())
    upperCase.saveAsTextFile("output/airports_uppercase.text")
158e7db4cd83b61167706c5c659ee60939e7ccfa | 1,211 | py | Python | spo/spo/report/alle_anfragen/alle_anfragen.py | libracore/spo | c6617a4624d683e27ee3fde745313c30504f3fd1 | [
"MIT"
] | null | null | null | spo/spo/report/alle_anfragen/alle_anfragen.py | libracore/spo | c6617a4624d683e27ee3fde745313c30504f3fd1 | [
"MIT"
] | 6 | 2019-08-23T18:36:26.000Z | 2019-11-12T13:12:12.000Z | spo/spo/report/alle_anfragen/alle_anfragen.py | libracore/spo | efff6da53a776c4483f06d9ef1acc8a7aa96b28e | [
"MIT"
] | 1 | 2021-08-14T22:22:43.000Z | 2021-08-14T22:22:43.000Z | # Copyright (c) 2013, libracore and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
	"""Report entry point: return (columns, data) listing every Anfrage.

	``filters`` is part of the standard Frappe report signature but is
	currently unused -- the query always returns all records.
	"""
	columns = [
		{"label": _("Anfrage"), "fieldname": "id", "fieldtype": "Link", "options": "Anfrage"},
		{"label": _("Datum"), "fieldname": "Datum", "fieldtype": "Date"},
		{"label": _("Vorname"), "fieldname": "Vorname", "fieldtype": "Data"},
		{"label": _("Nachname"), "fieldname": "Nachname", "fieldtype": "Data"},
		{"label": _("Wohnort"), "fieldname": "Wohnort", "fieldtype": "Data"},
		{"label": _("Kanton"), "fieldname": "Kanton", "fieldtype": "Data"},
		{"label": _("Erstellt von"), "fieldname": "Erstellt_von", "fieldtype": "Data"},
		{"label": _("Zuletzt bearbeitet von"), "fieldname": "zuletzt_bearbeitet_von", "fieldtype": "Data"}
	]
	# One row per Anfrage; the SELECT column order must match `columns` above.
	data = frappe.db.sql("""SELECT `name`, `datum`, `patient_vorname`, `patient_nachname`, `patient_ort`, `patient_kanton`, `owner`, `modified_by` FROM `tabAnfrage`""", as_list=True)
	return columns, data
| 52.652174 | 179 | 0.662263 |
530a683c02f9d1ac59d6d8ce98f047f271af8668 | 137,717 | py | Python | pandas/tests/test_resample.py | The-Interns/pandas | d864cde5db3126fef258646e94fc621edb75d736 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/test_resample.py | The-Interns/pandas | d864cde5db3126fef258646e94fc621edb75d736 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/test_resample.py | The-Interns/pandas | d864cde5db3126fef258646e94fc621edb75d736 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | # pylint: disable=E1101
from warnings import catch_warnings
from datetime import datetime, timedelta
from functools import partial
from textwrap import dedent
from operator import methodcaller
import pytz
import pytest
import dateutil
import numpy as np
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas._libs.tslibs.ccalendar import DAYS, MONTHS
import pandas.util.testing as tm
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, assert_index_equal)
import pandas as pd
from pandas import (Series, DataFrame, Panel, Index, isna,
notna, Timestamp, Timedelta)
from pandas.compat import range, lrange, zip, OrderedDict
from pandas.errors import UnsupportedFunctionCall
import pandas.tseries.offsets as offsets
from pandas.tseries.offsets import Minute, BDay
from pandas.core.groupby.groupby import DataError
import pandas.core.common as com
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import period_range, PeriodIndex, Period
from pandas.core.resample import DatetimeIndex, TimeGrouper
from pandas.core.indexes.timedeltas import timedelta_range, TimedeltaIndex
bday = BDay()
# The various methods we support
downsample_methods = ['min', 'max', 'first', 'last', 'sum', 'mean', 'sem',
'median', 'prod', 'var', 'ohlc', 'quantile']
upsample_methods = ['count', 'size']
series_methods = ['nunique']
resample_methods = downsample_methods + upsample_methods + series_methods
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
def _simple_pts(start, end, freq='D'):
rng = period_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestResampleAPI(object):
def setup_method(self, method):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
self.frame = DataFrame(
{'A': self.series, 'B': self.series, 'C': np.arange(len(dti))})
def test_str(self):
r = self.series.resample('H')
assert ('DatetimeIndexResampler [freq=<Hour>, axis=0, closed=left, '
'label=left, convention=start, base=0]' in str(r))
def test_api(self):
r = self.series.resample('H')
result = r.mean()
assert isinstance(result, Series)
assert len(result) == 217
r = self.series.to_frame().resample('H')
result = r.mean()
assert isinstance(result, DataFrame)
assert len(result) == 217
    def test_groupby_resample_api(self):
        # GH 12448
        # .groupby(...).resample(...) hitting warnings
        # when appropriate
        df = DataFrame({'date': pd.date_range(start='2016-01-01',
                                              periods=4,
                                              freq='W'),
                        'group': [1, 1, 2, 2],
                        'val': [5, 6, 7, 8]}).set_index('date')
        # replication step
        # Expected frame: each weekly value forward-filled over the following
        # six days within its group, keyed by a (group, date) MultiIndex.
        i = pd.date_range('2016-01-03', periods=8).tolist() + \
            pd.date_range('2016-01-17', periods=8).tolist()
        index = pd.MultiIndex.from_arrays([[1] * 8 + [2] * 8, i],
                                          names=['group', 'date'])
        expected = DataFrame({'val': [5] * 7 + [6] + [7] * 7 + [8]},
                             index=index)
        result = df.groupby('group').apply(
            lambda x: x.resample('1D').ffill())[['val']]
        assert_frame_equal(result, expected)
def test_groupby_resample_on_api(self):
# GH 15021
# .groupby(...).resample(on=...) results in an unexpected
# keyword warning.
df = DataFrame({'key': ['A', 'B'] * 5,
'dates': pd.date_range('2016-01-01', periods=10),
'values': np.random.randn(10)})
expected = df.set_index('dates').groupby('key').resample('D').mean()
result = df.groupby('key').resample('D', on='dates').mean()
assert_frame_equal(result, expected)
def test_pipe(self):
# GH17905
# series
r = self.series.resample('H')
expected = r.max() - r.mean()
result = r.pipe(lambda x: x.max() - x.mean())
tm.assert_series_equal(result, expected)
# dataframe
r = self.frame.resample('H')
expected = r.max() - r.mean()
result = r.pipe(lambda x: x.max() - x.mean())
tm.assert_frame_equal(result, expected)
    def test_getitem(self):
        # Column selection on a Resampler narrows its _selected_obj.
        r = self.frame.resample('H')
        tm.assert_index_equal(r._selected_obj.columns, self.frame.columns)

        # Single-column selection yields a Series-backed resampler.
        r = self.frame.resample('H')['B']
        assert r._selected_obj.name == self.frame.columns[1]

        # technically this is allowed
        r = self.frame.resample('H')['A', 'B']
        tm.assert_index_equal(r._selected_obj.columns,
                              self.frame.columns[[0, 1]])

        # NOTE(review): this stanza duplicates the one above verbatim --
        # possibly meant to use a list selector r[['A', 'B']] instead.
        r = self.frame.resample('H')['A', 'B']
        tm.assert_index_equal(r._selected_obj.columns,
                              self.frame.columns[[0, 1]])
    def test_select_bad_cols(self):
        # Selecting a non-existent column -- alone or mixed with a real one --
        # must raise KeyError.
        g = self.frame.resample('H')
        pytest.raises(KeyError, g.__getitem__, ['D'])

        pytest.raises(KeyError, g.__getitem__, ['A', 'D'])
        # The error message must mention only the missing column 'D'.
        with tm.assert_raises_regex(KeyError, '^[^A]+$'):
            # A should not be referenced as a bad column...
            # will have to rethink regex if you change message!
            g[['A', 'D']]
def test_attribute_access(self):
r = self.frame.resample('H')
tm.assert_series_equal(r.A.sum(), r['A'].sum())
def test_api_compat_before_use(self):
# make sure that we are setting the binner
# on these attributes
for attr in ['groups', 'ngroups', 'indices']:
rng = pd.date_range('1/1/2012', periods=100, freq='S')
ts = Series(np.arange(len(rng)), index=rng)
rs = ts.resample('30s')
# before use
getattr(rs, attr)
# after grouper is initialized is ok
rs.mean()
getattr(rs, attr)
def tests_skip_nuisance(self):
df = self.frame
df['D'] = 'foo'
r = df.resample('H')
result = r[['A', 'B']].sum()
expected = pd.concat([r.A.sum(), r.B.sum()], axis=1)
assert_frame_equal(result, expected)
expected = r[['A', 'B', 'C']].sum()
result = r.sum()
assert_frame_equal(result, expected)
def test_downsample_but_actually_upsampling(self):
# this is reindex / asfreq
rng = pd.date_range('1/1/2012', periods=100, freq='S')
ts = Series(np.arange(len(rng), dtype='int64'), index=rng)
result = ts.resample('20s').asfreq()
expected = Series([0, 20, 40, 60, 80],
index=pd.date_range('2012-01-01 00:00:00',
freq='20s',
periods=5))
assert_series_equal(result, expected)
    def test_combined_up_downsampling_of_irregular(self):
        # since we are really doing an operation like this
        # ts2.resample('2s').mean().ffill()
        # preserve these semantics
        rng = pd.date_range('1/1/2012', periods=100, freq='S')
        ts = Series(np.arange(len(rng)), index=rng)
        # Irregular subset of the regular second-frequency series.
        ts2 = ts.iloc[[0, 1, 2, 3, 5, 7, 11, 15, 16, 25, 30]]
        # The deprecated how=/fill_method= keywords must be equivalent to
        # the explicit method chain, and must emit a FutureWarning.
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = ts2.resample('2s', how='mean', fill_method='ffill')
        expected = ts2.resample('2s').mean().ffill()
        assert_series_equal(result, expected)
def test_transform(self):
r = self.series.resample('20min')
expected = self.series.groupby(
pd.Grouper(freq='20min')).transform('mean')
result = r.transform('mean')
assert_series_equal(result, expected)
def test_fillna(self):
# need to upsample here
rng = pd.date_range('1/1/2012', periods=10, freq='2S')
ts = Series(np.arange(len(rng), dtype='int64'), index=rng)
r = ts.resample('s')
expected = r.ffill()
result = r.fillna(method='ffill')
assert_series_equal(result, expected)
expected = r.bfill()
result = r.fillna(method='bfill')
assert_series_equal(result, expected)
with pytest.raises(ValueError):
r.fillna(0)
def test_apply_without_aggregation(self):
# both resample and groupby should work w/o aggregation
r = self.series.resample('20min')
g = self.series.groupby(pd.Grouper(freq='20min'))
for t in [g, r]:
result = t.apply(lambda x: x)
assert_series_equal(result, self.series)
    def test_agg_consistency(self):
        # make sure that we are consistent across
        # similar aggregations with and w/o selection list
        df = DataFrame(np.random.randn(1000, 3),
                       index=pd.date_range('1/1/2012', freq='S', periods=1000),
                       columns=['A', 'B', 'C'])

        r = df.resample('3T')

        # The renaming-dict form of .agg is deprecated, hence the
        # FutureWarning; with and without an explicit column selection the
        # result must be identical.
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            expected = r[['A', 'B', 'C']].agg({'r1': 'mean', 'r2': 'sum'})
            result = r.agg({'r1': 'mean', 'r2': 'sum'})
        assert_frame_equal(result, expected)
# TODO: once GH 14008 is fixed, move these tests into
# `Base` test class
def test_agg(self):
# test with all three Resampler apis and TimeGrouper
np.random.seed(1234)
index = date_range(datetime(2005, 1, 1),
datetime(2005, 1, 10), freq='D')
index.name = 'date'
df = DataFrame(np.random.rand(10, 2), columns=list('AB'), index=index)
df_col = df.reset_index()
df_mult = df_col.copy()
df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index],
names=['index', 'date'])
r = df.resample('2D')
cases = [
r,
df_col.resample('2D', on='date'),
df_mult.resample('2D', level='date'),
df.groupby(pd.Grouper(freq='2D'))
]
a_mean = r['A'].mean()
a_std = r['A'].std()
a_sum = r['A'].sum()
b_mean = r['B'].mean()
b_std = r['B'].std()
b_sum = r['B'].sum()
expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1)
expected.columns = pd.MultiIndex.from_product([['A', 'B'],
['mean', 'std']])
for t in cases:
result = t.aggregate([np.mean, np.std])
assert_frame_equal(result, expected)
expected = pd.concat([a_mean, b_std], axis=1)
for t in cases:
result = t.aggregate({'A': np.mean,
'B': np.std})
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([a_mean, a_std], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
('A', 'std')])
for t in cases:
result = t.aggregate({'A': ['mean', 'std']})
assert_frame_equal(result, expected)
expected = pd.concat([a_mean, a_sum], axis=1)
expected.columns = ['mean', 'sum']
for t in cases:
result = t['A'].aggregate(['mean', 'sum'])
assert_frame_equal(result, expected)
expected = pd.concat([a_mean, a_sum], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
('A', 'sum')])
for t in cases:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}})
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
('A', 'sum'),
('B', 'mean2'),
('B', 'sum2')])
for t in cases:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'},
'B': {'mean2': 'mean', 'sum2': 'sum'}})
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
('A', 'std'),
('B', 'mean'),
('B', 'std')])
for t in cases:
result = t.aggregate({'A': ['mean', 'std'],
'B': ['mean', 'std']})
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('r1', 'A', 'mean'),
('r1', 'A', 'sum'),
('r2', 'B', 'mean'),
('r2', 'B', 'sum')])
    def test_agg_misc(self):
        """Misc aggregation specs across the three Resampler APIs and
        TimeGrouper: lambdas, OrderedDict renamers (deprecated, warn),
        per-column 'how' lists, selection lists, and invalid column names.
        """
        # test with all three Resampler apis and TimeGrouper
        np.random.seed(1234)
        index = date_range(datetime(2005, 1, 1),
                           datetime(2005, 1, 10), freq='D')
        index.name = 'date'
        df = DataFrame(np.random.rand(10, 2), columns=list('AB'), index=index)
        df_col = df.reset_index()
        df_mult = df_col.copy()
        df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index],
                                                  names=['index', 'date'])
        r = df.resample('2D')
        cases = [
            r,
            df_col.resample('2D', on='date'),
            df_mult.resample('2D', level='date'),
            df.groupby(pd.Grouper(freq='2D'))
        ]

        # passed lambda
        for t in cases:
            result = t.agg({'A': np.sum,
                            'B': lambda x: np.std(x, ddof=1)})
            rcustom = t['B'].apply(lambda x: np.std(x, ddof=1))
            expected = pd.concat([r['A'].sum(), rcustom], axis=1)
            assert_frame_equal(result, expected, check_like=True)

        # agg with renamers
        # NOTE: reuses the last `t` bound by the loop above to build the
        # expected frame (all cases aggregate identically)
        expected = pd.concat([t['A'].sum(),
                              t['B'].sum(),
                              t['A'].mean(),
                              t['B'].mean()],
                             axis=1)
        expected.columns = pd.MultiIndex.from_tuples([('result1', 'A'),
                                                      ('result1', 'B'),
                                                      ('result2', 'A'),
                                                      ('result2', 'B')])

        for t in cases:
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result = t[['A', 'B']].agg(OrderedDict([('result1', np.sum),
                                                        ('result2', np.mean)]))
            assert_frame_equal(result, expected, check_like=True)

        # agg with different hows
        expected = pd.concat([t['A'].sum(),
                              t['A'].std(),
                              t['B'].mean(),
                              t['B'].std()],
                             axis=1)
        expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'),
                                                      ('A', 'std'),
                                                      ('B', 'mean'),
                                                      ('B', 'std')])
        for t in cases:
            result = t.agg(OrderedDict([('A', ['sum', 'std']),
                                        ('B', ['mean', 'std'])]))
            assert_frame_equal(result, expected, check_like=True)

        # equivalent of using a selection list / or not
        for t in cases:
            result = t[['A', 'B']].agg({'A': ['sum', 'std'],
                                        'B': ['mean', 'std']})
            assert_frame_equal(result, expected, check_like=True)

        # series like aggs
        for t in cases:
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result = t['A'].agg({'A': ['sum', 'std']})
            expected = pd.concat([t['A'].sum(),
                                  t['A'].std()],
                                 axis=1)
            expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'),
                                                          ('A', 'std')])
            assert_frame_equal(result, expected, check_like=True)

            expected = pd.concat([t['A'].agg(['sum', 'std']),
                                  t['A'].agg(['mean', 'std'])],
                                 axis=1)
            expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'),
                                                          ('A', 'std'),
                                                          ('B', 'mean'),
                                                          ('B', 'std')])
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result = t['A'].agg({'A': ['sum', 'std'],
                                     'B': ['mean', 'std']})
            assert_frame_equal(result, expected, check_like=True)

        # errors
        # invalid names in the agg specification
        for t in cases:
            def f():
                with tm.assert_produces_warning(FutureWarning,
                                                check_stacklevel=False):
                    t[['A']].agg({'A': ['sum', 'std'],
                                  'B': ['mean', 'std']})

            pytest.raises(KeyError, f)
    def test_agg_nested_dicts(self):
        """Nested dict-of-dict aggregation specs: plain nested renamers
        raise ValueError; per-column renamer dicts work but emit the
        deprecation FutureWarning.
        """
        np.random.seed(1234)
        index = date_range(datetime(2005, 1, 1),
                           datetime(2005, 1, 10), freq='D')
        index.name = 'date'
        df = DataFrame(np.random.rand(10, 2), columns=list('AB'), index=index)
        df_col = df.reset_index()
        df_mult = df_col.copy()
        df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index],
                                                  names=['index', 'date'])
        r = df.resample('2D')
        cases = [
            r,
            df_col.resample('2D', on='date'),
            df_mult.resample('2D', level='date'),
            df.groupby(pd.Grouper(freq='2D'))
        ]

        for t in cases:
            def f():
                # renamer-of-column nesting is not supported
                t.aggregate({'r1': {'A': ['mean', 'sum']},
                             'r2': {'B': ['mean', 'sum']}})
            pytest.raises(ValueError, f)

        for t in cases:
            expected = pd.concat([t['A'].mean(), t['A'].std(), t['B'].mean(),
                                  t['B'].std()], axis=1)
            expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (
                'ra', 'std'), ('rb', 'mean'), ('rb', 'std')])

            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result = t[['A', 'B']].agg({'A': {'ra': ['mean', 'std']},
                                            'B': {'rb': ['mean', 'std']}})
            assert_frame_equal(result, expected, check_like=True)

            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result = t.agg({'A': {'ra': ['mean', 'std']},
                                'B': {'rb': ['mean', 'std']}})
            assert_frame_equal(result, expected, check_like=True)
    def test_try_aggregate_non_existing_column(self):
        """Aggregating over a column absent from the frame raises KeyError
        (GH 16766)."""
        # GH 16766
        data = [
            {'dt': datetime(2017, 6, 1, 0), 'x': 1.0, 'y': 2.0},
            {'dt': datetime(2017, 6, 1, 1), 'x': 2.0, 'y': 2.0},
            {'dt': datetime(2017, 6, 1, 2), 'x': 3.0, 'y': 1.5}
        ]
        df = DataFrame(data).set_index('dt')

        # Error as we don't have 'z' column
        with pytest.raises(KeyError):
            df.resample('30T').agg({'x': ['mean'],
                                    'y': ['median'],
                                    'z': ['sum']})
    def test_selection_api_validation(self):
        """Validation of the `on=` / `level=` selection API (GH 13500):
        invalid combinations raise; on='date' and level='d' are equivalent
        to resampling the plain DatetimeIndex frame."""
        # GH 13500
        index = date_range(datetime(2005, 1, 1),
                           datetime(2005, 1, 10), freq='D')

        rng = np.arange(len(index), dtype=np.int64)
        df = DataFrame({'date': index, 'a': rng},
                       index=pd.MultiIndex.from_arrays([rng, index],
                                                       names=['v', 'd']))
        df_exp = DataFrame({'a': rng}, index=index)

        # non DatetimeIndex
        with pytest.raises(TypeError):
            df.resample('2D', level='v')

        with pytest.raises(ValueError):
            df.resample('2D', on='date', level='d')

        with pytest.raises(TypeError):
            df.resample('2D', on=['a', 'date'])

        with pytest.raises(KeyError):
            df.resample('2D', level=['a', 'date'])

        # upsampling not allowed
        with pytest.raises(ValueError):
            df.resample('2D', level='d').asfreq()

        with pytest.raises(ValueError):
            df.resample('2D', on='date').asfreq()

        exp = df_exp.resample('2D').sum()
        exp.index.name = 'date'
        assert_frame_equal(exp, df.resample('2D', on='date').sum())

        exp.index.name = 'd'
        assert_frame_equal(exp, df.resample('2D', level='d').sum())
class Base(object):
    """
    base class for resampling testing, calling
    .create_series() generates a series of each index type

    Subclasses supply ``_index_factory`` (and a ``_series_name`` fixture)
    to pick the index flavor (datetime / period / timedelta).
    """

    def create_index(self, *args, **kwargs):
        """ return the _index_factory created using the args, kwargs """
        factory = self._index_factory()
        return factory(*args, **kwargs)

    @pytest.fixture
    def _index_start(self):
        # default start for the fixture index
        return datetime(2005, 1, 1)

    @pytest.fixture
    def _index_end(self):
        # default end for the fixture index
        return datetime(2005, 1, 10)

    @pytest.fixture
    def _index_freq(self):
        # default frequency for the fixture index
        return 'D'

    @pytest.fixture
    def index(self, _index_start, _index_end, _index_freq):
        # index of the subclass' flavor, built from the pieces above
        return self.create_index(_index_start, _index_end, freq=_index_freq)

    @pytest.fixture
    def _series_name(self):
        # subclasses must override with the name used for the series fixture
        raise com.AbstractMethodError(self)

    @pytest.fixture
    def _static_values(self, index):
        # deterministic data: 0 .. len(index)-1
        return np.arange(len(index))

    @pytest.fixture
    def series(self, index, _series_name, _static_values):
        return Series(_static_values, index=index, name=_series_name)

    @pytest.fixture
    def frame(self, index, _static_values):
        return DataFrame({'value': _static_values}, index=index)

    @pytest.fixture(params=[Series, DataFrame])
    def series_and_frame(self, request, index, _series_name, _static_values):
        # parametrized over both container types
        if request.param == Series:
            return Series(_static_values, index=index, name=_series_name)
        if request.param == DataFrame:
            return DataFrame({'value': _static_values}, index=index)

    @pytest.mark.parametrize('freq', ['2D', '1H'])
    def test_asfreq(self, series_and_frame, freq):
        """asfreq is a pure reindex onto the new-frequency index."""
        obj = series_and_frame

        result = obj.resample(freq).asfreq()
        new_index = self.create_index(obj.index[0], obj.index[-1], freq=freq)
        expected = obj.reindex(new_index)
        assert_almost_equal(result, expected)

    def test_asfreq_fill_value(self):
        """fill_value fills the holes asfreq introduces (GH 3715)."""
        # test for fill value during resampling, issue 3715

        s = self.create_series()

        result = s.resample('1H').asfreq()
        new_index = self.create_index(s.index[0], s.index[-1], freq='1H')
        expected = s.reindex(new_index)
        assert_series_equal(result, expected)

        frame = s.to_frame('value')
        frame.iloc[1] = None
        result = frame.resample('1H').asfreq(fill_value=4.0)
        new_index = self.create_index(frame.index[0],
                                      frame.index[-1], freq='1H')
        expected = frame.reindex(new_index, fill_value=4.0)
        assert_frame_equal(result, expected)

    def test_resample_interpolate(self):
        """resample(...).interpolate() == asfreq().interpolate() (GH 12925)."""
        # # 12925
        df = self.create_series().to_frame('value')
        assert_frame_equal(
            df.resample('1T').asfreq().interpolate(),
            df.resample('1T').interpolate())

    def test_raises_on_non_datetimelike_index(self):
        """Resampling without a datetime-like index raises TypeError."""
        # this is a non datetimelike index
        xp = DataFrame()
        pytest.raises(TypeError, lambda: xp.resample('A').mean())

    def test_resample_empty_series(self):
        """Resampling an empty Series keeps it empty with the new freq
        (GH 12771 & GH 12868)."""
        # GH12771 & GH12868

        s = self.create_series()[:0]

        for freq in ['M', 'D', 'H']:
            # need to test for ohlc from GH13083
            methods = [method for method in resample_methods
                       if method != 'ohlc']
            for method in methods:
                result = getattr(s.resample(freq), method)()

                expected = s.copy()
                expected.index = s.index._shallow_copy(freq=freq)
                assert_index_equal(result.index, expected.index)
                assert result.index.freq == expected.index.freq
                assert_series_equal(result, expected, check_dtype=False)

    def test_resample_empty_dataframe(self):
        """Resampling an empty DataFrame keeps it empty; size() returns an
        empty Series (GH 13212, GH 14962)."""
        # GH13212
        index = self.create_series().index[:0]
        f = DataFrame(index=index)

        for freq in ['M', 'D', 'H']:
            # count retains dimensions too
            methods = downsample_methods + upsample_methods
            for method in methods:
                result = getattr(f.resample(freq), method)()
                if method != 'size':
                    expected = f.copy()
                else:
                    # GH14962
                    expected = Series([])

                expected.index = f.index._shallow_copy(freq=freq)
                assert_index_equal(result.index, expected.index)
                assert result.index.freq == expected.index.freq
                assert_almost_equal(result, expected, check_dtype=False)

        # test size for GH13212 (currently stays as df)

    @pytest.mark.parametrize("index", tm.all_timeseries_index_generator(0))
    @pytest.mark.parametrize(
        "dtype",
        [np.float, np.int, np.object, 'datetime64[ns]'])
    def test_resample_empty_dtypes(self, index, dtype):
        """Resampling empty series of assorted dtypes must not crash."""
        # Empty series were sometimes causing a segfault (for the functions
        # with Cython bounds-checking disabled) or an IndexError. We just run
        # them to ensure they no longer do. (GH #10228)
        for how in downsample_methods + upsample_methods:
            empty_series = Series([], index, dtype)
            try:
                getattr(empty_series.resample('d'), how)()
            except DataError:
                # Ignore these since some combinations are invalid
                # (ex: doing mean with dtype of np.object)
                pass

    def test_resample_loffset_arg_type(self):
        """loffset works with str / dict / list agg specs and with the
        deprecated how= keyword (GH 13218, GH 15002)."""
        # GH 13218, 15002
        df = self.create_series().to_frame('value')
        expected_means = [df.values[i:i + 2].mean()
                          for i in range(0, len(df.values), 2)]
        expected_index = self.create_index(df.index[0],
                                           periods=len(df.index) / 2,
                                           freq='2D')

        # loffset coerces PeriodIndex to DateTimeIndex
        if isinstance(expected_index, PeriodIndex):
            expected_index = expected_index.to_timestamp()

        expected_index += timedelta(hours=2)
        expected = DataFrame({'value': expected_means}, index=expected_index)

        for arg in ['mean', {'value': 'mean'}, ['mean']]:

            result_agg = df.resample('2D', loffset='2H').agg(arg)

            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result_how = df.resample('2D', how=arg, loffset='2H')

            if isinstance(arg, list):
                expected.columns = pd.MultiIndex.from_tuples([('value',
                                                               'mean')])

            # GH 13022, 7687 - TODO: fix resample w/ TimedeltaIndex
            if isinstance(expected.index, TimedeltaIndex):
                with pytest.raises(AssertionError):
                    assert_frame_equal(result_agg, expected)
                    assert_frame_equal(result_how, expected)
            else:
                assert_frame_equal(result_agg, expected)
                assert_frame_equal(result_how, expected)

    def test_apply_to_empty_series(self):
        """apply on an empty series matches the aggregations (GH 14313)."""
        # GH 14313
        series = self.create_series()[:0]

        for freq in ['M', 'D', 'H']:
            result = series.resample(freq).apply(lambda x: 1)
            expected = series.resample(freq).apply(np.sum)

            assert_series_equal(result, expected, check_dtype=False)

    def test_resampler_is_iterable(self):
        """Iterating a Resampler yields the same (key, group) pairs as the
        equivalent TimeGrouper groupby (GH 15314)."""
        # GH 15314
        series = self.create_series()
        freq = 'H'
        tg = TimeGrouper(freq, convention='start')
        grouped = series.groupby(tg)
        resampled = series.resample(freq)
        for (rk, rv), (gk, gv) in zip(resampled, grouped):
            assert rk == gk
            assert_series_equal(rv, gv)

    def test_resample_quantile(self):
        """quantile() equals agg with a Series.quantile lambda (GH 15023)."""
        # GH 15023
        s = self.create_series()
        q = 0.75
        freq = 'H'
        result = s.resample(freq).quantile(q)
        expected = s.resample(freq).agg(lambda x: x.quantile(q))
        tm.assert_series_equal(result, expected)
class TestDatetimeIndex(Base):
    # factory consumed by Base.create_index for this index flavor
    _index_factory = lambda x: date_range

    @pytest.fixture
    def _series_name(self):
        # name used for the series fixture of this index type
        return 'dti'

    def setup_method(self, method):
        """Build the minutely random series used by the non-fixture tests."""
        dti = DatetimeIndex(start=datetime(2005, 1, 1),
                            end=datetime(2005, 1, 10), freq='Min')

        self.series = Series(np.random.rand(len(dti)), dti)
def create_series(self):
i = date_range(datetime(2005, 1, 1),
datetime(2005, 1, 10), freq='D')
return Series(np.arange(len(i)), index=i, name='dti')
    def test_custom_grouper(self):
        """TimeGrouper(Minute(5)) as groupby key: cython aggs work for both
        closed sides, group count/values match a hand-built expectation,
        and int dtype is preserved by sum (GH 2763)."""
        dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1),
                            end=datetime(2005, 1, 10))

        s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')

        b = TimeGrouper(Minute(5))
        g = s.groupby(b)

        # check all cython functions work
        funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
        for f in funcs:
            g._cython_agg_general(f)

        b = TimeGrouper(Minute(5), closed='right', label='right')
        g = s.groupby(b)
        # check all cython functions work
        funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
        for f in funcs:
            g._cython_agg_general(f)

        assert g.ngroups == 2593
        assert notna(g.mean()).all()

        # construct expected val
        arr = [1] + [5] * 2592
        idx = dti[0:-1:5]
        idx = idx.append(dti[-1:])
        expect = Series(arr, index=idx)

        # GH2763 - return in put dtype if we can
        result = g.agg(np.sum)
        assert_series_equal(result, expect)

        df = DataFrame(np.random.rand(len(dti), 10),
                       index=dti, dtype='float64')
        r = df.groupby(b).agg(np.sum)

        assert len(r.columns) == 10
        assert len(r.index) == 2593
    def test_resample_basic(self):
        """5-minute downsample means for closed/label combinations, and
        last() versus the equivalent TimeGrouper groupby."""
        rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
                         name='index')
        s = Series(np.random.randn(14), index=rng)

        result = s.resample('5min', closed='right', label='right').mean()
        exp_idx = date_range('1/1/2000', periods=4, freq='5min', name='index')
        expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
                          index=exp_idx)
        assert_series_equal(result, expected)
        assert result.index.name == 'index'

        result = s.resample('5min', closed='left', label='right').mean()

        exp_idx = date_range('1/1/2000 00:05', periods=3, freq='5min',
                             name='index')
        expected = Series([s[:5].mean(), s[5:10].mean(),
                           s[10:].mean()], index=exp_idx)
        assert_series_equal(result, expected)

        s = self.series
        result = s.resample('5Min').last()
        grouper = TimeGrouper(Minute(5), closed='left', label='left')
        expect = s.groupby(grouper).agg(lambda x: x[-1])
        assert_series_equal(result, expect)
def test_resample_string_kwargs(self):
# Test for issue #19303
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
s = Series(np.random.randn(14), index=rng)
# Check that wrong keyword argument strings raise an error
with pytest.raises(ValueError):
s.resample('5min', label='righttt').mean()
with pytest.raises(ValueError):
s.resample('5min', closed='righttt').mean()
with pytest.raises(ValueError):
s.resample('5min', convention='starttt').mean()
    def test_resample_how(self):
        """Each downsample method matches groupby over a hand-built group
        list; ohlc results are compared column-by-column."""
        rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
                         name='index')
        s = Series(np.random.randn(14), index=rng)
        # group labels mirroring closed='right' 5-minute buckets
        grouplist = np.ones_like(s)
        grouplist[0] = 0
        grouplist[1:6] = 1
        grouplist[6:11] = 2
        grouplist[11:] = 3
        args = downsample_methods

        def _ohlc(group):
            # manual open/high/low/close for one bucket
            if isna(group).all():
                return np.repeat(np.nan, 4)
            return [group[0], group.max(), group.min(), group[-1]]

        inds = date_range('1/1/2000', periods=4, freq='5min', name='index')

        for arg in args:
            if arg == 'ohlc':
                func = _ohlc
            else:
                func = arg
            try:
                result = getattr(s.resample(
                    '5min', closed='right', label='right'), arg)()

                expected = s.groupby(grouplist).agg(func)
                assert result.index.name == 'index'
                if arg == 'ohlc':
                    expected = DataFrame(expected.values.tolist())
                    expected.columns = ['open', 'high', 'low', 'close']
                    expected.index = Index(inds, name='index')
                    assert_frame_equal(result, expected)
                else:
                    expected.index = inds
                    assert_series_equal(result, expected)
            except BaseException as exc:
                # annotate the failure with the method that broke
                exc.args += ('how=%s' % arg,)
                raise
    def test_numpy_compat(self):
        """numpy-style positional/keyword args on resample reductions raise
        UnsupportedFunctionCall (GH 12811)."""
        # see gh-12811
        s = Series([1, 2, 3, 4, 5], index=date_range(
            '20130101', periods=5, freq='s'))
        r = s.resample('2s')

        msg = "numpy operations are not valid with resample"

        for func in ('min', 'max', 'sum', 'prod',
                     'mean', 'var', 'std'):
            tm.assert_raises_regex(UnsupportedFunctionCall, msg,
                                   getattr(r, func),
                                   func, 1, 2, 3)
            tm.assert_raises_regex(UnsupportedFunctionCall, msg,
                                   getattr(r, func), axis=1)
    def test_resample_how_callables(self):
        """Plain functions, lambdas, partials and callable objects all give
        identical apply() results (GH 7929)."""
        # GH 7929
        data = np.arange(5, dtype=np.int64)
        ind = pd.DatetimeIndex(start='2014-01-01', periods=len(data), freq='d')
        df = DataFrame({"A": data, "B": data}, index=ind)

        def fn(x, a=1):
            return str(type(x))

        class FnClass(object):
            def __call__(self, x):
                return str(type(x))

        df_standard = df.resample("M").apply(fn)
        df_lambda = df.resample("M").apply(lambda x: str(type(x)))
        df_partial = df.resample("M").apply(partial(fn))
        df_partial2 = df.resample("M").apply(partial(fn, a=2))
        df_class = df.resample("M").apply(FnClass())

        assert_frame_equal(df_standard, df_lambda)
        assert_frame_equal(df_standard, df_partial)
        assert_frame_equal(df_standard, df_partial2)
        assert_frame_equal(df_standard, df_class)
    def test_resample_with_timedeltas(self):
        """Downsampling a TimedeltaIndex matches an integer-division
        groupby, for both DataFrame and Series."""
        expected = DataFrame({'A': np.arange(1480)})
        expected = expected.groupby(expected.index // 30).sum()
        expected.index = pd.timedelta_range('0 days', freq='30T', periods=50)

        df = DataFrame({'A': np.arange(1480)}, index=pd.to_timedelta(
            np.arange(1480), unit='T'))
        result = df.resample('30T').sum()

        assert_frame_equal(result, expected)

        s = df['A']
        result = s.resample('30T').sum()
        assert_series_equal(result, expected['A'])
def test_resample_single_period_timedelta(self):
s = Series(list(range(5)), index=pd.timedelta_range(
'1 day', freq='s', periods=5))
result = s.resample('2s').sum()
expected = Series([1, 5, 4], index=pd.timedelta_range(
'1 day', freq='2s', periods=3))
assert_series_equal(result, expected)
    def test_resample_timedelta_idempotency(self):
        """Resampling at the index's own frequency is a no-op (GH 12072)."""
        # GH 12072
        index = pd.timedelta_range('0', periods=9, freq='10L')
        series = Series(range(9), index=index)
        result = series.resample('10L').mean()
        expected = series
        assert_series_equal(result, expected)
    def test_resample_rounding(self):
        """Bucket sums at 6/7/11/13/17-second frequencies land in the right
        buckets despite sub-second timestamps (GH 8371)."""
        # GH 8371
        # odd results when rounding is needed

        data = """date,time,value
11-08-2014,00:00:01.093,1
11-08-2014,00:00:02.159,1
11-08-2014,00:00:02.667,1
11-08-2014,00:00:03.175,1
11-08-2014,00:00:07.058,1
11-08-2014,00:00:07.362,1
11-08-2014,00:00:08.324,1
11-08-2014,00:00:08.830,1
11-08-2014,00:00:08.982,1
11-08-2014,00:00:09.815,1
11-08-2014,00:00:10.540,1
11-08-2014,00:00:11.061,1
11-08-2014,00:00:11.617,1
11-08-2014,00:00:13.607,1
11-08-2014,00:00:14.535,1
11-08-2014,00:00:15.525,1
11-08-2014,00:00:17.960,1
11-08-2014,00:00:20.674,1
11-08-2014,00:00:21.191,1"""

        from pandas.compat import StringIO
        df = pd.read_csv(StringIO(data), parse_dates={'timestamp': [
            'date', 'time']}, index_col='timestamp')
        df.index.name = None
        result = df.resample('6s').sum()
        expected = DataFrame({'value': [
            4, 9, 4, 2
        ]}, index=date_range('2014-11-08', freq='6s', periods=4))
        assert_frame_equal(result, expected)

        result = df.resample('7s').sum()
        expected = DataFrame({'value': [
            4, 10, 4, 1
        ]}, index=date_range('2014-11-08', freq='7s', periods=4))
        assert_frame_equal(result, expected)

        result = df.resample('11s').sum()
        expected = DataFrame({'value': [
            11, 8
        ]}, index=date_range('2014-11-08', freq='11s', periods=2))
        assert_frame_equal(result, expected)

        result = df.resample('13s').sum()
        expected = DataFrame({'value': [
            13, 6
        ]}, index=date_range('2014-11-08', freq='13s', periods=2))
        assert_frame_equal(result, expected)

        result = df.resample('17s').sum()
        expected = DataFrame({'value': [
            16, 3
        ]}, index=date_range('2014-11-08', freq='17s', periods=2))
        assert_frame_equal(result, expected)
    def test_resample_basic_from_daily(self):
        """Daily -> weekly (every anchor day) and daily -> business-day
        downsampling via last()."""
        # from daily
        dti = DatetimeIndex(start=datetime(2005, 1, 1),
                            end=datetime(2005, 1, 10), freq='D', name='index')

        s = Series(np.random.rand(len(dti)), dti)

        # to weekly
        result = s.resample('w-sun').last()

        assert len(result) == 3
        assert (result.index.dayofweek == [6, 6, 6]).all()
        assert result.iloc[0] == s['1/2/2005']
        assert result.iloc[1] == s['1/9/2005']
        assert result.iloc[2] == s.iloc[-1]

        result = s.resample('W-MON').last()
        assert len(result) == 2
        assert (result.index.dayofweek == [0, 0]).all()
        assert result.iloc[0] == s['1/3/2005']
        assert result.iloc[1] == s['1/10/2005']

        result = s.resample('W-TUE').last()
        assert len(result) == 2
        assert (result.index.dayofweek == [1, 1]).all()
        assert result.iloc[0] == s['1/4/2005']
        assert result.iloc[1] == s['1/10/2005']

        result = s.resample('W-WED').last()
        assert len(result) == 2
        assert (result.index.dayofweek == [2, 2]).all()
        assert result.iloc[0] == s['1/5/2005']
        assert result.iloc[1] == s['1/10/2005']

        result = s.resample('W-THU').last()
        assert len(result) == 2
        assert (result.index.dayofweek == [3, 3]).all()
        assert result.iloc[0] == s['1/6/2005']
        assert result.iloc[1] == s['1/10/2005']

        result = s.resample('W-FRI').last()
        assert len(result) == 2
        assert (result.index.dayofweek == [4, 4]).all()
        assert result.iloc[0] == s['1/7/2005']
        assert result.iloc[1] == s['1/10/2005']

        # to biz day
        result = s.resample('B').last()
        assert len(result) == 7
        assert (result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all()

        assert result.iloc[0] == s['1/2/2005']
        assert result.iloc[1] == s['1/3/2005']
        assert result.iloc[5] == s['1/9/2005']
        assert result.index.name == 'index'
    def test_resample_upsampling_picked_but_not_correct(self):
        """Resampling at the index's own daily frequency must not be
        mistaken for an upsample (GH 3020, GH 5955)."""
        # Test for issue #3020
        dates = date_range('01-Jan-2014', '05-Jan-2014', freq='D')
        series = Series(1, index=dates)

        result = series.resample('D').mean()
        assert result.index[0] == dates[0]

        # GH 5955
        # incorrect deciding to upsample when the axis frequency matches the
        # resample frequency

        import datetime
        s = Series(np.arange(1., 6), index=[datetime.datetime(
            1975, 1, i, 12, 0) for i in range(1, 6)])
        expected = Series(np.arange(1., 6), index=date_range(
            '19750101', periods=5, freq='D'))

        result = s.resample('D').count()
        assert_series_equal(result, Series(1, index=expected.index))

        result1 = s.resample('D').sum()
        result2 = s.resample('D').mean()
        assert_series_equal(result1, expected)
        assert_series_equal(result2, expected)
    def test_resample_frame_basic(self):
        """Frame-level resampling: cython aggs via TimeGrouper work, A/M
        means match the per-column results, and kind='period' runs."""
        df = tm.makeTimeDataFrame()

        b = TimeGrouper('M')
        g = df.groupby(b)

        # check all cython functions work
        funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']
        for f in funcs:
            g._cython_agg_general(f)

        result = df.resample('A').mean()
        assert_series_equal(result['A'], df['A'].resample('A').mean())

        result = df.resample('M').mean()
        assert_series_equal(result['A'], df['A'].resample('M').mean())

        df.resample('M', kind='period').mean()
        df.resample('W-WED', kind='period').mean()
    @pytest.mark.parametrize('loffset', [timedelta(minutes=1),
                                         '1min', Minute(1),
                                         np.timedelta64(1, 'm')])
    def test_resample_loffset(self, loffset):
        """loffset, in all accepted forms, shifts the result index by the
        offset while preserving the result frequency (GH 7687)."""
        # GH 7687
        rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min')
        s = Series(np.random.randn(14), index=rng)

        result = s.resample('5min', closed='right', label='right',
                            loffset=loffset).mean()
        idx = date_range('1/1/2000', periods=4, freq='5min')
        expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
                          index=idx + timedelta(minutes=1))
        assert_series_equal(result, expected)
        assert result.index.freq == Minute(5)

        # from daily
        dti = DatetimeIndex(start=datetime(2005, 1, 1),
                            end=datetime(2005, 1, 10), freq='D')
        ser = Series(np.random.rand(len(dti)), dti)

        # to weekly
        result = ser.resample('w-sun').last()
        expected = ser.resample('w-sun', loffset=-bday).last()
        assert result.index[0] - bday == expected.index[0]
    def test_resample_loffset_upsample(self):
        """loffset also applies when upsampling via ffill (GH 20744)."""
        # GH 20744
        rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min')
        s = Series(np.random.randn(14), index=rng)

        result = s.resample('5min', closed='right', label='right',
                            loffset=timedelta(minutes=1)).ffill()
        idx = date_range('1/1/2000', periods=4, freq='5min')
        expected = Series([s[0], s[5], s[10], s[-1]],
                          index=idx + timedelta(minutes=1))

        assert_series_equal(result, expected)
    def test_resample_loffset_count(self):
        """loffset applies to count() and size() as well (GH 12725)."""
        # GH 12725
        start_time = '1/1/2000 00:00:00'
        rng = date_range(start_time, periods=100, freq='S')
        ts = Series(np.random.randn(len(rng)), index=rng)

        result = ts.resample('10S', loffset='1s').count()

        expected_index = (
            date_range(start_time, periods=10, freq='10S') +
            timedelta(seconds=1)
        )
        expected = Series(10, index=expected_index)

        assert_series_equal(result, expected)

        # Same issue should apply to .size() since it goes through
        # same code path
        result = ts.resample('10S', loffset='1s').size()

        assert_series_equal(result, expected)
    def test_resample_upsample(self):
        """Daily -> minutely upsample via pad() keeps the endpoint values
        and the index name."""
        # from daily
        dti = DatetimeIndex(start=datetime(2005, 1, 1),
                            end=datetime(2005, 1, 10), freq='D', name='index')

        s = Series(np.random.rand(len(dti)), dti)

        # to minutely, by padding
        result = s.resample('Min').pad()
        assert len(result) == 12961
        assert result[0] == s[0]
        assert result[-1] == s[-1]

        assert result.index.name == 'index'
    def test_resample_how_method(self):
        """Empty intermediate 10-second buckets become NaN under mean()
        (GH 9915)."""
        # GH9915
        s = Series([11, 22],
                   index=[Timestamp('2015-03-31 21:48:52.672000'),
                          Timestamp('2015-03-31 21:49:52.739000')])
        expected = Series([11, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 22],
                          index=[Timestamp('2015-03-31 21:48:50'),
                                 Timestamp('2015-03-31 21:49:00'),
                                 Timestamp('2015-03-31 21:49:10'),
                                 Timestamp('2015-03-31 21:49:20'),
                                 Timestamp('2015-03-31 21:49:30'),
                                 Timestamp('2015-03-31 21:49:40'),
                                 Timestamp('2015-03-31 21:49:50')])
        assert_series_equal(s.resample("10S").mean(), expected)
    def test_resample_extra_index_point(self):
        """BM resample of a business-day frame produces no spurious extra
        row (GH 9756)."""
        # GH 9756
        index = DatetimeIndex(start='20150101', end='20150331', freq='BM')
        expected = DataFrame({'A': Series([21, 41, 63], index=index)})

        index = DatetimeIndex(start='20150101', end='20150331', freq='B')
        df = DataFrame(
            {'A': Series(range(len(index)), index=index)}, dtype='int64')

        result = df.resample('BM').last()
        assert_frame_equal(result, expected)
def test_upsample_with_limit(self):
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t').ffill(limit=2)
expected = ts.reindex(result.index, method='ffill', limit=2)
assert_series_equal(result, expected)
def test_nearest_upsample_with_limit(self):
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t').nearest(limit=2)
expected = ts.reindex(result.index, method='nearest', limit=2)
assert_series_equal(result, expected)
    def test_resample_ohlc(self):
        """ohlc() bucket columns agree with slices of the raw series."""
        s = self.series

        grouper = TimeGrouper(Minute(5))
        expect = s.groupby(grouper).agg(lambda x: x[-1])
        result = s.resample('5Min').ohlc()

        assert len(result) == len(expect)
        assert len(result.columns) == 4

        # second-to-last bucket
        xs = result.iloc[-2]
        assert xs['open'] == s[-6]
        assert xs['high'] == s[-6:-1].max()
        assert xs['low'] == s[-6:-1].min()
        assert xs['close'] == s[-2]

        # first bucket
        xs = result.iloc[0]
        assert xs['open'] == s[0]
        assert xs['high'] == s[:5].max()
        assert xs['low'] == s[:5].min()
        assert xs['close'] == s[4]
    def test_resample_ohlc_result(self):
        """ohlc() returns a DataFrame, including across an index gap, and
        business-day resample over dropped dates works (GH 12332, GH 12348).
        """
        # GH 12332
        index = pd.date_range('1-1-2000', '2-15-2000', freq='h')
        index = index.union(pd.date_range('4-15-2000', '5-15-2000', freq='h'))
        s = Series(range(len(index)), index=index)

        a = s.loc[:'4-15-2000'].resample('30T').ohlc()
        assert isinstance(a, DataFrame)

        b = s.loc[:'4-14-2000'].resample('30T').ohlc()
        assert isinstance(b, DataFrame)

        # GH12348
        # raising on odd period
        rng = date_range('2013-12-30', '2014-01-07')
        index = rng.drop([Timestamp('2014-01-01'),
                          Timestamp('2013-12-31'),
                          Timestamp('2014-01-04'),
                          Timestamp('2014-01-05')])
        df = DataFrame(data=np.arange(len(index)), index=index)
        result = df.resample('B').mean()
        expected = df.reindex(index=date_range(rng[0], rng[-1], freq='B'))
        assert_frame_equal(result, expected)
    def test_resample_ohlc_dataframe(self):
        """DataFrame ohlc(): per-column ohlc concatenated under a
        MultiIndex, including when the columns are already a MultiIndex."""
        df = (
            DataFrame({
                'PRICE': {
                    Timestamp('2011-01-06 10:59:05', tz=None): 24990,
                    Timestamp('2011-01-06 12:43:33', tz=None): 25499,
                    Timestamp('2011-01-06 12:54:09', tz=None): 25499},
                'VOLUME': {
                    Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
                    Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
                    Timestamp('2011-01-06 12:54:09', tz=None): 100000000}})
        ).reindex(['VOLUME', 'PRICE'], axis=1)
        res = df.resample('H').ohlc()
        exp = pd.concat([df['VOLUME'].resample('H').ohlc(),
                         df['PRICE'].resample('H').ohlc()],
                        axis=1,
                        keys=['VOLUME', 'PRICE'])
        assert_frame_equal(exp, res)

        df.columns = [['a', 'b'], ['c', 'd']]
        res = df.resample('H').ohlc()
        exp.columns = pd.MultiIndex.from_tuples([
            ('a', 'c', 'open'), ('a', 'c', 'high'), ('a', 'c', 'low'),
            ('a', 'c', 'close'), ('b', 'd', 'open'), ('b', 'd', 'high'),
            ('b', 'd', 'low'), ('b', 'd', 'close')])
        assert_frame_equal(exp, res)

        # dupe columns fail atm
        # df.columns = ['PRICE', 'PRICE']
    def test_resample_dup_index(self):
        """Quarterly axis=1 resample over duplicate row labels matches a
        quarter-keyed column groupby (GH 4812)."""
        # GH 4812
        # dup columns with resample raising
        df = DataFrame(np.random.randn(4, 12), index=[2000, 2000, 2000, 2000],
                       columns=[Period(year=2000, month=i + 1, freq='M')
                                for i in range(12)])
        df.iloc[3, :] = np.nan
        result = df.resample('Q', axis=1).mean()
        expected = df.groupby(lambda x: int((x.month - 1) / 3), axis=1).mean()
        expected.columns = [
            Period(year=2000, quarter=i + 1, freq='Q') for i in range(4)]
        assert_frame_equal(result, expected)
    def test_resample_reresample(self):
        """Re-resampling a business-day result to 8H yields a proper
        DateOffset frequency on the result index."""
        dti = DatetimeIndex(start=datetime(2005, 1, 1),
                            end=datetime(2005, 1, 10), freq='D')
        s = Series(np.random.rand(len(dti)), dti)
        bs = s.resample('B', closed='right', label='right').mean()
        result = bs.resample('8H').mean()
        assert len(result) == 22
        assert isinstance(result.index.freq, offsets.DateOffset)
        assert result.index.freq == offsets.Hour(8)
def test_resample_timestamp_to_period(self):
ts = _simple_ts('1/1/1990', '1/1/2000')
result = ts.resample('A-DEC', kind='period').mean()
expected = ts.resample('A-DEC').mean()
expected.index = period_range('1990', '2000', freq='a-dec')
assert_series_equal(result, expected)
result = ts.resample('A-JUN', kind='period').mean()
expected = ts.resample('A-JUN').mean()
expected.index = period_range('1990', '2000', freq='a-jun')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period').mean()
expected = ts.resample('M').mean()
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period').mean()
expected = ts.resample('M').mean()
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
    def test_ohlc_5min(self):
        """5-minute ohlc values for the first, a middle, and the final
        bucket against a manual open/high/low/close helper."""
        def _ohlc(group):
            # manual open/high/low/close for one bucket
            if isna(group).all():
                return np.repeat(np.nan, 4)
            return [group[0], group.max(), group.min(), group[-1]]

        rng = date_range('1/1/2000 00:00:00', '1/1/2000 5:59:50', freq='10s')
        ts = Series(np.random.randn(len(rng)), index=rng)

        resampled = ts.resample('5min', closed='right',
                                label='right').ohlc()

        # first bucket contains only the first observation
        assert (resampled.loc['1/1/2000 00:00'] == ts[0]).all()

        exp = _ohlc(ts[1:31])
        assert (resampled.loc['1/1/2000 00:05'] == exp).all()

        exp = _ohlc(ts['1/1/2000 5:55:01':])
        assert (resampled.loc['1/1/2000 6:00:00'] == exp).all()
    def test_downsample_non_unique(self):
        """Monthly mean over an index with repeated timestamps matches a
        month-keyed groupby."""
        rng = date_range('1/1/2000', '2/29/2000')
        rng2 = rng.repeat(5).values
        ts = Series(np.random.randn(len(rng2)), index=rng2)

        result = ts.resample('M').mean()

        expected = ts.groupby(lambda x: x.month).mean()
        assert len(result) == 2
        assert_almost_equal(result[0], expected[1])
        assert_almost_equal(result[1], expected[2])
def test_asfreq_non_unique(self):
# GH #1077
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(2).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
pytest.raises(Exception, ts.asfreq, 'B')
    def test_resample_axis1(self):
        """axis=1 resample equals transpose-resample-transpose."""
        rng = date_range('1/1/2000', '2/29/2000')
        df = DataFrame(np.random.randn(3, len(rng)), columns=rng,
                       index=['a', 'b', 'c'])

        result = df.resample('M', axis=1).mean()
        expected = df.T.resample('M').mean().T
        tm.assert_frame_equal(result, expected)
    def test_resample_panel(self):
        """Panel resample along major/minor axes matches per-item resample
        (Panel deprecation warnings are suppressed)."""
        rng = date_range('1/1/2000', '6/30/2000')
        n = len(rng)

        with catch_warnings(record=True):
            panel = Panel(np.random.randn(3, n, 5),
                          items=['one', 'two', 'three'],
                          major_axis=rng,
                          minor_axis=['a', 'b', 'c', 'd', 'e'])

            result = panel.resample('M', axis=1).mean()

            def p_apply(panel, f):
                # apply f to each item frame and re-assemble a Panel
                result = {}
                for item in panel.items:
                    result[item] = f(panel[item])
                return Panel(result, items=panel.items)

            expected = p_apply(panel, lambda x: x.resample('M').mean())
            tm.assert_panel_equal(result, expected)

            panel2 = panel.swapaxes(1, 2)
            result = panel2.resample('M', axis=2).mean()
            expected = p_apply(panel2,
                               lambda x: x.resample('M', axis=1).mean())
            tm.assert_panel_equal(result, expected)
def test_resample_panel_numpy(self):
    """Panel.resample().apply with an axis-aware mean matches .mean()."""
    rng = date_range('1/1/2000', '6/30/2000')
    n = len(rng)

    with catch_warnings(record=True):
        panel = Panel(np.random.randn(3, n, 5),
                      items=['one', 'two', 'three'],
                      major_axis=rng,
                      minor_axis=['a', 'b', 'c', 'd', 'e'])

        result = panel.resample('M', axis=1).apply(lambda x: x.mean(1))
        expected = panel.resample('M', axis=1).mean()
        tm.assert_panel_equal(result, expected)

        # Same check after swapping axes, resampling along axis=2.
        panel = panel.swapaxes(1, 2)
        result = panel.resample('M', axis=2).apply(lambda x: x.mean(2))
        expected = panel.resample('M', axis=2).mean()
        tm.assert_panel_equal(result, expected)
def test_resample_anchored_ticks(self):
    """Tick frequencies that evenly divide a day anchor bins at midnight."""
    # If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
    # "anchor" the origin at midnight so we get regular intervals rather
    # than starting from the first timestamp which might start in the
    # middle of a desired interval

    rng = date_range('1/1/2000 04:00:00', periods=86400, freq='s')
    ts = Series(np.random.randn(len(rng)), index=rng)
    ts[:2] = np.nan  # so results are the same

    freqs = ['t', '5t', '15t', '30t', '4h', '12h']
    for freq in freqs:
        # Dropping the first two points must not shift the bin origin.
        result = ts[2:].resample(freq, closed='left', label='left').mean()
        expected = ts.resample(freq, closed='left', label='left').mean()
        assert_series_equal(result, expected)
def test_resample_single_group(self):
    """.sum() and .apply(sum) agree, including when only one bin results."""
    mysum = lambda x: x.sum()

    rng = date_range('2000-1-1', '2000-2-10', freq='D')
    ts = Series(np.random.randn(len(rng)), index=rng)
    assert_series_equal(ts.resample('M').sum(),
                        ts.resample('M').apply(mysum))

    # Shorter range: everything falls into a single monthly bin.
    rng = date_range('2000-1-1', '2000-1-10', freq='D')
    ts = Series(np.random.randn(len(rng)), index=rng)
    assert_series_equal(ts.resample('M').sum(),
                        ts.resample('M').apply(mysum))

    # GH 3849
    s = Series([30.1, 31.6], index=[Timestamp('20070915 15:30:00'),
                                    Timestamp('20070915 15:40:00')])
    expected = Series([0.75], index=[Timestamp('20070915')])
    result = s.resample('D').apply(lambda x: np.std(x))
    assert_series_equal(result, expected)
def test_resample_base(self):
    """base= shifts the bin origin (here by 2 minutes) on a DatetimeIndex."""
    rng = date_range('1/1/2000 00:00:00', '1/1/2000 02:00', freq='s')
    ts = Series(np.random.randn(len(rng)), index=rng)

    resampled = ts.resample('5min', base=2).mean()
    exp_rng = date_range('12/31/1999 23:57:00', '1/1/2000 01:57',
                         freq='5min')
    tm.assert_index_equal(resampled.index, exp_rng)
def test_resample_base_with_timedeltaindex(self):
    """base= also shifts the bin origin on a TimedeltaIndex (GH 10530)."""
    # GH 10530
    rng = timedelta_range(start='0s', periods=25, freq='s')
    ts = Series(np.random.randn(len(rng)), index=rng)

    with_base = ts.resample('2s', base=5).mean()
    without_base = ts.resample('2s').mean()

    exp_without_base = timedelta_range(start='0s', end='25s', freq='2s')
    exp_with_base = timedelta_range(start='5s', end='29s', freq='2s')

    tm.assert_index_equal(without_base.index, exp_without_base)
    tm.assert_index_equal(with_base.index, exp_with_base)
def test_resample_categorical_data_with_timedeltaindex(self):
    """Aggregating categorical columns over a TimedeltaIndex works (GH 12169)."""
    # GH #12169
    df = DataFrame({'Group_obj': 'A'},
                   index=pd.to_timedelta(list(range(20)), unit='s'))
    df['Group'] = df['Group_obj'].astype('category')
    # Aggregate each 10s bin to its modal value.
    result = df.resample('10s').agg(lambda x: (x.value_counts().index[0]))
    expected = DataFrame({'Group_obj': ['A', 'A'],
                          'Group': ['A', 'A']},
                         index=pd.to_timedelta([0, 10], unit='s'))
    expected = expected.reindex(['Group_obj', 'Group'], axis=1)
    tm.assert_frame_equal(result, expected)
def test_resample_daily_anchored(self):
    """Daily bins are anchored at midnight regardless of first data point."""
    rng = date_range('1/1/2000 0:00:00', periods=10000, freq='T')
    ts = Series(np.random.randn(len(rng)), index=rng)
    ts[:2] = np.nan  # so results are the same

    result = ts[2:].resample('D', closed='left', label='left').mean()
    expected = ts.resample('D', closed='left', label='left').mean()
    assert_series_equal(result, expected)
def test_resample_to_period_monthly_buglet(self):
    """kind='period' yields a monthly PeriodIndex, not timestamps (GH #1259)."""
    # GH #1259

    rng = date_range('1/1/2000', '12/31/2000')
    ts = Series(np.random.randn(len(rng)), index=rng)

    result = ts.resample('M', kind='period').mean()
    exp_index = period_range('Jan-2000', 'Dec-2000', freq='M')
    tm.assert_index_equal(result.index, exp_index)
def test_period_with_agg(self):
    """.agg(lambda) on a PeriodIndex resampler matches the timestamp path."""
    # aggregate a period resampler with a lambda
    s2 = Series(np.random.randint(0, 5, 50),
                index=pd.period_range('2012-01-01', freq='H', periods=50),
                dtype='float64')

    expected = s2.to_timestamp().resample('D').mean().to_period()
    result = s2.resample('D').agg(lambda x: x.mean())
    assert_series_equal(result, expected)
def test_resample_segfault(self):
    """groupby(...).resample(...) must not segfault (regression, GH 8573)."""
    # GH 8573
    # segfaulting in older versions
    all_wins_and_wagers = [
        (1, datetime(2013, 10, 1, 16, 20), 1, 0),
        (2, datetime(2013, 10, 1, 16, 10), 1, 0),
        (2, datetime(2013, 10, 1, 18, 15), 1, 0),
        (2, datetime(2013, 10, 1, 16, 10, 31), 1, 0)]

    df = DataFrame.from_records(all_wins_and_wagers,
                                columns=("ID", "timestamp", "A", "B")
                                ).set_index("timestamp")
    result = df.groupby("ID").resample("5min").sum()
    expected = df.groupby("ID").apply(lambda x: x.resample("5min").sum())
    assert_frame_equal(result, expected)
def test_resample_dtype_preservation(self):
    """ffill after resample keeps int32 dtype, plain and grouped (GH 12202)."""
    # GH 12202
    # validation tests for dtype preservation

    df = DataFrame({'date': pd.date_range(start='2016-01-01',
                                          periods=4, freq='W'),
                    'group': [1, 1, 2, 2],
                    'val': Series([5, 6, 7, 8],
                                  dtype='int32')}
                   ).set_index('date')

    result = df.resample('1D').ffill()
    assert result.val.dtype == np.int32

    result = df.groupby('group').resample('1D').ffill()
    assert result.val.dtype == np.int32
def test_resample_dtype_coerceion(self):
    """Cubic interpolation after resample coerces ints to float (GH 16361)."""
    pytest.importorskip('scipy.interpolate')  # cubic interpolation needs scipy

    # GH 16361
    df = {"a": [1, 3, 1, 4]}
    df = DataFrame(df, index=pd.date_range("2017-01-01", "2017-01-04"))

    expected = (df.astype("float64")
                .resample("H")
                .mean()
                ["a"]
                .interpolate("cubic")
                )

    # Column selection before and after .mean() must both match.
    result = df.resample("H")["a"].mean().interpolate("cubic")
    tm.assert_series_equal(result, expected)

    result = df.resample("H").mean()["a"].interpolate("cubic")
    tm.assert_series_equal(result, expected)
def test_weekly_resample_buglet(self):
    """'W' resample defaults to week-ending-Sunday ('W-SUN') (GH #1327)."""
    # #1327
    rng = date_range('1/1/2000', freq='B', periods=20)
    ts = Series(np.random.randn(len(rng)), index=rng)

    resampled = ts.resample('W').mean()
    expected = ts.resample('W-SUN').mean()
    assert_series_equal(resampled, expected)
def test_monthly_resample_error(self):
    """Monthly resample of a long hourly series must not raise (GH #1451)."""
    # #1451
    dates = date_range('4/16/2012 20:00', periods=5000, freq='h')
    ts = Series(np.random.randn(len(dates)), index=dates)
    # it works!
    ts.resample('M')
def test_nanosecond_resample_error(self):
    """Nanosecond-frequency resampling bins values correctly (GH 12307)."""
    # GH 12307 - Values falls after last bin when
    # Resampling using pd.tseries.offsets.Nano as period
    start = 1443707890427
    exp_start = 1443707890400  # start rounded down to the 100ns bin edge
    indx = pd.date_range(
        start=pd.to_datetime(start),
        periods=10,
        freq='100n'
    )
    ts = Series(range(len(indx)), index=indx)
    r = ts.resample(pd.tseries.offsets.Nano(100))
    result = r.agg('mean')

    exp_indx = pd.date_range(
        start=pd.to_datetime(exp_start),
        periods=10,
        freq='100n'
    )
    exp = Series(range(len(exp_indx)), index=exp_indx)

    assert_series_equal(result, exp)
def test_resample_anchored_intraday(self):
    """Monthly/quarterly resample of intraday data matches the period path.

    (GH #1471, #1458) — the timestamp result should equal the
    kind='period' result converted back to end-of-period timestamps.
    """
    # #1471, #1458

    rng = date_range('1/1/2012', '4/1/2012', freq='100min')
    df = DataFrame(rng.month, index=rng)

    result = df.resample('M').mean()
    expected = df.resample(
        'M', kind='period').mean().to_timestamp(how='end')
    # to_timestamp(how='end') lands at period end midnight; shift to the
    # last nanosecond of the prior day to match the timestamp labels.
    expected.index += Timedelta(1, 'ns') - Timedelta(1, 'D')
    tm.assert_frame_equal(result, expected)

    result = df.resample('M', closed='left').mean()
    exp = df.tshift(1, freq='D').resample('M', kind='period').mean()
    exp = exp.to_timestamp(how='end')

    exp.index = exp.index + Timedelta(1, 'ns') - Timedelta(1, 'D')
    tm.assert_frame_equal(result, exp)

    # Same checks at quarterly frequency.
    rng = date_range('1/1/2012', '4/1/2012', freq='100min')
    df = DataFrame(rng.month, index=rng)

    result = df.resample('Q').mean()
    expected = df.resample(
        'Q', kind='period').mean().to_timestamp(how='end')
    expected.index += Timedelta(1, 'ns') - Timedelta(1, 'D')
    tm.assert_frame_equal(result, expected)

    result = df.resample('Q', closed='left').mean()
    expected = df.tshift(1, freq='D').resample('Q', kind='period',
                                               closed='left').mean()
    expected = expected.to_timestamp(how='end')
    expected.index += Timedelta(1, 'ns') - Timedelta(1, 'D')
    tm.assert_frame_equal(result, expected)

    # A short span crossing midnight still collapses to one monthly bin.
    ts = _simple_ts('2012-04-29 23:00', '2012-04-30 5:00', freq='h')
    resampled = ts.resample('M').mean()
    assert len(resampled) == 1
def test_resample_anchored_monthstart(self):
    """Start-anchored frequencies (MS, BMS, QS-, AS-) resample cleanly."""
    ts = _simple_ts('1/1/2000', '12/31/2002')

    freqs = ['MS', 'BMS', 'QS-MAR', 'AS-DEC', 'AS-JUN']

    for freq in freqs:
        ts.resample(freq).mean()  # smoke test: must not raise
def test_resample_anchored_multiday(self):
    """Multi-day spans use the start date for the bin offset (GH 8683)."""
    # When resampling a range spanning multiple days, ensure that the
    # start date gets used to determine the offset. Fixes issue where
    # a one day period is not a multiple of the frequency.
    #
    # See: https://github.com/pandas-dev/pandas/issues/8683

    index = pd.date_range(
        '2014-10-14 23:06:23.206', periods=3, freq='400L'
    ) | pd.date_range(
        '2014-10-15 23:00:00', periods=2, freq='2200L')

    s = Series(np.random.randn(5), index=index)

    # Ensure left closing works
    result = s.resample('2200L').mean()
    assert result.index[-1] == Timestamp('2014-10-15 23:00:02.000')

    # Ensure right closing works
    result = s.resample('2200L', label='right').mean()
    assert result.index[-1] == Timestamp('2014-10-15 23:00:04.200')
def test_corner_cases(self):
    """Assorted edge cases: right-closed labels, empty input, tiny spans."""
    # miscellaneous test coverage
    rng = date_range('1/1/2000', periods=12, freq='t')
    ts = Series(np.random.randn(len(rng)), index=rng)

    result = ts.resample('5t', closed='right', label='left').mean()
    ex_index = date_range('1999-12-31 23:55', periods=4, freq='5t')
    tm.assert_index_equal(result.index, ex_index)

    # Empty period series: resampling yields an empty result, no error.
    len0pts = _simple_pts('2007-01', '2010-05', freq='M')[:0]
    # it works
    result = len0pts.resample('A-DEC').mean()
    assert len(result) == 0

    # resample to periods
    ts = _simple_ts('2000-04-28', '2000-04-30 11:00', freq='h')
    result = ts.resample('M', kind='period').mean()
    assert len(result) == 1
    assert result.index[0] == Period('2000-04', freq='M')
def test_anchored_lowercase_buglet(self):
    """Lowercase anchored alias 'd' is accepted by resample."""
    dates = date_range('4/16/2012 20:00', periods=50000, freq='s')
    ts = Series(np.random.randn(len(dates)), index=dates)
    # it works!
    ts.resample('d').mean()
def test_upsample_apply_functions(self):
    """Aggregating with a list of funcs during upsample returns a DataFrame."""
    # #1596
    rng = pd.date_range('2012-06-12', periods=4, freq='h')

    ts = Series(np.random.randn(len(rng)), index=rng)

    result = ts.resample('20min').aggregate(['mean', 'sum'])
    assert isinstance(result, DataFrame)
def test_resample_not_monotonic(self):
    """Resampling an unsorted index equals resampling the sorted series."""
    rng = pd.date_range('2012-06-12', periods=200, freq='h')
    ts = Series(np.random.randn(len(rng)), index=rng)

    ts = ts.take(np.random.permutation(len(ts)))  # shuffle the index

    result = ts.resample('D').sum()
    exp = ts.sort_index().resample('D').sum()
    assert_series_equal(result, exp)
def test_resample_median_bug_1688(self):
    """apply(mean) and median on already-aligned bins equal asfreq (GH 1688)."""
    for dtype in ['int64', 'int32', 'float64', 'float32']:
        df = DataFrame([1, 2], index=[datetime(2012, 1, 1, 0, 0, 0),
                                      datetime(2012, 1, 1, 0, 5, 0)],
                       dtype=dtype)

        # One value per minute bin → aggregation is an identity.
        result = df.resample("T").apply(lambda x: x.mean())
        exp = df.asfreq('T')
        tm.assert_frame_equal(result, exp)

        result = df.resample("T").median()
        exp = df.asfreq('T')
        tm.assert_frame_equal(result, exp)
def test_how_lambda_functions(self):
    """Lambdas passed singly, as a list, and as a dict all aggregate correctly."""
    ts = _simple_ts('1/1/2000', '4/1/2000')

    result = ts.resample('M').apply(lambda x: x.mean())
    exp = ts.resample('M').mean()
    tm.assert_series_equal(result, exp)

    foo_exp = ts.resample('M').mean()
    foo_exp.name = 'foo'
    bar_exp = ts.resample('M').std()
    bar_exp.name = 'bar'

    # List of lambdas → columns; rename them to check against named series.
    result = ts.resample('M').apply(
        [lambda x: x.mean(), lambda x: x.std(ddof=1)])
    result.columns = ['foo', 'bar']
    tm.assert_series_equal(result['foo'], foo_exp)
    tm.assert_series_equal(result['bar'], bar_exp)

    # this is a MI Series, so comparing the names of the results
    # doesn't make sense
    result = ts.resample('M').aggregate({'foo': lambda x: x.mean(),
                                         'bar': lambda x: x.std(ddof=1)})
    tm.assert_series_equal(result['foo'], foo_exp, check_names=False)
    tm.assert_series_equal(result['bar'], bar_exp, check_names=False)
def test_resample_unequal_times(self):
    """Annual resample works when the end hour is earlier than the start (GH 1772)."""
    # #1772
    start = datetime(1999, 3, 1, 5)
    # end hour is less than start
    end = datetime(2012, 7, 31, 4)
    bad_ind = date_range(start, end, freq="30min")
    df = DataFrame({'close': 1}, index=bad_ind)

    # it works!
    df.resample('AS').sum()
def test_resample_consistency(self):
    """Upsampling via bfill agrees across resample/reindex/reindex_like (GH 6418)."""
    # GH 6418
    # resample with bfill / limit / reindex consistency

    i30 = pd.date_range('2002-02-02', periods=4, freq='30T')
    s = Series(np.arange(4.), index=i30)
    s[2] = np.NaN

    # Upsample by factor 3 with reindex() and resample() methods:
    i10 = pd.date_range(i30[0], i30[-1], freq='10T')

    s10 = s.reindex(index=i10, method='bfill')
    s10_2 = s.reindex(index=i10, method='bfill', limit=2)
    rl = s.reindex_like(s10, method='bfill', limit=2)
    r10_2 = s.resample('10Min').bfill(limit=2)
    r10 = s.resample('10Min').bfill()

    # s10_2, r10, r10_2, rl should all be equal
    assert_series_equal(s10_2, r10)
    assert_series_equal(s10_2, r10_2)
    assert_series_equal(s10_2, rl)
def test_resample_timegrouper(self):
    """resample('M').count() matches groupby(Grouper), with and without NaT (GH 7227)."""
    # GH 7227
    dates1 = [datetime(2014, 10, 1), datetime(2014, 9, 3),
              datetime(2014, 11, 5), datetime(2014, 9, 5),
              datetime(2014, 10, 8), datetime(2014, 7, 15)]

    # Variants with NaT interleaved and at the edges.
    dates2 = dates1[:2] + [pd.NaT] + dates1[2:4] + [pd.NaT] + dates1[4:]
    dates3 = [pd.NaT] + dates1 + [pd.NaT]

    for dates in [dates1, dates2, dates3]:
        df = DataFrame(dict(A=dates, B=np.arange(len(dates))))
        result = df.set_index('A').resample('M').count()
        exp_idx = pd.DatetimeIndex(['2014-07-31', '2014-08-31',
                                    '2014-09-30',
                                    '2014-10-31', '2014-11-30'],
                                   freq='M', name='A')
        expected = DataFrame({'B': [1, 0, 2, 2, 1]}, index=exp_idx)
        assert_frame_equal(result, expected)

        result = df.groupby(pd.Grouper(freq='M', key='A')).count()
        assert_frame_equal(result, expected)

        # Same checks with a second value column.
        df = DataFrame(dict(A=dates, B=np.arange(len(dates)), C=np.arange(
            len(dates))))
        result = df.set_index('A').resample('M').count()
        expected = DataFrame({'B': [1, 0, 2, 2, 1], 'C': [1, 0, 2, 2, 1]},
                             index=exp_idx, columns=['B', 'C'])
        assert_frame_equal(result, expected)

        result = df.groupby(pd.Grouper(freq='M', key='A')).count()
        assert_frame_equal(result, expected)
def test_resample_nunique(self):
# GH 12352
df = DataFrame({
'ID': {Timestamp('2015-06-05 00:00:00'): '0010100903',
Timestamp('2015-06-08 00:00:00'): '0010150847'},
'DATE': {Timestamp('2015-06-05 00:00:00'): '2015-06-05',
Timestamp('2015-06-08 00:00:00'): '2015-06-08'}})
r = df.resample('D')
g = df.groupby(pd.Grouper(freq='D'))
expected = df.groupby(pd.Grouper(freq='D')).ID.apply(lambda x:
x.nunique())
assert expected.name == 'ID'
for t in [r, g]:
result = r.ID.nunique()
assert_series_equal(result, expected)
result = df.ID.resample('D').nunique()
assert_series_equal(result, expected)
result = df.ID.groupby(pd.Grouper(freq='D')).nunique()
assert_series_equal(result, expected)
def test_resample_nunique_with_date_gap(self):
    """count/nunique/agg variants agree across a gap of empty bins (GH 13453)."""
    # GH 13453
    index = pd.date_range('1-1-2000', '2-15-2000', freq='h')
    index2 = pd.date_range('4-15-2000', '5-15-2000', freq='h')
    index3 = index.append(index2)  # leaves a multi-month gap in the middle
    s = Series(range(len(index3)), index=index3, dtype='int64')
    r = s.resample('M')

    # Since all elements are unique, these should all be the same
    results = [
        r.count(),
        r.nunique(),
        r.agg(Series.nunique),
        r.agg('nunique')
    ]

    assert_series_equal(results[0], results[1])
    assert_series_equal(results[0], results[2])
    assert_series_equal(results[0], results[3])
@pytest.mark.parametrize('n', [10000, 100000])
@pytest.mark.parametrize('k', [10, 100, 1000])
def test_resample_group_info(self, n, k):
    """resample('30T').nunique() matches a hand-computed numpy answer (GH 10914)."""
    # GH10914
    dr = date_range(start='2015-08-27', periods=n // 10, freq='T')
    ts = Series(np.random.randint(0, n // k, n).astype('int64'),
                index=np.random.choice(dr, n))

    left = ts.resample('30T').nunique()
    ix = date_range(start=ts.index.min(), end=ts.index.max(),
                    freq='30T')

    # Reference computation: bucket each point, then count unique values
    # per bucket via a sort + first-occurrence mask.
    vals = ts.values
    bins = np.searchsorted(ix.values, ts.index, side='right')

    sorter = np.lexsort((vals, bins))
    vals, bins = vals[sorter], bins[sorter]

    mask = np.r_[True, vals[1:] != vals[:-1]]
    mask |= np.r_[True, bins[1:] != bins[:-1]]

    arr = np.bincount(bins[mask] - 1,
                      minlength=len(ix)).astype('int64', copy=False)
    right = Series(arr, index=ix)

    assert_series_equal(left, right)
def test_resample_size(self):
    """resample('7T').size() matches a numpy bincount reference."""
    n = 10000
    dr = date_range('2015-09-19', periods=n, freq='T')
    ts = Series(np.random.randn(n), index=np.random.choice(dr, n))

    left = ts.resample('7T').size()
    ix = date_range(start=left.index.min(), end=ts.index.max(), freq='7T')

    bins = np.searchsorted(ix.values, ts.index.values, side='right')
    val = np.bincount(bins, minlength=len(ix) + 1)[1:].astype('int64',
                                                             copy=False)

    right = Series(val, index=ix)
    assert_series_equal(left, right)
def test_resample_across_dst(self):
    """Hourly resample is correct across a DST fall-back transition (GH 14682)."""
    # The test resamples a DatetimeIndex with values before and after a
    # DST change
    # Issue: 14682

    # The DatetimeIndex we will start with
    # (note that DST happens at 03:00+02:00 -> 02:00+01:00)
    # 2016-10-30 02:23:00+02:00, 2016-10-30 02:23:00+01:00
    df1 = DataFrame([1477786980, 1477790580], columns=['ts'])
    dti1 = DatetimeIndex(pd.to_datetime(df1.ts, unit='s')
                         .dt.tz_localize('UTC')
                         .dt.tz_convert('Europe/Madrid'))

    # The expected DatetimeIndex after resampling.
    # 2016-10-30 02:00:00+02:00, 2016-10-30 02:00:00+01:00
    df2 = DataFrame([1477785600, 1477789200], columns=['ts'])
    dti2 = DatetimeIndex(pd.to_datetime(df2.ts, unit='s')
                         .dt.tz_localize('UTC')
                         .dt.tz_convert('Europe/Madrid'))
    df = DataFrame([5, 5], index=dti1)

    result = df.resample(rule='H').sum()
    expected = DataFrame([5, 5], index=dti2)
    assert_frame_equal(result, expected)
def test_resample_dst_anchor(self):
    """Anchored-frequency resampling across DST transitions (GH 5172, 8531)."""
    # 5172
    dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz='US/Eastern')
    df = DataFrame([5], index=dti)
    # Daily anchoring normalizes to midnight in the local timezone.
    assert_frame_equal(df.resample(rule='D').sum(),
                       DataFrame([5], index=df.index.normalize()))
    df.resample(rule='MS').sum()
    assert_frame_equal(
        df.resample(rule='MS').sum(),
        DataFrame([5], index=DatetimeIndex([datetime(2012, 11, 1)],
                                           tz='US/Eastern')))

    # A 30-minute grid spanning the European fall-back transition.
    dti = date_range('2013-09-30', '2013-11-02', freq='30Min',
                     tz='Europe/Paris')
    values = range(dti.size)
    df = DataFrame({"a": values,
                    "b": values,
                    "c": values}, index=dti, dtype='int64')
    how = {"a": "min", "b": "max", "c": "count"}

    # Expected min/max/count per bin were computed by hand; note the DST
    # week has a different count ('c') because of the extra hour.
    assert_frame_equal(
        df.resample("W-MON").agg(how)[["a", "b", "c"]],
        DataFrame({"a": [0, 48, 384, 720, 1056, 1394],
                   "b": [47, 383, 719, 1055, 1393, 1586],
                   "c": [48, 336, 336, 336, 338, 193]},
                  index=date_range('9/30/2013', '11/4/2013',
                                   freq='W-MON', tz='Europe/Paris')),
        'W-MON Frequency')

    assert_frame_equal(
        df.resample("2W-MON").agg(how)[["a", "b", "c"]],
        DataFrame({"a": [0, 48, 720, 1394],
                   "b": [47, 719, 1393, 1586],
                   "c": [48, 672, 674, 193]},
                  index=date_range('9/30/2013', '11/11/2013',
                                   freq='2W-MON', tz='Europe/Paris')),
        '2W-MON Frequency')

    assert_frame_equal(
        df.resample("MS").agg(how)[["a", "b", "c"]],
        DataFrame({"a": [0, 48, 1538],
                   "b": [47, 1537, 1586],
                   "c": [48, 1490, 49]},
                  index=date_range('9/1/2013', '11/1/2013',
                                   freq='MS', tz='Europe/Paris')),
        'MS Frequency')

    assert_frame_equal(
        df.resample("2MS").agg(how)[["a", "b", "c"]],
        DataFrame({"a": [0, 1538],
                   "b": [1537, 1586],
                   "c": [1538, 49]},
                  index=date_range('9/1/2013', '11/1/2013',
                                   freq='2MS', tz='Europe/Paris')),
        '2MS Frequency')

    df_daily = df['10/26/2013':'10/29/2013']
    assert_frame_equal(
        df_daily.resample("D").agg({"a": "min", "b": "max", "c": "count"})
        [["a", "b", "c"]],
        DataFrame({"a": [1248, 1296, 1346, 1394],
                   "b": [1295, 1345, 1393, 1441],
                   "c": [48, 50, 48, 48]},
                  index=date_range('10/26/2013', '10/29/2013',
                                   freq='D', tz='Europe/Paris')),
        'D Frequency')
def test_downsample_across_dst(self):
    """Upsampling 2H→H across a DST change inserts NaNs at missing hours (GH 8531)."""
    # GH 8531
    tz = pytz.timezone('Europe/Berlin')
    dt = datetime(2014, 10, 26)
    dates = date_range(tz.localize(dt), periods=4, freq='2H')
    result = Series(5, index=dates).resample('H').mean()
    expected = Series([5., np.nan] * 3 + [5.],
                      index=date_range(tz.localize(dt), periods=7,
                                       freq='H'))
    tm.assert_series_equal(result, expected)
def test_resample_with_nat(self):
    """NaT index entries are dropped before binning (GH 13020)."""
    # GH 13020
    index = DatetimeIndex([pd.NaT,
                           '1970-01-01 00:00:00',
                           pd.NaT,
                           '1970-01-01 00:00:01',
                           '1970-01-01 00:00:02'])
    frame = DataFrame([2, 3, 5, 7, 11], index=index)

    index_1s = DatetimeIndex(['1970-01-01 00:00:00',
                              '1970-01-01 00:00:01',
                              '1970-01-01 00:00:02'])
    frame_1s = DataFrame([3, 7, 11], index=index_1s)
    assert_frame_equal(frame.resample('1s').mean(), frame_1s)

    index_2s = DatetimeIndex(['1970-01-01 00:00:00',
                              '1970-01-01 00:00:02'])
    frame_2s = DataFrame([5, 11], index=index_2s)
    assert_frame_equal(frame.resample('2s').mean(), frame_2s)

    index_3s = DatetimeIndex(['1970-01-01 00:00:00'])
    frame_3s = DataFrame([7], index=index_3s)
    assert_frame_equal(frame.resample('3s').mean(), frame_3s)

    # Coarser than the whole span: same single-bin result.
    assert_frame_equal(frame.resample('60s').mean(), frame_3s)
def test_resample_timedelta_values(self):
    """timedelta64 dtype survives NaT insertion from resampling (GH 13119)."""
    # GH 13119
    # check that timedelta dtype is preserved when NaT values are
    # introduced by the resampling

    times = timedelta_range('1 day', '4 day', freq='4D')
    df = DataFrame({'time': times}, index=times)

    times2 = timedelta_range('1 day', '4 day', freq='2D')
    exp = Series(times2, index=times2, name='time')
    exp.iloc[1] = pd.NaT  # the empty middle bin becomes NaT

    res = df.resample('2D').first()['time']
    tm.assert_series_equal(res, exp)
    res = df['time'].resample('2D').first()
    tm.assert_series_equal(res, exp)
def test_resample_datetime_values(self):
    """datetime64 dtype survives NaT insertion from resampling (GH 13119)."""
    # GH 13119
    # check that datetime dtype is preserved when NaT values are
    # introduced by the resampling

    dates = [datetime(2016, 1, 15), datetime(2016, 1, 19)]
    df = DataFrame({'timestamp': dates}, index=dates)

    exp = Series([datetime(2016, 1, 15), pd.NaT, datetime(2016, 1, 19)],
                 index=date_range('2016-01-15', periods=3, freq='2D'),
                 name='timestamp')

    res = df.resample('2D').first()['timestamp']
    tm.assert_series_equal(res, exp)
    res = df['timestamp'].resample('2D').first()
    tm.assert_series_equal(res, exp)
def test_resample_apply_with_additional_args(self):
    """Extra positional/keyword args flow through .apply/.agg (GH 14615)."""
    # GH 14615
    def f(data, add_arg):
        # Trivial aggregation that scales the mean by add_arg.
        return np.mean(data) * add_arg

    multiplier = 10
    result = self.series.resample('D').apply(f, multiplier)
    expected = self.series.resample('D').mean().multiply(multiplier)
    tm.assert_series_equal(result, expected)

    # Testing as kwarg
    result = self.series.resample('D').apply(f, add_arg=multiplier)
    expected = self.series.resample('D').mean().multiply(multiplier)
    tm.assert_series_equal(result, expected)

    # Testing dataframe
    df = pd.DataFrame({"A": 1, "B": 2},
                      index=pd.date_range('2017', periods=10))
    result = df.groupby("A").resample("D").agg(f, multiplier)
    expected = df.groupby("A").resample('D').mean().multiply(multiplier)
    assert_frame_equal(result, expected)
class TestPeriodIndex(Base):
_index_factory = lambda x: period_range
@pytest.fixture
def _series_name(self):
    """Name used by the base-class series fixture for this index type."""
    return 'pi'
def create_series(self):
    """Return a small daily-frequency PeriodIndex series named 'pi'."""
    # TODO: replace calls to .create_series() by injecting the series
    # fixture
    i = period_range(datetime(2005, 1, 1),
                     datetime(2005, 1, 10), freq='D')

    return Series(np.arange(len(i)), index=i, name='pi')
@pytest.mark.parametrize('freq', ['2D', '1H', '2H'])
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
def test_asfreq(self, series_and_frame, freq, kind):
    """.asfreq() keeps a PeriodIndex unless kind='timestamp' (GH 12884, 15944)."""
    # GH 12884, 15944
    # make sure .asfreq() returns PeriodIndex (except kind='timestamp')

    obj = series_and_frame
    if kind == 'timestamp':
        expected = obj.to_timestamp().resample(freq).asfreq()
    else:
        # Build the expected result from the timestamp span converted
        # back to periods at the target frequency.
        start = obj.index[0].to_timestamp(how='start')
        end = (obj.index[-1] + 1).to_timestamp(how='start')
        new_index = date_range(start=start, end=end, freq=freq,
                               closed='left')
        expected = obj.to_timestamp().reindex(new_index).to_period(freq)
    result = obj.resample(freq, kind=kind).asfreq()
    assert_almost_equal(result, expected)
def test_asfreq_fill_value(self):
    """asfreq(fill_value=...) fills the upsampled gaps (GH 3715)."""
    # test for fill value during resampling, issue 3715

    s = self.create_series()
    new_index = date_range(s.index[0].to_timestamp(how='start'),
                           (s.index[-1]).to_timestamp(how='start'),
                           freq='1H')
    expected = s.to_timestamp().reindex(new_index, fill_value=4.0)
    result = s.resample('1H', kind='timestamp').asfreq(fill_value=4.0)
    assert_series_equal(result, expected)

    # Same check on a one-column frame.
    frame = s.to_frame('value')
    new_index = date_range(frame.index[0].to_timestamp(how='start'),
                           (frame.index[-1]).to_timestamp(how='start'),
                           freq='1H')
    expected = frame.to_timestamp().reindex(new_index, fill_value=3.0)
    result = frame.resample('1H', kind='timestamp').asfreq(fill_value=3.0)
    assert_frame_equal(result, expected)
@pytest.mark.parametrize('freq', ['H', '12H', '2D', 'W'])
@pytest.mark.parametrize('kind', [None, 'period', 'timestamp'])
def test_selection(self, index, freq, kind):
    """resample(on=...)/(level=...) on a PeriodIndex raises NotImplementedError (GH 14008)."""
    # This is a bug, these should be implemented
    # GH 14008
    rng = np.arange(len(index), dtype=np.int64)
    df = DataFrame({'date': index, 'a': rng},
                   index=pd.MultiIndex.from_arrays([rng, index],
                                                   names=['v', 'd']))
    with pytest.raises(NotImplementedError):
        df.resample(freq, on='date', kind=kind)
    with pytest.raises(NotImplementedError):
        df.resample(freq, level='d', kind=kind)
def test_annual_upsample_D_s_f(self):
    """Annual → daily, 'start' convention, forward fill."""
    self._check_annual_upsample_cases('D', 'start', 'ffill')
def test_annual_upsample_D_e_f(self):
    """Annual → daily, 'end' convention, forward fill."""
    self._check_annual_upsample_cases('D', 'end', 'ffill')
def test_annual_upsample_D_s_b(self):
    """Annual → daily, 'start' convention, backfill."""
    self._check_annual_upsample_cases('D', 'start', 'bfill')
def test_annual_upsample_D_e_b(self):
    """Annual → daily, 'end' convention, backfill."""
    self._check_annual_upsample_cases('D', 'end', 'bfill')
def test_annual_upsample_B_s_f(self):
    """Annual → business-daily, 'start' convention, forward fill."""
    self._check_annual_upsample_cases('B', 'start', 'ffill')
def test_annual_upsample_B_e_f(self):
    """Annual → business-daily, 'end' convention, forward fill."""
    self._check_annual_upsample_cases('B', 'end', 'ffill')
def test_annual_upsample_B_s_b(self):
    """Annual → business-daily, 'start' convention, backfill."""
    self._check_annual_upsample_cases('B', 'start', 'bfill')
def test_annual_upsample_B_e_b(self):
    """Annual → business-daily, 'end' convention, backfill."""
    self._check_annual_upsample_cases('B', 'end', 'bfill')
def test_annual_upsample_M_s_f(self):
    """Annual → monthly, 'start' convention, forward fill."""
    self._check_annual_upsample_cases('M', 'start', 'ffill')
def test_annual_upsample_M_e_f(self):
    """Annual → monthly, 'end' convention, forward fill."""
    self._check_annual_upsample_cases('M', 'end', 'ffill')
def test_annual_upsample_M_s_b(self):
    """Annual → monthly, 'start' convention, backfill."""
    self._check_annual_upsample_cases('M', 'start', 'bfill')
def test_annual_upsample_M_e_b(self):
    """Annual → monthly, 'end' convention, backfill."""
    self._check_annual_upsample_cases('M', 'end', 'bfill')
def _check_annual_upsample_cases(self, targ, conv, meth, end='12/31/1991'):
    """Check annual→`targ` upsampling for every year-end anchor month.

    `conv` is the convention ('start'/'end') and `meth` the fill method
    name ('ffill'/'bfill'); expected values come from the timestamp path.
    """
    for month in MONTHS:
        ts = _simple_pts('1/1/1990', end, freq='A-%s' % month)

        result = getattr(ts.resample(targ, convention=conv), meth)()
        expected = result.to_timestamp(targ, how=conv)
        expected = expected.asfreq(targ, meth).to_period()
        assert_series_equal(result, expected)
def test_basic_downsample(self):
    """Monthly → annual downsample equals a by-year groupby mean."""
    ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
    result = ts.resample('a-dec').mean()

    expected = ts.groupby(ts.index.year).mean()
    expected.index = period_range('1/1/1990', '6/30/1995', freq='a-dec')
    assert_series_equal(result, expected)

    # this is ok
    assert_series_equal(ts.resample('a-dec').mean(), result)
    assert_series_equal(ts.resample('a').mean(), result)
def test_not_subperiod(self):
    """Resampling between non-nesting period rules raises ValueError."""
    # These are incompatible period rules for resampling
    ts = _simple_pts('1/1/1990', '6/30/1995', freq='w-wed')
    pytest.raises(ValueError, lambda: ts.resample('a-dec').mean())
    pytest.raises(ValueError, lambda: ts.resample('q-mar').mean())
    pytest.raises(ValueError, lambda: ts.resample('M').mean())
    pytest.raises(ValueError, lambda: ts.resample('w-thu').mean())
@pytest.mark.parametrize('freq', ['D', '2D'])
def test_basic_upsample(self, freq):
    """Annual → daily/2-daily upsample with 'end' convention matches asfreq."""
    ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
    result = ts.resample('a-dec').mean()

    resampled = result.resample(freq, convention='end').ffill()
    expected = result.to_timestamp(freq, how='end')
    expected = expected.asfreq(freq, 'ffill').to_period(freq)
    assert_series_equal(resampled, expected)
def test_upsample_with_limit(self):
    """ffill(limit=2) during upsample matches a limited reindex."""
    rng = period_range('1/1/2000', periods=5, freq='A')
    ts = Series(np.random.randn(len(rng)), rng)

    result = ts.resample('M', convention='end').ffill(limit=2)
    expected = ts.asfreq('M').reindex(result.index, method='ffill',
                                      limit=2)
    assert_series_equal(result, expected)
def test_annual_upsample(self):
    """Annual → daily/monthly ffill works on frames and matches asfreq."""
    ts = _simple_pts('1/1/1990', '12/31/1995', freq='A-DEC')
    df = DataFrame({'a': ts})
    rdf = df.resample('D').ffill()
    exp = df['a'].resample('D').ffill()
    assert_series_equal(rdf['a'], exp)

    rng = period_range('2000', '2003', freq='A-DEC')
    ts = Series([1, 2, 3, 4], index=rng)

    result = ts.resample('M').ffill()
    ex_index = period_range('2000-01', '2003-12', freq='M')

    expected = ts.asfreq('M', how='start').reindex(ex_index,
                                                   method='ffill')
    assert_series_equal(result, expected)
@pytest.mark.parametrize('month', MONTHS)
@pytest.mark.parametrize('target', ['D', 'B', 'M'])
@pytest.mark.parametrize('convention', ['start', 'end'])
def test_quarterly_upsample(self, month, target, convention):
    """Quarterly → finer frequency ffill matches the timestamp path."""
    freq = 'Q-{month}'.format(month=month)
    ts = _simple_pts('1/1/1990', '12/31/1995', freq=freq)
    result = ts.resample(target, convention=convention).ffill()
    expected = result.to_timestamp(target, how=convention)
    expected = expected.asfreq(target, 'ffill').to_period()
    assert_series_equal(result, expected)
@pytest.mark.parametrize('target', ['D', 'B'])
@pytest.mark.parametrize('convention', ['start', 'end'])
def test_monthly_upsample(self, target, convention):
    """Monthly → daily/business-daily ffill matches the timestamp path."""
    ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')
    result = ts.resample(target, convention=convention).ffill()
    expected = result.to_timestamp(target, how=convention)
    expected = expected.asfreq(target, 'ffill').to_period()
    assert_series_equal(result, expected)
def test_resample_basic(self):
    """Second → minute mean with kind='period' skips NaNs correctly (GH 3609)."""
    # GH3609
    s = Series(range(100), index=date_range(
        '20130101', freq='s', periods=100, name='idx'), dtype='float')
    s[10:30] = np.nan  # NaN block inside the first minute
    index = PeriodIndex([
        Period('2013-01-01 00:00', 'T'),
        Period('2013-01-01 00:01', 'T')], name='idx')
    expected = Series([34.5, 79.5], index=index)
    result = s.to_period().resample('T', kind='period').mean()
    assert_series_equal(result, expected)
    result2 = s.resample('T', kind='period').mean()
    assert_series_equal(result2, expected)
@pytest.mark.parametrize('freq,expected_vals', [('M', [31, 29, 31, 9]),
                                                ('2M', [31 + 29, 31 + 9])])
def test_resample_count(self, freq, expected_vals):
    """.count() over a daily PeriodIndex gives per-month tallies (GH 12774)."""
    # GH12774
    series = Series(1, index=pd.period_range(start='2000', periods=100))
    result = series.resample(freq).count()
    expected_index = pd.period_range(start='2000', freq=freq,
                                     periods=len(expected_vals))
    expected = Series(expected_vals, index=expected_index)
    assert_series_equal(result, expected)
def test_resample_same_freq(self):
    """Resampling to the series' own frequency is an identity (GH 12770)."""
    # GH12770
    series = Series(range(3), index=pd.period_range(
        start='2000', periods=3, freq='M'))
    expected = series

    for method in resample_methods:
        result = getattr(series.resample('M'), method)()
        assert_series_equal(result, expected)
def test_resample_incompat_freq(self):
    """Monthly → weekly period resample raises IncompatibleFrequency."""
    with pytest.raises(IncompatibleFrequency):
        Series(range(3), index=pd.period_range(
            start='2000', periods=3, freq='M')).resample('W').mean()
def test_with_local_timezone_pytz(self):
    """kind='period' resample honors a pytz local timezone (GH 5430)."""
    # see gh-5430
    local_timezone = pytz.timezone('America/Los_Angeles')

    start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
                     tzinfo=pytz.utc)
    # 1 day later
    end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
                   tzinfo=pytz.utc)

    index = pd.date_range(start, end, freq='H')

    series = Series(1, index=index)
    series = series.tz_convert(local_timezone)
    result = series.resample('D', kind='period').mean()

    # Create the expected series
    # Index is moved back a day with the timezone conversion from UTC to
    # Pacific
    expected_index = (pd.period_range(start=start, end=end, freq='D') - 1)
    expected = Series(1, index=expected_index)
    assert_series_equal(result, expected)
def test_resample_with_pytz(self):
    """Daily mean of a tz-aware hourly series keeps the pytz zone (GH 13238)."""
    # GH 13238
    s = Series(2, index=pd.date_range('2017-01-01', periods=48, freq="H",
                                      tz="US/Eastern"))
    result = s.resample("D").mean()
    expected = Series(2, index=pd.DatetimeIndex(['2017-01-01',
                                                 '2017-01-02'],
                                                tz="US/Eastern"))
    assert_series_equal(result, expected)
    # Especially assert that the timezone is LMT for pytz
    assert result.index.tz == pytz.timezone('US/Eastern')
def test_with_local_timezone_dateutil(self):
    """kind='period' resample honors a dateutil local timezone (GH 5430)."""
    # see gh-5430
    local_timezone = 'dateutil/America/Los_Angeles'

    start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
                     tzinfo=dateutil.tz.tzutc())
    # 1 day later
    end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
                   tzinfo=dateutil.tz.tzutc())

    index = pd.date_range(start, end, freq='H', name='idx')

    series = Series(1, index=index)
    series = series.tz_convert(local_timezone)
    result = series.resample('D', kind='period').mean()

    # Create the expected series
    # Index is moved back a day with the timezone conversion from UTC to
    # Pacific
    expected_index = (pd.period_range(start=start, end=end, freq='D',
                                      name='idx') - 1)
    expected = Series(1, index=expected_index)
    assert_series_equal(result, expected)
def test_fill_method_and_how_upsample(self):
    """Chaining .ffill().resample(...).last() matches a plain ffill (GH 2073)."""
    # GH2073
    s = Series(np.arange(9, dtype='int64'),
               index=date_range('2010-01-01', periods=9, freq='Q'))
    last = s.resample('M').ffill()
    both = s.resample('M').ffill().resample('M').last().astype('int64')
    assert_series_equal(last, both)
@pytest.mark.parametrize('day', DAYS)
@pytest.mark.parametrize('target', ['D', 'B'])
@pytest.mark.parametrize('convention', ['start', 'end'])
def test_weekly_upsample(self, day, target, convention):
    # Upsampling a weekly PeriodIndex must match the equivalent
    # to_timestamp/asfreq/to_period round trip for every anchor day.
    freq = 'W-{day}'.format(day=day)
    ts = _simple_pts('1/1/1990', '12/31/1995', freq=freq)
    result = ts.resample(target, convention=convention).ffill()
    expected = result.to_timestamp(target, how=convention)
    expected = expected.asfreq(target, 'ffill').to_period()
    assert_series_equal(result, expected)
def test_resample_to_timestamps(self):
    # Period -> timestamp resample equals to_timestamp() then resample.
    ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')

    result = ts.resample('A-DEC', kind='timestamp').mean()
    expected = ts.to_timestamp(how='start').resample('A-DEC').mean()
    assert_series_equal(result, expected)
def test_resample_to_quarterly(self):
    # Annual -> quarterly ffill for every month anchor, checked against
    # a reindex of the daily-timestamp view.
    for month in MONTHS:
        ts = _simple_pts('1990', '1992', freq='A-%s' % month)
        quar_ts = ts.resample('Q-%s' % month).ffill()

        stamps = ts.to_timestamp('D', how='start')
        qdates = period_range(ts.index[0].asfreq('D', 'start'),
                              ts.index[-1].asfreq('D', 'end'),
                              freq='Q-%s' % month)

        expected = stamps.reindex(qdates.to_timestamp('D', 's'),
                                  method='ffill')
        expected.index = qdates
        assert_series_equal(quar_ts, expected)

    # conforms, but different month
    ts = _simple_pts('1990', '1992', freq='A-JUN')

    for how in ['start', 'end']:
        result = ts.resample('Q-MAR', convention=how).ffill()
        expected = ts.asfreq('Q-MAR', how=how)
        expected = expected.reindex(result.index, method='ffill')

        # .to_timestamp('D')
        # expected = expected.resample('Q-MAR').ffill()

        assert_series_equal(result, expected)
def test_resample_fill_missing(self):
    # ffill over gaps in a sparse annual PeriodIndex matches the
    # timestamp-based equivalent.
    rng = PeriodIndex([2000, 2005, 2007, 2009], freq='A')

    s = Series(np.random.randn(4), index=rng)

    stamps = s.to_timestamp()
    filled = s.resample('A').ffill()
    expected = stamps.resample('A').ffill().to_period('A')
    assert_series_equal(filled, expected)
def test_cant_fill_missing_dups(self):
    # ffill on a PeriodIndex containing duplicate periods must raise.
    rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq='A')
    s = Series(np.random.randn(5), index=rng)
    pytest.raises(Exception, lambda: s.resample('A').ffill())
@pytest.mark.parametrize('freq', ['5min'])
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
def test_resample_5minute(self, freq, kind):
    # Minutely period data downsampled to 5min for each `kind`;
    # kind=None defaults to period output for a PeriodIndex input.
    rng = period_range('1/1/2000', '1/5/2000', freq='T')
    ts = Series(np.random.randn(len(rng)), index=rng)
    expected = ts.to_timestamp().resample(freq).mean()
    if kind != 'timestamp':
        expected = expected.to_period(freq)
    result = ts.resample(freq, kind=kind).mean()
    assert_series_equal(result, expected)
def test_upsample_daily_business_daily(self):
    # Business-daily -> daily and daily -> hourly upsamples via asfreq.
    ts = _simple_pts('1/1/2000', '2/1/2000', freq='B')

    result = ts.resample('D').asfreq()
    expected = ts.asfreq('D').reindex(period_range('1/3/2000', '2/1/2000'))
    assert_series_equal(result, expected)

    ts = _simple_pts('1/1/2000', '2/1/2000')
    result = ts.resample('H', convention='s').asfreq()
    exp_rng = period_range('1/1/2000', '2/1/2000 23:00', freq='H')
    expected = ts.asfreq('H', how='s').reindex(exp_rng)
    assert_series_equal(result, expected)
def test_resample_irregular_sparse(self):
    # Resampling a truncated slice matches the corresponding rows of
    # resampling the full series.
    dr = date_range(start='1/1/2012', freq='5min', periods=1000)
    s = Series(np.array(100), index=dr)
    # subset the data.
    subset = s[:'2012-01-04 06:55']

    result = subset.resample('10min').apply(len)
    expected = s.resample('10min').apply(len).loc[result.index]
    assert_series_equal(result, expected)
def test_resample_weekly_all_na(self):
    # Resampling W-WED data onto W-THU bins yields all-NaN without fill;
    # with ffill it matches asfreq + ffill.
    rng = date_range('1/1/2000', periods=10, freq='W-WED')
    ts = Series(np.random.randn(len(rng)), index=rng)

    result = ts.resample('W-THU').asfreq()

    assert result.isna().all()

    result = ts.resample('W-THU').asfreq().ffill()[:-1]
    expected = ts.asfreq('W-THU').ffill()
    assert_series_equal(result, expected)
def test_resample_tz_localized(self):
    # Several regressions around resampling tz-aware data:
    # weekly mean vs naive equivalent, #2245 (closed/label='right'
    # across midnight in Australia/Sydney), and GH 6397 (agg dict on a
    # tz-localized frame with an offset that doesn't propagate tz's).
    dr = date_range(start='2012-4-13', end='2012-5-1')
    ts = Series(lrange(len(dr)), dr)

    ts_utc = ts.tz_localize('UTC')
    ts_local = ts_utc.tz_convert('America/Los_Angeles')

    result = ts_local.resample('W').mean()

    ts_local_naive = ts_local.copy()
    ts_local_naive.index = [x.replace(tzinfo=None)
                            for x in ts_local_naive.index.to_pydatetime()]

    exp = ts_local_naive.resample(
        'W').mean().tz_localize('America/Los_Angeles')

    assert_series_equal(result, exp)

    # it works
    result = ts_local.resample('D').mean()

    # #2245
    idx = date_range('2001-09-20 15:59', '2001-09-20 16:00', freq='T',
                     tz='Australia/Sydney')
    s = Series([1, 2], index=idx)

    result = s.resample('D', closed='right', label='right').mean()
    ex_index = date_range('2001-09-21', periods=1, freq='D',
                          tz='Australia/Sydney')
    expected = Series([1.5], index=ex_index)

    assert_series_equal(result, expected)

    # for good measure
    result = s.resample('D', kind='period').mean()
    ex_index = period_range('2001-09-20', periods=1, freq='D')
    expected = Series([1.5], index=ex_index)
    assert_series_equal(result, expected)

    # GH 6397
    # comparing an offset that doesn't propagate tz's
    rng = date_range('1/1/2011', periods=20000, freq='H')
    rng = rng.tz_localize('EST')
    ts = DataFrame(index=rng)
    ts['first'] = np.random.randn(len(rng))
    ts['second'] = np.cumsum(np.random.randn(len(rng)))
    expected = DataFrame(
        {
            'first': ts.resample('A').sum()['first'],
            'second': ts.resample('A').mean()['second']},
        columns=['first', 'second'])
    result = ts.resample(
        'A').agg({'first': np.sum,
                  'second': np.mean}).reindex(columns=['first', 'second'])
    assert_frame_equal(result, expected)
def test_closed_left_corner(self):
    # #1465
    # A leading NaN must not shift bin edges with closed='left'.
    s = Series(np.random.randn(21),
               index=date_range(start='1/1/2012 9:30',
                                freq='1min', periods=21))
    s[0] = np.nan

    result = s.resample('10min', closed='left', label='right').mean()
    exp = s[1:].resample('10min', closed='left', label='right').mean()
    assert_series_equal(result, exp)

    result = s.resample('10min', closed='left', label='left').mean()
    exp = s[1:].resample('10min', closed='left', label='left').mean()

    ex_index = date_range(start='1/1/2012 9:30', freq='10min', periods=3)

    tm.assert_index_equal(result.index, ex_index)
    assert_series_equal(result, exp)
def test_quarterly_resampling(self):
    # Quarterly periods -> annual mean matches the timestamp round trip.
    rng = period_range('2000Q1', periods=10, freq='Q-DEC')
    ts = Series(np.arange(10), index=rng)

    result = ts.resample('A').mean()
    exp = ts.to_timestamp().resample('A').mean().to_period()
    assert_series_equal(result, exp)
def test_resample_weekly_bug_1726(self):
    # Smoke test: W-MON resample with closed/label='left' must not raise.
    # 8/6/12 is a Monday
    ind = DatetimeIndex(start="8/6/2012", end="8/26/2012", freq="D")
    n = len(ind)
    data = [[x] * 5 for x in range(n)]
    df = DataFrame(data, columns=['open', 'high', 'low', 'close', 'vol'],
                   index=ind)

    # it works!
    df.resample('W-MON', closed='left', label='left').first()
def test_resample_with_dst_time_change(self):
    # GH 15549
    # 12h bins spanning the US spring-forward DST transition: labels
    # switch from -06:00 to -05:00 and ffill bridges the gap.
    index = pd.DatetimeIndex([1457537600000000000, 1458059600000000000],
                             tz='UTC').tz_convert('America/Chicago')
    df = pd.DataFrame([1, 2], index=index)
    result = df.resample('12h', closed='right',
                         label='right').last().ffill()

    expected_index_values = ['2016-03-09 12:00:00-06:00',
                             '2016-03-10 00:00:00-06:00',
                             '2016-03-10 12:00:00-06:00',
                             '2016-03-11 00:00:00-06:00',
                             '2016-03-11 12:00:00-06:00',
                             '2016-03-12 00:00:00-06:00',
                             '2016-03-12 12:00:00-06:00',
                             '2016-03-13 00:00:00-06:00',
                             '2016-03-13 13:00:00-05:00',
                             '2016-03-14 01:00:00-05:00',
                             '2016-03-14 13:00:00-05:00',
                             '2016-03-15 01:00:00-05:00',
                             '2016-03-15 13:00:00-05:00']
    index = pd.to_datetime(expected_index_values, utc=True).tz_convert(
        'America/Chicago')
    expected = pd.DataFrame([1.0, 1.0, 1.0, 1.0, 1.0,
                             1.0, 1.0, 1.0, 1.0, 1.0,
                             1.0, 1.0, 2.0], index=index)
    assert_frame_equal(result, expected)
def test_resample_bms_2752(self):
    # GH2753
    # Business-month-start resample anchors on the first business day.
    foo = Series(index=pd.bdate_range('20000101', '20000201'))
    res1 = foo.resample("BMS").mean()
    res2 = foo.resample("BMS").mean().resample("B").mean()
    assert res1.index[0] == Timestamp('20000103')
    assert res1.index[0] == res2.index[0]
# def test_monthly_convention_span(self):
# rng = period_range('2000-01', periods=3, freq='M')
# ts = Series(np.arange(3), index=rng)
# # hacky way to get same thing
# exp_index = period_range('2000-01-01', '2000-03-31', freq='D')
# expected = ts.asfreq('D', how='end').reindex(exp_index)
# expected = expected.fillna(method='bfill')
# result = ts.resample('D', convention='span').mean()
# assert_series_equal(result, expected)
def test_default_right_closed_label(self):
    # Downsampling to end-anchored frequencies defaults to
    # closed='right', label='right'.
    end_freq = ['D', 'Q', 'M', 'D']
    end_types = ['M', 'A', 'Q', 'W']
    for from_freq, to_freq in zip(end_freq, end_types):
        idx = DatetimeIndex(start='8/15/2012', periods=100, freq=from_freq)
        df = DataFrame(np.random.randn(len(idx), 2), idx)

        resampled = df.resample(to_freq).mean()
        assert_frame_equal(resampled, df.resample(to_freq, closed='right',
                                                  label='right').mean())
def test_default_left_closed_label(self):
    # Start-anchored / intraday frequencies default to
    # closed='left', label='left'.
    others = ['MS', 'AS', 'QS', 'D', 'H']
    others_freq = ['D', 'Q', 'M', 'H', 'T']

    for from_freq, to_freq in zip(others_freq, others):
        idx = DatetimeIndex(start='8/15/2012', periods=100, freq=from_freq)
        df = DataFrame(np.random.randn(len(idx), 2), idx)

        resampled = df.resample(to_freq).mean()
        assert_frame_equal(resampled, df.resample(to_freq, closed='left',
                                                  label='left').mean())
def test_all_values_single_bin(self):
    # 2070
    # A year of monthly data resampled annually collapses to one bin
    # whose mean equals the whole-series mean.
    index = period_range(start="2012-01-01", end="2012-12-31", freq="M")
    s = Series(np.random.randn(len(index)), index=index)

    result = s.resample("A").mean()
    tm.assert_almost_equal(result[0], s.mean())
def test_evenly_divisible_with_no_extra_bins(self):
    # 4076
    # when the frequency is evenly divisible, sometimes extra bins

    df = DataFrame(np.random.randn(9, 3),
                   index=date_range('2000-1-1', periods=9))
    result = df.resample('5D').mean()
    expected = pd.concat(
        [df.iloc[0:5].mean(), df.iloc[5:].mean()], axis=1).T
    expected.index = [Timestamp('2000-1-1'), Timestamp('2000-1-6')]
    assert_frame_equal(result, expected)

    index = date_range(start='2001-5-4', periods=28)
    df = DataFrame(
        [{'REST_KEY': 1, 'DLY_TRN_QT': 80, 'DLY_SLS_AMT': 90,
          'COOP_DLY_TRN_QT': 30, 'COOP_DLY_SLS_AMT': 20}] * 28 +
        [{'REST_KEY': 2, 'DLY_TRN_QT': 70, 'DLY_SLS_AMT': 10,
          'COOP_DLY_TRN_QT': 50, 'COOP_DLY_SLS_AMT': 20}] * 28,
        index=index.append(index)).sort_index()

    index = date_range('2001-5-4', periods=4, freq='7D')
    expected = DataFrame(
        [{'REST_KEY': 14, 'DLY_TRN_QT': 14, 'DLY_SLS_AMT': 14,
          'COOP_DLY_TRN_QT': 14, 'COOP_DLY_SLS_AMT': 14}] * 4,
        index=index)
    result = df.resample('7D').count()
    assert_frame_equal(result, expected)

    expected = DataFrame(
        [{'REST_KEY': 21, 'DLY_TRN_QT': 1050, 'DLY_SLS_AMT': 700,
          'COOP_DLY_TRN_QT': 560, 'COOP_DLY_SLS_AMT': 280}] * 4,
        index=index)
    result = df.resample('7D').sum()
    assert_frame_equal(result, expected)
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
@pytest.mark.parametrize('agg_arg', ['mean', {'value': 'mean'}, ['mean']])
def test_loffset_returns_datetimeindex(self, frame, kind, agg_arg):
    # make sure passing loffset returns DatetimeIndex in all cases
    # basic method taken from Base.test_resample_loffset_arg_type()
    df = frame
    expected_means = [df.values[i:i + 2].mean()
                      for i in range(0, len(df.values), 2)]
    expected_index = self.create_index(df.index[0],
                                       periods=len(df.index) / 2,
                                       freq='2D')

    # loffset coerces PeriodIndex to DateTimeIndex
    expected_index = expected_index.to_timestamp()
    expected_index += timedelta(hours=2)
    expected = DataFrame({'value': expected_means}, index=expected_index)

    result_agg = df.resample('2D', loffset='2H', kind=kind).agg(agg_arg)

    # deprecated `how=` path must produce the same frame as .agg()
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        result_how = df.resample('2D', how=agg_arg, loffset='2H',
                                 kind=kind)
    if isinstance(agg_arg, list):
        expected.columns = pd.MultiIndex.from_tuples([('value', 'mean')])
    assert_frame_equal(result_agg, expected)
    assert_frame_equal(result_how, expected)
@pytest.mark.parametrize('freq, period_mult', [('H', 24), ('12H', 2)])
@pytest.mark.parametrize('kind', [None, 'period'])
def test_upsampling_ohlc(self, freq, period_mult, kind):
    # GH 13083
    pi = PeriodIndex(start='2000', freq='D', periods=10)
    s = Series(range(len(pi)), index=pi)
    expected = s.to_timestamp().resample(freq).ohlc().to_period(freq)

    # timestamp-based resampling doesn't include all sub-periods
    # of the last original period, so extend accordingly:
    new_index = PeriodIndex(start='2000', freq=freq,
                            periods=period_mult * len(pi))
    expected = expected.reindex(new_index)
    result = s.resample(freq, kind=kind).ohlc()
    assert_frame_equal(result, expected)
@pytest.mark.parametrize('periods, values',
                         [([pd.NaT, '1970-01-01 00:00:00', pd.NaT,
                            '1970-01-01 00:00:02', '1970-01-01 00:00:03'],
                           [2, 3, 5, 7, 11]),
                          ([pd.NaT, pd.NaT, '1970-01-01 00:00:00', pd.NaT,
                            pd.NaT, pd.NaT, '1970-01-01 00:00:02',
                            '1970-01-01 00:00:03', pd.NaT, pd.NaT],
                           [1, 2, 3, 5, 6, 8, 7, 11, 12, 13])])
@pytest.mark.parametrize('freq, expected_values',
                         [('1s', [3, np.NaN, 7, 11]),
                          ('2s', [3, int((7 + 11) / 2)]),
                          ('3s', [int((3 + 7) / 2), 11])])
def test_resample_with_nat(self, periods, values, freq, expected_values):
    # GH 13224
    # NaT rows are dropped from the binning; only valid periods
    # contribute to the resampled means.
    index = PeriodIndex(periods, freq='S')
    frame = DataFrame(values, index=index)

    expected_index = period_range('1970-01-01 00:00:00',
                                  periods=len(expected_values), freq=freq)
    expected = DataFrame(expected_values, index=expected_index)
    result = frame.resample(freq).mean()
    assert_frame_equal(result, expected)
def test_resample_with_only_nat(self):
    # GH 13224
    # An all-NaT PeriodIndex resamples to an empty frame.
    pi = PeriodIndex([pd.NaT] * 3, freq='S')
    frame = DataFrame([2, 3, 5], index=pi)
    expected_index = PeriodIndex(data=[], freq=pi.freq)
    expected = DataFrame([], index=expected_index)
    result = frame.resample('1s').mean()
    assert_frame_equal(result, expected)
class TestTimedeltaIndex(Base):
    """Resample tests for TimedeltaIndex-backed data.

    Inherits the shared resample test battery from ``Base``; the
    fixtures below parameterize it for a '1 day'..'10 day' index.
    """
    # NOTE(review): the lambda ignores its argument and returns the
    # factory itself — mirrors the pattern used by sibling classes.
    _index_factory = lambda x: timedelta_range

    @pytest.fixture
    def _index_start(self):
        return '1 day'

    @pytest.fixture
    def _index_end(self):
        return '10 day'

    @pytest.fixture
    def _series_name(self):
        return 'tdi'

    def create_series(self):
        # Canonical series used by the inherited Base tests.
        i = timedelta_range('1 day',
                            '10 day', freq='D')

        return Series(np.arange(len(i)), index=i, name='tdi')

    def test_asfreq_bug(self):
        # Upsampling a 2-row timedelta frame to 1min inserts NaN rows.
        import datetime as dt
        df = DataFrame(data=[1, 3],
                       index=[dt.timedelta(), dt.timedelta(minutes=3)])
        result = df.resample('1T').asfreq()
        expected = DataFrame(data=[1, np.nan, np.nan, 3],
                             index=timedelta_range('0 day',
                                                   periods=4,
                                                   freq='1T'))
        assert_frame_equal(result, expected)

    def test_resample_with_nat(self):
        # GH 13223
        # NaT entries in a TimedeltaIndex are excluded from the bins.
        index = pd.to_timedelta(['0s', pd.NaT, '2s'])
        result = DataFrame({'value': [2, 3, 5]}, index).resample('1s').mean()
        expected = DataFrame({'value': [2.5, np.nan, 5.0]},
                             index=timedelta_range('0 day',
                                                   periods=3,
                                                   freq='1S'))
        assert_frame_equal(result, expected)

    def test_resample_as_freq_with_subperiod(self):
        # GH 13022
        # asfreq to a frequency that subdivides the original spacing.
        index = timedelta_range('00:00:00', '00:10:00', freq='5T')
        df = DataFrame(data={'value': [1, 5, 10]}, index=index)
        result = df.resample('2T').asfreq()
        expected_data = {'value': [1, np.nan, np.nan, np.nan, np.nan, 10]}
        expected = DataFrame(data=expected_data,
                             index=timedelta_range('00:00:00',
                                                   '00:10:00', freq='2T'))
        tm.assert_frame_equal(result, expected)
class TestResamplerGrouper(object):
    """Tests for groupby(...).resample(...) — the deferred resampler
    obtained from a GroupBy — checked against the equivalent
    groupby().apply(lambda g: g.resample(...)) formulation.
    """

    def setup_method(self, method):
        # 40 seconds of data split into three 'A' groups.
        self.frame = DataFrame({'A': [1] * 20 + [2] * 12 + [3] * 8,
                                'B': np.arange(40)},
                               index=date_range('1/1/2000',
                                                freq='s',
                                                periods=40))

    def test_tab_complete_ipython6_warning(self, ip):
        # Tab-completing a Resampler in IPython 6 must not warn.
        from IPython.core.completer import provisionalcompleter
        code = dedent("""\
        import pandas.util.testing as tm
        s = tm.makeTimeSeries()
        rs = s.resample("D")
        """)
        ip.run_code(code)

        with tm.assert_produces_warning(None):
            with provisionalcompleter('ignore'):
                list(ip.Completer.completions('rs.', 1))

    def test_deferred_with_groupby(self):

        # GH 12486
        # support deferred resample ops with groupby
        data = [['2010-01-01', 'A', 2], ['2010-01-02', 'A', 3],
                ['2010-01-05', 'A', 8], ['2010-01-10', 'A', 7],
                ['2010-01-13', 'A', 3], ['2010-01-01', 'B', 5],
                ['2010-01-03', 'B', 2], ['2010-01-04', 'B', 1],
                ['2010-01-11', 'B', 7], ['2010-01-14', 'B', 3]]

        df = DataFrame(data, columns=['date', 'id', 'score'])
        df.date = pd.to_datetime(df.date)
        f = lambda x: x.set_index('date').resample('D').asfreq()
        expected = df.groupby('id').apply(f)
        result = df.set_index('date').groupby('id').resample('D').asfreq()
        assert_frame_equal(result, expected)

        df = DataFrame({'date': pd.date_range(start='2016-01-01',
                                              periods=4,
                                              freq='W'),
                        'group': [1, 1, 2, 2],
                        'val': [5, 6, 7, 8]}).set_index('date')

        f = lambda x: x.resample('1D').ffill()
        expected = df.groupby('group').apply(f)
        result = df.groupby('group').resample('1D').ffill()
        assert_frame_equal(result, expected)

    def test_getitem(self):
        # Column selection before/after .resample() is equivalent.
        g = self.frame.groupby('A')

        expected = g.B.apply(lambda x: x.resample('2s').mean())

        result = g.resample('2s').B.mean()
        assert_series_equal(result, expected)

        result = g.B.resample('2s').mean()
        assert_series_equal(result, expected)

        result = g.resample('2s').mean().B
        assert_series_equal(result, expected)

    def test_getitem_multiple(self):

        # GH 13174
        # multiple calls after selection causing an issue with aliasing
        data = [{'id': 1, 'buyer': 'A'}, {'id': 2, 'buyer': 'B'}]
        df = DataFrame(data, index=pd.date_range('2016-01-01', periods=2))
        r = df.groupby('id').resample('1D')
        result = r['buyer'].count()
        expected = Series([1, 1],
                          index=pd.MultiIndex.from_tuples(
                              [(1, Timestamp('2016-01-01')),
                               (2, Timestamp('2016-01-02'))],
                              names=['id', None]),
                          name='buyer')
        assert_series_equal(result, expected)

        # Second call on the same selection must not be corrupted.
        result = r['buyer'].count()
        assert_series_equal(result, expected)

    def test_groupby_resample_on_api_with_getitem(self):
        # GH 17813
        df = pd.DataFrame({'id': list('aabbb'),
                           'date': pd.date_range('1-1-2016', periods=5),
                           'data': 1})
        exp = df.set_index('date').groupby('id').resample('2D')['data'].sum()
        result = df.groupby('id').resample('2D', on='date')['data'].sum()
        assert_series_equal(result, exp)

    def test_nearest(self):
        # GH 17496
        # Resample nearest
        index = pd.date_range('1/1/2000', periods=3, freq='T')
        result = Series(range(3), index=index).resample('20s').nearest()

        expected = Series(
            [0, 0, 1, 1, 1, 2, 2],
            index=pd.DatetimeIndex(
                ['2000-01-01 00:00:00', '2000-01-01 00:00:20',
                 '2000-01-01 00:00:40', '2000-01-01 00:01:00',
                 '2000-01-01 00:01:20', '2000-01-01 00:01:40',
                 '2000-01-01 00:02:00'],
                dtype='datetime64[ns]',
                freq='20S'))
        assert_series_equal(result, expected)

    def test_methods(self):
        # Every resampler method agrees with the groupby-apply form.
        g = self.frame.groupby('A')
        r = g.resample('2s')

        for f in ['first', 'last', 'median', 'sem', 'sum', 'mean',
                  'min', 'max']:
            result = getattr(r, f)()
            expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
            assert_frame_equal(result, expected)

        for f in ['size']:
            result = getattr(r, f)()
            expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
            assert_series_equal(result, expected)

        for f in ['count']:
            result = getattr(r, f)()
            expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
            assert_frame_equal(result, expected)

        # series only
        for f in ['nunique']:
            result = getattr(r.B, f)()
            expected = g.B.apply(lambda x: getattr(x.resample('2s'), f)())
            assert_series_equal(result, expected)

        for f in ['nearest', 'backfill', 'ffill', 'asfreq']:
            result = getattr(r, f)()
            expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
            assert_frame_equal(result, expected)

        result = r.ohlc()
        expected = g.apply(lambda x: x.resample('2s').ohlc())
        assert_frame_equal(result, expected)

        for f in ['std', 'var']:
            result = getattr(r, f)(ddof=1)
            expected = g.apply(lambda x: getattr(x.resample('2s'), f)(ddof=1))
            assert_frame_equal(result, expected)

    def test_apply(self):

        g = self.frame.groupby('A')
        r = g.resample('2s')

        # reduction
        expected = g.resample('2s').sum()

        def f(x):
            return x.resample('2s').sum()

        result = r.apply(f)
        assert_frame_equal(result, expected)

        def f(x):
            return x.resample('2s').apply(lambda y: y.sum())

        result = g.apply(f)
        assert_frame_equal(result, expected)

    def test_apply_with_mutated_index(self):
        # GH 15169
        # apply func returning an index unrelated to the original one.
        index = pd.date_range('1-1-2015', '12-31-15', freq='D')
        df = DataFrame(data={'col1': np.random.rand(len(index))}, index=index)

        def f(x):
            s = Series([1, 2], index=['a', 'b'])
            return s

        expected = df.groupby(pd.Grouper(freq='M')).apply(f)

        result = df.resample('M').apply(f)
        assert_frame_equal(result, expected)

        # A case for series
        expected = df['col1'].groupby(pd.Grouper(freq='M')).apply(f)
        result = df['col1'].resample('M').apply(f)
        assert_series_equal(result, expected)

    def test_resample_groupby_with_label(self):
        # GH 13235
        index = date_range('2000-01-01', freq='2D', periods=5)
        df = DataFrame(index=index,
                       data={'col0': [0, 0, 1, 1, 2], 'col1': [1, 1, 1, 1, 1]}
                       )
        result = df.groupby('col0').resample('1W', label='left').sum()

        mi = [np.array([0, 0, 1, 2]),
              pd.to_datetime(np.array(['1999-12-26', '2000-01-02',
                                       '2000-01-02', '2000-01-02'])
                             )
              ]
        mindex = pd.MultiIndex.from_arrays(mi, names=['col0', None])
        expected = DataFrame(data={'col0': [0, 0, 2, 2], 'col1': [1, 1, 2, 1]},
                             index=mindex
                             )

        assert_frame_equal(result, expected)

    def test_consistency_with_window(self):

        # consistent return values with window
        df = self.frame
        expected = pd.Int64Index([1, 2, 3], name='A')
        result = df.groupby('A').resample('2s').mean()
        assert result.index.nlevels == 2
        tm.assert_index_equal(result.index.levels[0], expected)

        result = df.groupby('A').rolling(20).mean()
        assert result.index.nlevels == 2
        tm.assert_index_equal(result.index.levels[0], expected)

    def test_median_duplicate_columns(self):
        # GH 14233
        # Duplicate column labels must not break median.
        df = DataFrame(np.random.randn(20, 3),
                       columns=list('aaa'),
                       index=pd.date_range('2012-01-01', periods=20, freq='s'))
        df2 = df.copy()
        df2.columns = ['a', 'b', 'c']
        expected = df2.resample('5s').median()
        result = df.resample('5s').median()
        expected.columns = result.columns
        assert_frame_equal(result, expected)
class TestTimeGrouper(object):
    """Tests for (deprecated) pd.TimeGrouper / TimeGrouper-based groupby,
    checked against plain groupby and .resample() equivalents.
    """

    def setup_method(self, method):
        self.ts = Series(np.random.randn(1000),
                         index=date_range('1/1/2000', periods=1000))

    def test_apply(self):
        # pd.TimeGrouper is deprecated; construction warns.
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            grouper = pd.TimeGrouper(freq='A', label='right', closed='right')

        grouped = self.ts.groupby(grouper)

        f = lambda x: x.sort_values()[-3:]

        applied = grouped.apply(f)
        expected = self.ts.groupby(lambda x: x.year).apply(f)

        applied.index = applied.index.droplevel(0)
        expected.index = expected.index.droplevel(0)
        assert_series_equal(applied, expected)

    def test_count(self):
        # count ignores NaN under both grouper styles and .resample().
        self.ts[::3] = np.nan

        expected = self.ts.groupby(lambda x: x.year).count()

        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            grouper = pd.TimeGrouper(freq='A', label='right', closed='right')
        result = self.ts.groupby(grouper).count()
        expected.index = result.index
        assert_series_equal(result, expected)

        result = self.ts.resample('A').count()
        expected.index = result.index
        assert_series_equal(result, expected)

    def test_numpy_reduction(self):
        result = self.ts.resample('A', closed='right').prod()

        expected = self.ts.groupby(lambda x: x.year).agg(np.prod)
        expected.index = result.index

        assert_series_equal(result, expected)

    def test_apply_iteration(self):
        # #2300
        N = 1000
        ind = pd.date_range(start="2000-01-01", freq="D", periods=N)
        df = DataFrame({'open': 1, 'close': 2}, index=ind)
        tg = TimeGrouper('M')

        _, grouper, _ = tg._get_grouper(df)

        # Errors
        grouped = df.groupby(grouper, group_keys=False)
        f = lambda df: df['close'] / df['open']

        # it works!
        result = grouped.apply(f)
        tm.assert_index_equal(result.index, df.index)

    def test_panel_aggregation(self):
        # Panel is deprecated; warnings are suppressed for the block.
        ind = pd.date_range('1/1/2000', periods=100)
        data = np.random.randn(2, len(ind), 4)

        with catch_warnings(record=True):
            wp = Panel(data, items=['Item1', 'Item2'], major_axis=ind,
                       minor_axis=['A', 'B', 'C', 'D'])

            tg = TimeGrouper('M', axis=1)
            _, grouper, _ = tg._get_grouper(wp)
            bingrouped = wp.groupby(grouper)
            binagg = bingrouped.mean()

            def f(x):
                assert (isinstance(x, Panel))
                return x.mean(1)

            result = bingrouped.agg(f)
            tm.assert_panel_equal(result, binagg)

    def test_fails_on_no_datetime_index(self):
        # TimeGrouper only supports datetime-like indexes.
        index_names = ('Int64Index', 'Index', 'Float64Index', 'MultiIndex')
        index_funcs = (tm.makeIntIndex,
                       tm.makeUnicodeIndex, tm.makeFloatIndex,
                       lambda m: tm.makeCustomIndex(m, 2))
        n = 2
        for name, func in zip(index_names, index_funcs):
            index = func(n)
            df = DataFrame({'a': np.random.randn(n)}, index=index)
            with tm.assert_raises_regex(TypeError,
                                        "Only valid with "
                                        "DatetimeIndex, TimedeltaIndex "
                                        "or PeriodIndex, but got an "
                                        "instance of %r" % name):
                df.groupby(TimeGrouper('D'))

    def test_aaa_group_order(self):
        # GH 12840
        # check TimeGrouper perform stable sorts
        n = 20
        data = np.random.randn(n, 4)
        df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
        df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2),
                     datetime(2013, 1, 3), datetime(2013, 1, 4),
                     datetime(2013, 1, 5)] * 4
        grouped = df.groupby(TimeGrouper(key='key', freq='D'))
        tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 1)),
                              df[::5])
        tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 2)),
                              df[1::5])
        tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 3)),
                              df[2::5])
        tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 4)),
                              df[3::5])
        tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 5)),
                              df[4::5])

    def test_aggregate_normal(self):
        # check TimeGrouper's aggregation is identical as normal groupby

        n = 20
        data = np.random.randn(n, 4)
        normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
        normal_df['key'] = [1, 2, 3, 4, 5] * 4

        dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
        dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2),
                        datetime(2013, 1, 3), datetime(2013, 1, 4),
                        datetime(2013, 1, 5)] * 4

        normal_grouped = normal_df.groupby('key')
        dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))

        for func in ['min', 'max', 'prod', 'var', 'std', 'mean']:
            expected = getattr(normal_grouped, func)()
            dt_result = getattr(dt_grouped, func)()
            expected.index = date_range(start='2013-01-01', freq='D',
                                        periods=5, name='key')
            assert_frame_equal(expected, dt_result)

        for func in ['count', 'sum']:
            expected = getattr(normal_grouped, func)()
            expected.index = date_range(start='2013-01-01', freq='D',
                                        periods=5, name='key')
            dt_result = getattr(dt_grouped, func)()
            assert_frame_equal(expected, dt_result)

        # GH 7453
        for func in ['size']:
            expected = getattr(normal_grouped, func)()
            expected.index = date_range(start='2013-01-01', freq='D',
                                        periods=5, name='key')
            dt_result = getattr(dt_grouped, func)()
            assert_series_equal(expected, dt_result)

        # GH 7453
        for func in ['first', 'last']:
            expected = getattr(normal_grouped, func)()
            expected.index = date_range(start='2013-01-01', freq='D',
                                        periods=5, name='key')
            dt_result = getattr(dt_grouped, func)()
            assert_frame_equal(expected, dt_result)

        # if TimeGrouper is used included, 'nth' doesn't work yet

        """
        for func in ['nth']:
            expected = getattr(normal_grouped, func)(3)
            expected.index = date_range(start='2013-01-01',
                                        freq='D', periods=5, name='key')
            dt_result = getattr(dt_grouped, func)(3)
            assert_frame_equal(expected, dt_result)
        """

    # NOTE(review): method name has a typo ("entirly"); kept because
    # renaming would change test discovery.
    @pytest.mark.parametrize('method, unit', [
        ('sum', 0),
        ('prod', 1),
    ])
    def test_resample_entirly_nat_window(self, method, unit):
        # An all-NaN bin yields the reduction's identity (0 for sum,
        # 1 for prod) unless min_count requires observations.
        s = pd.Series([0] * 2 + [np.nan] * 2,
                      index=pd.date_range('2017', periods=4))
        # 0 / 1 by default
        result = methodcaller(method)(s.resample("2d"))
        expected = pd.Series([0.0, unit],
                             index=pd.to_datetime(['2017-01-01',
                                                   '2017-01-03']))
        tm.assert_series_equal(result, expected)

        # min_count=0
        result = methodcaller(method, min_count=0)(s.resample("2d"))
        expected = pd.Series([0.0, unit],
                             index=pd.to_datetime(['2017-01-01',
                                                   '2017-01-03']))
        tm.assert_series_equal(result, expected)

        # min_count=1
        result = methodcaller(method, min_count=1)(s.resample("2d"))
        expected = pd.Series([0.0, np.nan],
                             index=pd.to_datetime(['2017-01-01',
                                                   '2017-01-03']))
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize('func, fill_value', [
        ('min', np.nan),
        ('max', np.nan),
        ('sum', 0),
        ('prod', 1),
        ('count', 0),
    ])
    def test_aggregate_with_nat(self, func, fill_value):
        # check TimeGrouper's aggregation is identical as normal groupby
        # if NaT is included, 'var', 'std', 'mean', 'first','last'
        # and 'nth' doesn't work yet

        n = 20
        data = np.random.randn(n, 4).astype('int64')
        normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
        normal_df['key'] = [1, 2, np.nan, 4, 5] * 4

        dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
        dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT,
                        datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4

        normal_grouped = normal_df.groupby('key')
        dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))

        normal_result = getattr(normal_grouped, func)()
        dt_result = getattr(dt_grouped, func)()

        # TimeGrouper emits a row for the empty Jan-3 bin; pad the
        # normal result with the reduction's fill value to match.
        pad = DataFrame([[fill_value] * 4], index=[3],
                        columns=['A', 'B', 'C', 'D'])
        expected = normal_result.append(pad)
        expected = expected.sort_index()
        expected.index = date_range(start='2013-01-01', freq='D',
                                    periods=5, name='key')
        assert_frame_equal(expected, dt_result)
        assert dt_result.index.name == 'key'

    def test_aggregate_with_nat_size(self):
        # GH 9925
        n = 20
        data = np.random.randn(n, 4).astype('int64')
        normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
        normal_df['key'] = [1, 2, np.nan, 4, 5] * 4

        dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
        dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT,
                        datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4

        normal_grouped = normal_df.groupby('key')
        dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))

        normal_result = normal_grouped.size()
        dt_result = dt_grouped.size()

        pad = Series([0], index=[3])
        expected = normal_result.append(pad)
        expected = expected.sort_index()
        expected.index = date_range(start='2013-01-01', freq='D',
                                    periods=5, name='key')
        assert_series_equal(expected, dt_result)

        assert dt_result.index.name == 'key'

    def test_repr(self):
        # GH18203
        result = repr(TimeGrouper(key='A', freq='H'))
        expected = ("TimeGrouper(key='A', freq=<Hour>, axis=0, sort=True, "
                    "closed='left', label='left', how='mean', "
                    "convention='e', base=0)")
        assert result == expected

    @pytest.mark.parametrize('method, unit', [
        ('sum', 0),
        ('prod', 1),
    ])
    def test_upsample_sum(self, method, unit):
        # GH-style: upsampled empty bins take the identity unless
        # min_count demands real observations.
        s = pd.Series(1, index=pd.date_range("2017", periods=2, freq="H"))
        resampled = s.resample("30T")
        index = pd.to_datetime(['2017-01-01T00:00:00',
                                '2017-01-01T00:30:00',
                                '2017-01-01T01:00:00'])

        # 0 / 1 by default
        result = methodcaller(method)(resampled)
        expected = pd.Series([1, unit, 1], index=index)
        tm.assert_series_equal(result, expected)

        # min_count=0
        result = methodcaller(method, min_count=0)(resampled)
        expected = pd.Series([1, unit, 1], index=index)
        tm.assert_series_equal(result, expected)

        # min_count=1
        result = methodcaller(method, min_count=1)(resampled)
        expected = pd.Series([1, np.nan, 1], index=index)
        tm.assert_series_equal(result, expected)

        # min_count>1
        result = methodcaller(method, min_count=2)(resampled)
        expected = pd.Series([np.nan, np.nan, np.nan], index=index)
        tm.assert_series_equal(result, expected)
| 39.55112 | 79 | 0.535954 |
7d40576b22bc776c7186de9f450f573f6bf7ae88 | 1,718 | py | Python | jp.atcoder/abc016/abc016_1/21878992.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc016/abc016_1/21878992.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc016/abc016_1/21878992.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import Generator, NoReturn
class StdReader:
    """Whitespace-token reader over ``sys.stdin`` for competitive
    programming: each call yields the next whitespace-separated token,
    reading further input lines lazily as needed.
    """

    def __init__(self) -> None:
        # -> None (not NoReturn): an initializer returns normally;
        # NoReturn is reserved for functions that never return.
        import sys

        self.buf = sys.stdin.buffer
        self.lines = self.async_readlines()
        # Start with an exhausted iterator so the first __call__ falls
        # through to fetching the first real line (the original left
        # this attribute unassigned and relied on a bare except to
        # swallow the resulting AttributeError).
        self.chunks: Generator = iter(())

    def async_readlines(self) -> Generator:
        """Yield, forever, a fresh token generator for each input line."""
        while True:
            gen = self.line_chunks()
            yield gen

    def line_chunks(self) -> Generator:
        """Yield the whitespace-separated tokens of one input line."""
        ln = self.buf.readline()
        for chunk in ln.split():
            yield chunk

    def __call__(self) -> bytes:
        """Return the next token as raw bytes.

        Recurses onto the next line when the current one is exhausted.
        Note: at end of input this recurses indefinitely (same as the
        original behavior); callers are expected to know the input size.
        """
        try:
            chunk = next(self.chunks)
        except StopIteration:
            # Current line exhausted: advance to the next line's tokens.
            # (Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt and genuine bugs.)
            self.chunks = next(self.lines)
            chunk = self()
        return chunk

    def str(self) -> str:
        """Return the next token decoded as text."""
        b = self()
        return b.decode()

    def int(self) -> int:
        """Return the next token parsed as an integer."""
        return int(self.str())
from abc import ABC, abstractmethod
class Solver(ABC):
    """Template for a contest solution: subclasses implement
    ``prepare`` (read input) and ``solve`` (compute and print).
    """

    def __init__(self):
        # Shared token reader over stdin for subclasses.
        self.reader = StdReader()

    def __call__(self):
        # Run the two phases in order.
        self.prepare()
        self.solve()

    @abstractmethod
    def prepare(self):
        ...

    @abstractmethod
    def solve(self):
        ...
import numpy as np
class Problem(
    Solver,
):
    """AtCoder ABC016 A: print YES when month m is divisible by day d."""

    def prepare(self):
        # Read the two integers m and d from stdin.
        reader = self.reader
        m = reader.int()
        d = reader.int()
        self.m = m
        self.d = d

    def solve(self):
        m, d = self.m, self.d
        print(
            "YES" if m % d == 0 else "NO",
        )
def main():
    """Entry point: run the solver once per test case (single case here)."""
    n_cases = 1
    # n_cases = StdReader().int()
    for _ in range(n_cases):
        Problem()()
if __name__ == "__main__":
main()
| 16.361905 | 43 | 0.485448 |
31009814631d33c2e57bcf019bdba6446cab7edb | 387 | py | Python | mymap/mymap/wsgi.py | silveiratcl/teste_GeoDjango | 02f3566ae26f8538b2fe3f50045d4c619ff79ced | [
"PostgreSQL"
] | null | null | null | mymap/mymap/wsgi.py | silveiratcl/teste_GeoDjango | 02f3566ae26f8538b2fe3f50045d4c619ff79ced | [
"PostgreSQL"
] | null | null | null | mymap/mymap/wsgi.py | silveiratcl/teste_GeoDjango | 02f3566ae26f8538b2fe3f50045d4c619ff79ced | [
"PostgreSQL"
] | null | null | null | """
WSGI config for mymap project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mymap.settings')
application = get_wsgi_application()
| 22.764706 | 78 | 0.782946 |
c8fded3a6fd93d5e7ce6b551f4029d64a77efd8d | 29,463 | py | Python | ads/ads.py | adamcath/ads | 12078a28d108811a3a90a937a3fca2ba83462e72 | [
"MIT"
] | 15 | 2016-04-10T06:28:31.000Z | 2020-12-01T17:41:20.000Z | ads/ads.py | adamcath/ads | 12078a28d108811a3a90a937a3fca2ba83462e72 | [
"MIT"
] | 33 | 2015-02-23T02:00:42.000Z | 2018-06-04T16:28:45.000Z | ads/ads.py | adamcath/ads | 12078a28d108811a3a90a937a3fca2ba83462e72 | [
"MIT"
] | 6 | 2015-07-06T18:02:04.000Z | 2018-04-22T02:37:45.000Z | import sys
class colors:
    """ANSI escape sequences used to colorize terminal output."""
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # blue — used by debug()
    OKGREEN = '\033[92m'    # green — used by info()
    WARNING = '\033[93m'    # yellow
    FAIL = '\033[91m'       # red — used by error()
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def debug(msg):
    """Write msg to stdout in blue and flush immediately."""
    sys.stdout.write(colors.OKBLUE + msg + colors.ENDC + "\n")
    sys.stdout.flush()
def info(msg):
    """Write a green '--- ' prefixed progress line to stdout and flush."""
    line = colors.OKGREEN + "--- " + msg + colors.ENDC
    sys.stdout.write(line + "\n")
    sys.stdout.flush()
def error(msg):
    """Write a red '!!! ' prefixed line to stderr and flush."""
    line = "".join([colors.FAIL, "!!! ", msg, "\n", colors.ENDC])
    sys.stderr.write(line)
    sys.stderr.flush()
def separator():
    """A horizontal-rule line used to delimit captured command output."""
    return "--------------------------------"
try:
import yaml
except ImportError:
error(
"ads requires the python package 'pyyaml'.\n"
"Please install it with 'pip install pyyaml' or 'easy_install pyyaml'\n"
"(disregard the message about 'forcing --no-libyaml')")
sys.exit(1)
import os
import tempfile
import subprocess
import argparse
import glob
import time
from collections import OrderedDict
##############################################
# Treelisting
##############################################
class Treelisting:
    """Accumulates (heading, {name: description}, empty_msg) sections and
    pretty-prints them with keys right-aligned to the longest key."""

    def __init__(self, sections=None):
        self.sections = sections or []

    def with_section(self, heading, listing_dict, empty_msg=None):
        """Append one section; returns self so calls can be chained."""
        self.sections.append((heading, listing_dict, empty_msg))
        return self

    def pretty_print(self):
        """Print all sections; prints nothing when no section has entries."""
        every_key = [key
                     for (_, entries, _) in self.sections
                     for key in entries]
        if not every_key:
            return
        width = max(len(key) for key in every_key) + 1
        row_format = "%" + str(width) + "s: %s"
        for heading, entries, empty_msg in self.sections:
            if entries:
                print("\n" + heading)
                for key, value in entries.items():
                    print(row_format % (key, value))
            else:
                # Empty section: show its "nothing here" hint instead.
                print("\n" + heading + "\n " + empty_msg)
##############################################
# subprocess stuff
##############################################
def _shell_get_output(cmd_str, working_dir):
process = subprocess.Popen(
cmd_str,
shell=True,
stdout=subprocess.PIPE,
stderr=file("/dev/null", "w"),
close_fds=True,
cwd=working_dir)
return process.communicate()[0]
STREAM = "stream"
BUFFER = "buffer"
NULL = "null"
def _shell(cmd_str, working_dir, output_mode=STREAM):
if output_mode == STREAM:
out_file = None
elif output_mode == BUFFER:
out_file = tempfile.NamedTemporaryFile()
elif output_mode == NULL:
out_file = open(os.devnull, 'w')
else:
raise Error("Unknown output_mode '%s'" % output_mode)
# Write the command into a file and invoke bash on it
cmd_file = tempfile.NamedTemporaryFile()
cmd_file.write("""
echo 'cd %s'
cat <<ADS_EOF
%s
ADS_EOF
%s
""" % (working_dir, cmd_str, cmd_str))
cmd_file.flush()
try:
status = subprocess.Popen(
["/bin/bash", cmd_file.name],
close_fds=True,
cwd=working_dir,
# Same file for stdout and stderr to preserve order (roughly)
stdout=out_file,
stderr=out_file).wait()
except KeyboardInterrupt:
# Suppress python from printing a stack trace
status = 47
pass
cmd_file.close()
if output_mode == BUFFER:
out_file.seek(0)
output = out_file.read()
out_file.close()
return status, output
else:
return status, None
##############################################
# YML stuff
##############################################
class ParseProjectException(Exception):
    """Raised when an ads project/service YAML spec is malformed.

    The explicit __init__ override was removed: it only forwarded msg to
    Exception, which the inherited constructor already does.
    """
def _expect(expected_type, actual, origin_file):
if not isinstance(actual, expected_type):
raise ParseProjectException(
"%s: Expected %s, got %s: %s" %
(origin_file, str(expected_type), type(actual), str(actual)))
def _load_spec_file(path):
    """Parse the YAML spec file at `path` into a dict (empty file -> {}).

    Raises ParseProjectException when the top-level value is not a mapping.

    Fix: the py2-only ``file()`` builtin (NameError on Python 3) is replaced
    with ``open()`` in a context manager so the handle is always closed.
    """
    with open(path, "r") as spec_file:
        result = yaml.safe_load(spec_file.read()) or {}
    _expect(dict, result, path)
    return result
##############################################
# Service
##############################################
def _abs_to_cwd_rel(abspath):
return os.path.relpath(abspath, os.path.abspath(os.curdir))
class Service:
    """One runnable service, defined by an ads.yml in its home directory."""

    @classmethod
    def load(cls, svc_yml, name):
        """Build a Service named `name` from the spec file at svc_yml."""
        spec = _load_spec_file(svc_yml)
        return Service(name,
                       os.path.dirname(svc_yml),
                       spec.get("description"),
                       spec.get("start_cmd"),
                       spec.get("stop_cmd"),
                       spec.get("status_cmd"),
                       spec.get("log_paths"),
                       spec.get("err_log_paths"))

    @classmethod
    def as_printable_dict(cls, services):
        """Map service name -> description for listing output."""
        return dict((svc.name, svc.get_description_or_default())
                    for svc in services)

    def __init__(self, name, home, description=None,
                 start_cmd=None, stop_cmd=None, status_cmd=None,
                 log_paths=None, err_log_paths=None):
        self.name = name
        self.home = home
        self.description = description
        self.start_cmd = start_cmd
        self.stop_cmd = stop_cmd
        self.status_cmd = status_cmd
        self.log_paths = log_paths or []
        self.err_log_paths = err_log_paths or []

    def resolve_logs_relative_to_cwd(self, log_type):
        """Expand this service's log globs to cwd-relative paths of files
        that actually exist. log_type is 'general' or 'error'."""
        if log_type == "general":
            patterns = self.log_paths
        elif log_type == "error":
            patterns = self.err_log_paths
        else:
            assert False, "Unknown log_type %s" % log_type
        resolved = []
        for pattern in patterns:
            absolute_glob = os.path.join(self.home, pattern)
            resolved.extend(_abs_to_cwd_rel(match)
                            for match in glob.iglob(absolute_glob))
        return resolved

    def resolve_home_relative_to_cwd(self):
        """This service's home directory, relative to the cwd."""
        return _abs_to_cwd_rel(self.home)

    def get_description_or_default(self):
        return self.description or "(No description)"

    def __repr__(self):
        return self.name
##############################################
# ServiceSet
##############################################
class BadSelectorException(Exception):
    """Raised for unknown or circular service/group selectors.

    The explicit __init__ override was removed: it only forwarded msg to
    Exception, which the inherited constructor already does.
    """
def _resolve(selector, project, service_sets_by_name, selector_stack):
assert selector
if selector in selector_stack:
stack_as_list = list(selector_stack) + [selector]
raise BadSelectorException(
"Definition of selector '%s' is circular: %s" %
(stack_as_list[0], " -> ".join(stack_as_list)))
if selector == "all":
return frozenset(project.services_by_name.keys())
if selector in project.services_by_name:
return frozenset([project.services_by_name[selector].name])
if selector in service_sets_by_name:
selector_stack[selector] = True
sub_results = map(lambda s: _resolve(s,
project,
service_sets_by_name,
selector_stack),
service_sets_by_name[selector].selectors)
selector_stack.popitem(True)
return frozenset(reduce(frozenset.__or__, sub_results))
stack_as_list = list(selector_stack) + [selector]
raise BadSelectorException(
"No service or selector named '%s'. Reference chain: %s" %
(selector, " -> ".join(stack_as_list)))
class ServiceSet:
    """A named group of selectors (service names, 'all', or other groups)."""

    @classmethod
    def load(cls, name, spec, origin_file):
        """Parse one group: a YAML list of selector strings."""
        _expect(list, spec, origin_file)
        for entry in spec:
            _expect(str, entry, origin_file)
        return ServiceSet(name, list(spec))

    @classmethod
    def load_multiple(cls, spec, origin_file):
        """Parse the 'groups' mapping (name -> selector list); {} when absent."""
        spec = spec or {}
        _expect(dict, spec, origin_file)
        return [ServiceSet.load(group_name, group_spec, origin_file)
                for (group_name, group_spec) in spec.items()]

    @classmethod
    def load_default(cls, spec, origin_file):
        """Parse the optional 'default' selector string; None when absent."""
        if not spec:
            return None
        _expect(str, spec, origin_file)
        return spec

    @classmethod
    def resolve(cls, selector, project, service_sets):
        """Expand selector against the project plus the given groups."""
        sets_by_name = dict((s.name, s) for s in service_sets)
        return _resolve(selector, project, sets_by_name, OrderedDict())

    @classmethod
    def as_printable_dict(cls, service_sets):
        """Map group name -> comma-joined selectors for listing output."""
        return dict((s.name, ', '.join(s.selectors)) for s in service_sets)

    def __init__(self, name, selector_set):
        self.name = name
        self.selectors = selector_set
##############################################
# Project
##############################################
def _find_project_yml(search_start):
maybe_root = os.path.join(search_start, "adsroot.yml")
if os.path.isfile(maybe_root):
return maybe_root
parent = os.path.dirname(search_start)
if parent == search_start:
return None
else:
return _find_project_yml(parent)
def _find_service_ymls(project_root):
    """Find every service's ads.yml under project_root, excluding any that
    live inside a nested ads project (a subtree with its own adsroot.yml).

    NOTE(review): on Python 3 _shell_get_output returns bytes, so these
    basename/startswith comparisons against str would never match —
    presumably this ran under Python 2; confirm before porting.
    """
    # Shell out to find(1); -mindepth 2 skips the root's own adsroot.yml.
    find_output = _shell_get_output(
        "/usr/bin/find . -mindepth 2 -name ads.yml -or -name adsroot.yml",
        project_root).splitlines()
    # Directories that start a nested project: everything under them is
    # excluded from this project's service list.
    nested_project_dirs = [
        os.path.dirname(path)
        for path in find_output
        if os.path.basename(path) == "adsroot.yml"
    ]

    def in_nested_project_dir(path_str):
        # True when path_str lies under any nested project's directory.
        for dir_path in nested_project_dirs:
            if path_str.startswith(dir_path):
                return True
        return False

    # BEWARE: O(n*m) algorithm!
    return [
        os.path.join(project_root, p)
        for p in find_output
        if os.path.basename(p) == "ads.yml" and not in_nested_project_dir(p)
    ]
def _adsfiles_to_service_names(adsfiles):
svc_name_to_file = {}
file_to_svc_name = {}
for f in adsfiles:
basename = os.path.basename(os.path.dirname(f))
if basename in svc_name_to_file:
raise Exception("not yet implemented")
svc_name_to_file[basename] = f
file_to_svc_name[f] = basename
return file_to_svc_name
class Project:
    """An ads project: the adsroot.yml dir plus its discovered services."""

    @classmethod
    def load_from_dir(cls, root_dir):
        """Locate the enclosing adsroot.yml and load the project.

        Returns None when root_dir is not inside an ads project."""
        project_yml = _find_project_yml(os.path.abspath(root_dir))
        if not project_yml:
            return None
        service_ymls = _find_service_ymls(os.path.dirname(project_yml))
        return Project.load_from_files(project_yml, service_ymls)

    @classmethod
    def load_from_files(cls, project_yml, svc_ymls):
        """Build a Project from its adsroot.yml and the service ads.yml paths."""
        spec = _load_spec_file(project_yml)
        home = os.path.dirname(project_yml)
        name = spec.get("name") or os.path.basename(home)
        file_to_name = _adsfiles_to_service_names(svc_ymls)
        services = [Service.load(path, svc_name)
                    for (path, svc_name) in file_to_name.items()]
        service_sets = ServiceSet.load_multiple(
            spec.get("groups"), project_yml)
        default_selector = ServiceSet.load_default(
            spec.get("default"), project_yml) or "all"
        return Project(name, home, services, service_sets, default_selector)

    def __init__(self, name, home,
                 services=None, service_sets=None, default_selector="all"):
        self.name = name
        self.home = home
        self.services_by_name = dict((svc.name, svc)
                                     for svc in (services or []))
        self.service_sets = service_sets or []
        self.default_selector = default_selector
##############################################
# Profile
##############################################
class Profile:
    """Per-user ads settings, loaded from ~/.ads_profile.yml."""

    @classmethod
    def load_from_dir(cls, profile_dir):
        """Load .ads_profile.yml from profile_dir; empty Profile if absent."""
        rc_path = os.path.join(profile_dir, ".ads_profile.yml")
        if not os.path.isfile(rc_path):
            return Profile()
        rc_spec = _load_spec_file(rc_path)
        groups = ServiceSet.load_multiple(rc_spec.get("groups"), rc_path)
        default = ServiceSet.load_default(rc_spec.get("default"), rc_path)
        return Profile(groups, default)

    def __init__(self, service_sets=None, default_selector=None):
        self.service_sets = service_sets or []
        self.default_selector = default_selector
##############################################
# Ads
##############################################
class Ads:
    """Facade tying together the current Project and the user's Profile."""

    @staticmethod
    def load_from_fs(root_dir, profile_dir):
        """Load the project enclosing root_dir plus the profile stored in
        profile_dir; returns None when root_dir is not inside a project."""
        project = Project.load_from_dir(root_dir)
        if not project:
            return None
        profile = Profile.load_from_dir(profile_dir) or Profile()
        return Ads(project, profile)

    @staticmethod
    def load_from_env():
        """Load from the cwd, honoring $ADS_PROFILE_HOME (default: ~)."""
        profile_home = os.getenv("ADS_PROFILE_HOME")
        if not profile_home or len(profile_home) == 0:
            profile_home = os.path.expanduser("~")
        return Ads.load_from_fs(os.curdir, profile_home)

    def __init__(self, project, profile=None):
        # Fix: the original signature was `profile=Profile()`, which creates
        # one shared Profile instance at class-definition time (the mutable
        # default argument pitfall); build a fresh one per instance instead.
        self.project = project
        self.profile = profile if profile is not None else Profile()

    def resolve(self, selector):
        """Expand a selector ('default', 'all', a service, or a group) into
        a frozenset of service names; project and profile groups are both
        searched."""
        if selector == "default":
            selector = self.get_default_selector()
        return ServiceSet.resolve(
            selector,
            self.project,
            self.project.service_sets + self.profile.service_sets)

    def get_default_selector(self):
        """The profile's default selector wins over the project's."""
        return (self.profile.default_selector or
                self.project.default_selector)

    def list(self):
        """Print services, groups (project and profile), and the default."""
        default_selector = self.get_default_selector()
        try:
            default_description = ', '.join(self.resolve(default_selector))
        except BadSelectorException:
            default_description = "(Unresolved)"
        if default_description == default_selector:
            # The default resolved to a single service with the same name;
            # show that service's description instead of echoing the name.
            default_service = self.project.services_by_name[default_selector]
            default_description = default_service.get_description_or_default()
        (Treelisting()
            .with_section(
                "All services in current project (%s):" % self.project.name,
                Service.as_printable_dict(self.project.services_by_name.values()),
                "None (create ads.yml files in this dir tree)")
            .with_section(
                "Groups defined in current project:",
                ServiceSet.as_printable_dict(self.project.service_sets),
                "None (add 'groups' to adsroot.yml)")
            .with_section(
                "Groups defined in your ads profile:",
                ServiceSet.as_printable_dict(self.profile.service_sets),
                "None (add 'groups' to ~/.ads_profile.yml)")
            .with_section(
                "Default service for commands if none are specified:",
                {default_selector: default_description})
        ).pretty_print()
##############################################
# Customized ArgumentParser
##############################################
class MyArgParser(argparse.ArgumentParser):
    """ArgumentParser that prints the full help (like git) instead of the
    terse usage line when required arguments are missing."""

    def error(self, message):
        if "too few arguments" not in message:
            # Any other error keeps argparse's default handling.
            super(MyArgParser, self).error(message)
            return
        self.print_help()
        sys.exit(2)
##############################################
# AdsCommand
##############################################
class AdsCommandException(Exception):
    """Base for subcommand failures; carries the process exit code."""

    def __init__(self, exit_code, msg=None):
        self.exit_code = exit_code
        self.msg = msg


class UsageError(AdsCommandException):
    """The tool was invoked incorrectly (exit 2)."""

    def __init__(self, msg):
        super(UsageError, self).__init__(2, msg)


class NotFound(AdsCommandException):
    """A requested service/group/log could not be found (exit 11)."""

    def __init__(self, msg):
        super(NotFound, self).__init__(11, msg)


class InternalError(AdsCommandException):
    """An unexpected internal failure (exit 50)."""

    def __init__(self, msg):
        super(InternalError, self).__init__(50, msg)


class StartFailed(AdsCommandException):
    """At least one service failed to start (exit 21)."""

    def __init__(self, msg):
        super(StartFailed, self).__init__(21, msg)


class StopFailed(AdsCommandException):
    """At least one service failed to stop (exit 22)."""

    def __init__(self, msg):
        super(StopFailed, self).__init__(22, msg)


class SomeDown(AdsCommandException):
    """At least one service is not running (exit 23, no message)."""

    def __init__(self):
        super(SomeDown, self).__init__(23)
def _load_or_die():
    """Load the ads project for the cwd, or abort with a usage error."""
    ads = Ads.load_from_env()
    if ads:
        return ads
    raise UsageError(
        "ads must be run from within an ads project. "
        "See README for more.")
def _tail(files):
    """Follow the given files with `tail -F`.

    Returns True when tail exited cleanly or was ended by ctrl+c
    (47 is _shell's KeyboardInterrupt sentinel, seen on Mac OS).
    """
    command = "tail -F " + " \\\n\t".join(files)
    exit_status = _shell(command, os.curdir)[0]
    return exit_status in (0, 47)
def _cat(files):
    """Dump the given files to stdout via `cat`; True when cat exits 0."""
    command = "cat " + " ".join(files)
    return _shell(command, os.curdir)[0] == 0
def _status(service, verbose):
    """Print one status line for `service` and return whether it is running.

    A service with no status_cmd is reported as not running.
    """
    if not service.status_cmd:
        running = False
        msg = "status command not defined"
    else:
        if verbose:
            debug("Checking if %s is running" % service.name)
        # Running == status command exits 0; its output is streamed only
        # in verbose mode, otherwise discarded.
        running = _shell(service.status_cmd,
                         service.home,
                         verbose and STREAM or NULL)[0] == 0
        # py2-style conditional: "ok" when running, else "not running".
        msg = running and "ok" or "not running"
    info(service.name + ": " + msg)
    return running
def _is_running(service, verbose):
    """True when the service's status_cmd exits 0 (i.e. the service is up)."""
    mode = STREAM if verbose else NULL
    return _shell(service.status_cmd, service.home, mode)[0] == 0
def _up(service, verbose):
    """Start `service` unless it is already running.

    Returns True when the service ends up running, False on any failure
    (missing status/start command, or a failing start command). In
    non-verbose mode the start command's output is buffered and only
    replayed to stderr on failure.
    """
    # Is it running?
    if not service.status_cmd:
        # Without a status command we cannot make starting idempotent.
        error("Status command not defined for " + service.name +
              "; can't tell if it's already running")
        return False
    if verbose:
        debug("Checking if %s is already running" % service.name)
    if _is_running(service, verbose):
        info(service.name + " is already running")
        return True
    # Is start defined?
    if not service.start_cmd:
        error("Start command not defined for " + service.name)
        return False
    # Do it
    info("Starting " + service.name)
    (status, out) = _shell(service.start_cmd, service.home,
                           verbose and STREAM or BUFFER)
    if status == 0:
        if verbose:
            debug("Started " + service.name)
        return True
    else:
        error("Failed to start " + service.name)
        if not verbose:
            # Replay the buffered output so the user sees why it failed.
            sys.stderr.write(out)
            error(separator())
        else:
            # Output was already streamed
            pass
        return False
def _down(service, verbose):
    """Stop `service` unless it is already stopped.

    The stop command may return before the service is actually down, so
    after a successful stop the status is polled every 0.5s; more than 10
    successful stop attempts with the service still up is treated as a bug
    in the service's ads.yml. Returns True when the service ends up
    stopped, False otherwise.
    """
    # Is it running?
    if not service.status_cmd:
        error("Status command not defined for " + service.name +
              "; can't tell if it's already stopped")
        return False
    if verbose:
        debug("Checking if %s is running" % service.name)
    if not _is_running(service, verbose):
        info(service.name + " is already stopped")
        return True
    # Is stop defined?
    if not service.stop_cmd:
        error("Stop command not defined for " + service.name)
        return False
    # Do it
    attempts = 0
    info("Stopping %s" % service.name)
    while True:
        (status, out) = _shell(service.stop_cmd, service.home,
                               verbose and STREAM or BUFFER)
        attempts = attempts + 1
        if status == 0:
            if verbose:
                debug("Stop command succeeded")
        else:
            error("Stop command failed")
            if not verbose:
                # Replay the buffered output so the user sees why.
                sys.stderr.write(out)
                error(separator())
            else:
                # Output was already streamed
                pass
            return False
        if not _is_running(service, verbose):
            if verbose:
                debug("Status says %s is down" % service.name)
            return True
        elif attempts > 10:
            error(("Stop command succeeded, but status says %s " +
                   "is still running. This is a bug in your ads.yml. " +
                   "If you can reproduce this, try with -v to debug.")
                  % service.name)
            return False
        else:
            if verbose:
                debug("%s is still running after stop command; retrying" %
                      service.name)
            # Give the service a moment to exit before re-running stop.
            time.sleep(0.5)
def _collect_rel_homes(services):
return [s.resolve_home_relative_to_cwd() for s in services]
def _add_verbose_arg(parser):
parser.add_argument(
"-v", "--verbose",
action="store_true",
help="show output of commands that ads delegates to")
def _add_services_arg(parser):
parser.add_argument(
"service",
nargs="*",
help="The services or groups to act on")
def _resolve_selectors(ads, selectors, fail_if_empty):
if len(selectors) == 0:
selectors = ["default"]
try:
service_names = reduce(frozenset.__or__,
[ads.resolve(s) for s in selectors])
except BadSelectorException as e:
raise NotFound(str(e))
services = map(
lambda name: ads.project.services_by_name[name],
sorted(service_names))
if fail_if_empty and len(services) == 0:
raise NotFound("No services found that match '%s'" %
' '.join(selectors))
return services
def _collect_logs_nonempty(services, log_type):
all_logs = []
for s in services:
all_logs += s.resolve_logs_relative_to_cwd(log_type)
if len(all_logs) == 0:
raise NotFound("No %s log files found for services %s" %
(log_type, str(services)))
return all_logs
def list_func(args):
    """CLI 'list': print services, groups, and the default selector."""
    parser = MyArgParser(prog=cmd_list.name, description=cmd_list.description)
    parser.parse_args(args)
    _load_or_die().list()
def up(args):
    """CLI 'up': ensure every selected service is running.

    Raises StartFailed when any service failed to start.

    Fix: the start attempts are materialized into a list before all();
    with Python 3's lazy map() the original would stop attempting the
    remaining services as soon as one failed.
    """
    parser = MyArgParser(prog=cmd_up.name, description=cmd_up.description)
    _add_verbose_arg(parser)
    _add_services_arg(parser)
    parsed_args = parser.parse_args(args)
    ads = _load_or_die()
    services = _resolve_selectors(ads, parsed_args.service, True)
    if len(services) > 1:
        info("Starting " + str(services))
    results = [_up(service, parsed_args.verbose) for service in services]
    if not all(results):
        raise StartFailed("One or more services failed to start")
def down(args):
    """CLI 'down': ensure every selected service is stopped.

    Raises StopFailed when any service failed to stop.

    Fix: stop attempts are materialized into a list before all(); with
    Python 3's lazy map() the original would skip the remaining services
    after the first failure.
    """
    parser = MyArgParser(prog=cmd_down.name, description=cmd_down.description)
    _add_verbose_arg(parser)
    _add_services_arg(parser)
    parsed_args = parser.parse_args(args)
    ads = _load_or_die()
    services = _resolve_selectors(ads, parsed_args.service, True)
    results = [_down(service, parsed_args.verbose) for service in services]
    if not all(results):
        raise StopFailed("One or more services failed to stop")
def bounce(args):
    """CLI 'bounce': stop and then restart the selected services.

    Both phases run over every service (starts are attempted even when a
    stop failed, as in the original); stop failures are reported first.

    Fix: list comprehensions instead of map() so every service is
    attempted under Python 3 (lazy map + all() would short-circuit).
    """
    parser = MyArgParser(prog=cmd_bounce.name,
                         description=cmd_bounce.description)
    _add_verbose_arg(parser)
    _add_services_arg(parser)
    parsed_args = parser.parse_args(args)
    ads = _load_or_die()
    services = _resolve_selectors(ads, parsed_args.service, True)
    stop_results = [_down(service, parsed_args.verbose)
                    for service in services]
    start_results = [_up(service, parsed_args.verbose)
                     for service in services]
    if not all(stop_results):
        raise StopFailed("One or more services failed to stop")
    if not all(start_results):
        raise StartFailed("One or more services failed to restart")
def status(args):
    """CLI 'status': print the status of each selected service.

    Raises SomeDown (exit 23) when any service is not running.

    Fix: the checks are materialized into a list so every service's status
    line is printed even after a down one (Python 3's lazy map + all()
    would short-circuit).
    """
    parser = MyArgParser(prog=cmd_status.name,
                         description=cmd_status.description)
    _add_verbose_arg(parser)
    _add_services_arg(parser)
    parsed_args = parser.parse_args(args)
    ads = _load_or_die()
    services = _resolve_selectors(ads, parsed_args.service, False)
    results = [_status(service, parsed_args.verbose) for service in services]
    if not all(results):
        raise SomeDown()
def logs(args):
    """CLI 'logs': tail (default), list, or cat the selected services' logs.

    --tail/--list/--cat pick the action; --general (default) / --errors
    pick which log set (log_paths vs err_log_paths) to use. Raises
    InternalError when the delegated tail/cat command fails, NotFound when
    no log files exist.
    """
    parser = MyArgParser(prog=cmd_logs.name, description=cmd_logs.description)
    # The three actions are mutually exclusive; none given means --tail.
    sub_cmd_gp = parser.add_mutually_exclusive_group()
    sub_cmd_gp.add_argument(
        "--tail",
        action="store_true",
        help="(Default) Follow the logs with tail -f")
    sub_cmd_gp.add_argument(
        "--list",
        action="store_true",
        help="List the paths of all log files which exist "
             "(useful for pipelining)")
    sub_cmd_gp.add_argument(
        "--cat",
        action="store_true",
        help="Dump the contents of all log files to stdout")
    # Which log set to operate on; none given means --general.
    which_logs_gp = parser.add_mutually_exclusive_group()
    which_logs_gp.add_argument(
        "--general",
        action="store_true",
        help="(Default) Show the general logs specified by the "
             "log_paths field")
    which_logs_gp.add_argument(
        "--errors",
        action="store_true",
        help="Show the error logs specified by the err_log_paths field")
    _add_services_arg(parser)
    parsed_args = parser.parse_args(args)
    if parsed_args.errors:
        log_type = "error"
    else:
        # Default
        log_type = "general"
    ads = _load_or_die()
    services = _resolve_selectors(ads, parsed_args.service, False)
    resolved_log_paths = _collect_logs_nonempty(services, log_type)
    if parsed_args.list:
        print("\n".join(resolved_log_paths))
    elif parsed_args.cat:
        if not _cat(resolved_log_paths):
            raise InternalError("cat command failed")
    else:
        # Default
        if not _tail(resolved_log_paths):
            raise InternalError("tail command failed")
def home(args):
    """CLI 'home': print each selected service's home dir (cwd-relative)."""
    parser = MyArgParser(prog=cmd_home.name, description=cmd_home.description)
    _add_services_arg(parser)
    opts = parser.parse_args(args)
    targets = _resolve_selectors(_load_or_die(), opts.service, True)
    print("\n".join(_collect_rel_homes(targets)))
def edit(args):
    """CLI 'edit': open the selected services' ads.yml files in $EDITOR."""
    parser = MyArgParser(prog=cmd_edit.name, description=cmd_edit.description)
    _add_services_arg(parser)
    opts = parser.parse_args(args)
    targets = _resolve_selectors(_load_or_die(), opts.service, True)
    yml_paths = [os.path.join(home_dir, "ads.yml")
                 for home_dir in _collect_rel_homes(targets)]
    editor = os.environ.get('EDITOR', 'vi')
    subprocess.call([editor] + yml_paths)
class Cmd:
    """Metadata for one ads subcommand: name, handler, help text, aliases."""

    def __init__(self, name, func, description, is_common=False, aliases=None):
        self.name = name
        self.func = func
        self.description = description
        self.is_common = is_common  # shown in the "common commands" section
        self.aliases = aliases or []
cmd_help = Cmd(
"help", None,
"Display help about ads")
cmd_list = Cmd(
"list", list_func,
"Print the list of available services", True)
cmd_up = Cmd(
"up", up,
"Ensure the specified services are running", True,
["start", "run"])
cmd_down = Cmd(
"down", down,
"Ensure the specified services are not running", True,
["stop", "kill"])
cmd_status = Cmd(
"status", status,
"Print status of the specified services", True)
cmd_logs = Cmd(
"logs", logs,
"Tail the logs of the specified services", True)
cmd_bounce = Cmd(
"bounce", bounce,
"Stop and restart the specified services", False,
["restart"])
cmd_home = Cmd(
"home", home,
"Print paths to the specified services' home directories")
cmd_edit = Cmd(
"edit", edit,
"Edit a service's ads.yml")
all_cmds = [cmd_help, cmd_list, cmd_up, cmd_down, cmd_status, cmd_logs,
cmd_bounce, cmd_home, cmd_edit]
cmds_by_alias = dict([
(name, cmd)
for cmd in all_cmds
for name in ([cmd.name] + cmd.aliases)])
##############################################
# main
##############################################
def fail(exit_status, msg=None):
    """Optionally report msg as an error, then exit with exit_status."""
    if msg:
        error(msg)
    exit(exit_status)
def format_help_for_cmds(cmds):
    """Render one '  name  description' help line per command."""
    rows = ["  %-10s %s" % (c.name, c.description) for c in cmds]
    return "\n".join(rows)
def create_main_arg_parser():
    """Build the top-level 'ads <command>' parser with a git-style epilog
    listing common and uncommon commands."""
    # NOTE(review): the corpus dump this was recovered from strips blank
    # lines, so the epilog template may originally have contained blank
    # separator lines — confirm against the upstream file.
    epilog = """
The most commonly used ads commands are:
%s
Some less common commands:
%s
See 'ads help <command>' to read about a specific subcommand.
""" % (format_help_for_cmds(filter(lambda cmd: cmd.is_common, all_cmds)),
       format_help_for_cmds(filter(lambda cmd: not cmd.is_common, all_cmds)))
    usage = "ads [-h] <command> [args] [service [service ...]]"
    parser = MyArgParser(
        prog="ads",
        description="Start, stop, and manage microservices in a codebase",
        epilog=epilog,
        usage=usage,
        # Raw formatter keeps the hand-built epilog layout intact.
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        "command",
        metavar="<command>",
        choices=cmds_by_alias.keys(),
        help="Do something to a service")
    return parser
main_parser = create_main_arg_parser()
def help(args):
    """CLI 'help': show help for one command, or the global help."""
    parser = MyArgParser(prog=cmd_help.name, description=cmd_help.description)
    parser.add_argument(
        "command",
        metavar="<command>",
        nargs="?",
        choices=cmds_by_alias.keys(),
        help="command to learn about")
    opts = parser.parse_args(args)
    if not opts.command:
        main_parser.print_help()
    else:
        # Delegate to the subcommand's own -h handling.
        cmds_by_alias[opts.command].func(["-h"])
cmds_by_alias["help"].func = help
def main():
    """CLI entry point: dispatch 'ads <command> [args]'."""
    command_argv = sys.argv[1:2]
    rest_argv = sys.argv[2:]
    parsed = main_parser.parse_args(command_argv)
    if parsed.command == "help" and not rest_argv:
        main_parser.print_help()
        return
    try:
        cmds_by_alias[parsed.command].func(rest_argv)
    except AdsCommandException as e:
        fail(e.exit_code, e.msg)
| 29.881339 | 80 | 0.597292 |
f5d47ed24a34da8a8c4705011de24e906e67be1a | 2,657 | py | Python | core/cost/HMLogLikelihood.py | szokejokepu/natural-rws | bb1ad4ca3ec714e6bf071d2136593dc853492b68 | [
"MIT"
] | null | null | null | core/cost/HMLogLikelihood.py | szokejokepu/natural-rws | bb1ad4ca3ec714e6bf071d2136593dc853492b68 | [
"MIT"
] | null | null | null | core/cost/HMLogLikelihood.py | szokejokepu/natural-rws | bb1ad4ca3ec714e6bf071d2136593dc853492b68 | [
"MIT"
] | null | null | null | import tensorflow as tf
from core.argo.core.network.AbstractModule import AbstractModule
class HMLogLikelihood(AbstractModule):
    """Negative log-likelihood cost for a Helmholtz-machine-style model:
    a reconstruction term on the data x and a 'dream' reconstruction term
    on prior samples h."""

    def __init__(self, name="HMLL"):
        super().__init__(name = name)

    def create_id(self, cost_fuction_kwargs):
        # Fixed id; the kwargs are ignored for this cost.
        _id = "HMLL"
        return _id

    def _build(self, hm):
        """Build the two NLL nodes from the model `hm`.

        NOTE(review): assumes hm exposes n_z_samples, x_target,
        _hgw/_hrs distribution stacks and _prior_samples — confirm
        against the HM model class.
        """
        n_z_samples = hm.n_z_samples
        x_target = hm.x_target
        x_distr = hm._hgw[0][0]
        h_target = hm._prior_samples
        h_distr = hm._hrs[-1][0]
        # The loss is composed of two terms:
        #
        # 1.) The reconstruction loss (the negative log probability
        #     of the input under the reconstructed distribution
        #     induced by the decoder in the data space).
        #     This can be interpreted as the number of "nats" required
        #     for reconstructing the input when the activation in latent
        #     is given.
        reconstruction_loss = self.reconstruction_loss(x_target, n_z_samples, x_distr)
        dream_reconstruction_loss = self.reconstruction_loss(h_target, n_z_samples, h_distr)
        # (node, per-term losses, their names, and per-term logging options)
        return None, [[reconstruction_loss], [dream_reconstruction_loss]], [["NLL_X"],["NLL_H"]], [{"fileName" : "reconstruction_loss_NLLX"},{"fileName" : "reconstruction_loss_NLLH"}]

    def reconstruction_loss(self, x_target, n_z_samples, distr):
        """Mean NLL of x_target (tiled n_z_samples times) under distr."""
        # with tf.variable_scope('LL/reconstruction_loss'):
        # no need for LL, sonnet module is already adding that, the line above would produce:
        # LL/LL/reconstruction_loss/node_created
        with tf.variable_scope('reconstruction_loss'):
            # 1) the log_pdf is computed with respect to distribution of the visible
            #    variables obtained from the target of input of the graph (self.x_target)
            # can I avoid replicate? maybe not..
            input_shape = x_target.shape.as_list()[1:]
            ones = [1] * len(input_shape)
            # Tile the batch once per latent sample, then flatten to match
            # the distribution's event shape.
            x_replicate = tf.tile(x_target, [n_z_samples] + ones)
            x_replicate = tf.reshape(x_replicate, [-1] + distr.logits.shape.as_list()[1:])
            reconstr_loss = - distr.log_prob(x_replicate)
            #now (ready for arbitrary intermediate samplings)
            all_axis_but_first = list(range(len(reconstr_loss.shape)))[1:]
            #independent p for each input pixel
            log_p = tf.reduce_sum(reconstr_loss, axis=all_axis_but_first)
            #average over all the samples and the batch (they are both stacked on the axis 0)
            mean_reconstr_loss = tf.reduce_mean(log_p, axis=0, name="reconstruction_loss")
        return mean_reconstr_loss
| 39.073529 | 183 | 0.64697 |
23d493093cc08dcc5c1ef9c7fae565054095db95 | 173 | py | Python | vulture/whitelists/argparse_whitelist.py | kianmeng/vulture | b8cbc44dac89b2a96f6da7033424f52525d6f574 | [
"MIT"
] | 2,081 | 2017-03-06T14:45:21.000Z | 2022-03-31T13:29:34.000Z | vulture/whitelists/argparse_whitelist.py | kianmeng/vulture | b8cbc44dac89b2a96f6da7033424f52525d6f574 | [
"MIT"
] | 248 | 2017-03-06T12:13:37.000Z | 2022-03-15T11:21:27.000Z | vulture/whitelists/argparse_whitelist.py | kianmeng/vulture | b8cbc44dac89b2a96f6da7033424f52525d6f574 | [
"MIT"
] | 111 | 2017-03-06T20:48:04.000Z | 2022-03-17T09:49:32.000Z | import argparse
# Vulture whitelist: reference argparse members that are only used
# dynamically so vulture does not flag them as dead code.
argparse.ArgumentParser().epilog
argparse.ArgumentDefaultsHelpFormatter("prog")._fill_text
argparse.ArgumentDefaultsHelpFormatter("prog")._get_help_string
| 24.714286 | 63 | 0.867052 |
98c7ebf6e12a5b877dbec1df6576215b08ea9a41 | 341 | py | Python | valitor_python/errors.py | overcastsoftware/python-valitor | 537e924c783474009e70f44c1a6aab4ed0df0ba1 | [
"BSD-2-Clause"
] | 1 | 2020-12-25T19:02:44.000Z | 2020-12-25T19:02:44.000Z | valitor_python/errors.py | overcastsoftware/python-valitor | 537e924c783474009e70f44c1a6aab4ed0df0ba1 | [
"BSD-2-Clause"
] | null | null | null | valitor_python/errors.py | overcastsoftware/python-valitor | 537e924c783474009e70f44c1a6aab4ed0df0ba1 | [
"BSD-2-Clause"
] | null | null | null | class ValitorException(Exception):
def __init__(self, number, message, log_id):
print(number, message, log_id)
self.number = number
self.message = message
self.log_id = log_id
class ValitorPayException(Exception):
    """Raised for Valitor Pay API failures.

    Fix: the original never called Exception.__init__, so str(exc) was
    empty; the message is now forwarded to the base class. The print()
    debug side effect is kept unchanged.
    """

    def __init__(self, message):
        super(ValitorPayException, self).__init__(message)
        print(message)
        self.message = message
| 22.733333 | 48 | 0.653959 |
daa5e43f97a37783706ea1b6abee2d99ebe92315 | 26,602 | py | Python | fmsync2/core/FlowmonDDD.py | progress/Flowmon-Various-Scripts | e21d6334366eae3af7a1a8bf2f5735e1460d2aa0 | [
"Apache-2.0"
] | null | null | null | fmsync2/core/FlowmonDDD.py | progress/Flowmon-Various-Scripts | e21d6334366eae3af7a1a8bf2f5735e1460d2aa0 | [
"Apache-2.0"
] | null | null | null | fmsync2/core/FlowmonDDD.py | progress/Flowmon-Various-Scripts | e21d6334366eae3af7a1a8bf2f5735e1460d2aa0 | [
"Apache-2.0"
] | null | null | null | from core.FlowmonREST import FlowmonREST
import requests
import logging
import json
class FlowmonDDD:
    def __init__(self, app, rest):
        """Wrap the ADS/DDD REST endpoints of a Flowmon appliance.

        app: application object providing app.log (used for logging);
        rest: FlowmonREST client providing _url/get_header/get_verify.
        """
        self.rest = rest
        self.app = app
    # Return information about Groups
    def get_groups(self):
        """GET /rest/iad/groups/.

        Returns the parsed group list on 200, 404 when the endpoint does
        not exist (used by callers to detect DDD v4), and logs otherwise.
        NOTE(review): the dump this was recovered from strips indentation;
        the 204 (no groups) branch is assumed to fall through and return
        None — confirm the original placement of the final return 0.
        """
        r = requests.get(self.rest._url('/rest/iad/groups/'),
                         headers=self.rest.get_header(), verify=self.rest.get_verify())
        if r.status_code == 200:
            return json.loads(r.content)
        elif r.status_code == 204:
            # no groups defined
            self.app.log.info(
                'There are no groups configured on host {}.'.format(self.rest.hostname))
        elif r.status_code == 404:
            # return code 404 if groups do not exists, this way we can detect that v4 of DDD is used
            return 404
        else:
            self.app.log.error(
                'Cannot get information about groups:{} - {}'.format(r.status_code, r.content))
            return 0
def add_group(self, data):
payload = {'entity': data}
payload = json.dumps(payload)
r = requests.post(self.rest._url('/rest/iad/groups/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 201:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot add new group HTTP CODE {} - {}'.format(r.status_code, r.content))
return 0
def change_group(self, data):
payload = {'entity': data}
payload = json.dumps(payload)
r = requests.put(self.rest._url('/rest/iad/groups/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot modify group ID {} HTTP CODE {} - {}'.format(data['id'], r.status_code, r.content))
return 0
def delete_group(self, data):
r = requests.delete(self.rest._url('/rest/iad/groups/{}'.format(data)),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 204:
return 1
else:
self.app.log.error(
'Cannot delete group ID {} HTTP CODE {} - {}'.format(data, r.status_code, r.content))
return 0
# Return information about Segment
# int id ID of segment we want to get
def get_segment(self, id):
r = requests.get(self.rest._url('/rest/iad/segments/{}'.format(id)),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot get information about segment {}: {} - {}'.format(id, r.status_code, r.content))
# Return information about alert configuration of specific segment
# int id ID of segment
def get_segment_alert(self, id):
r = requests.get(self.rest._url('/rest/iad/segments/{}'.format(id)),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
alert = json.loads(r.content)['measures']['alert']
alert = str(alert)
if 'None' == alert:
self.app.log.info('No alerting configured for the segment')
else:
alertId = str(json.loads(r.content)['measures']['alert']['id'])
return self.get_alert(alertId)
else:
self.app.log.error(
'Cannot get information about segment {}: {} - {}'.format(id, r.status_code, r.content))
# Return alert configuration
def get_alert(self, id):
r = requests.get(self.rest._url('/rest/iad/alerts/{}'.format(id)),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot get information about alert {}: {} - {}'.format(id, r.status_code, r.content))
# end def get_alert( self, id ):
# Get specific email template
def get_template(self, id):
r = requests.get(self.rest._url('/rest/iad/email-templates/{}'.format(id)),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot get email template {}: {} - {}'.format(id, r.status_code, r.content))
# end get_template( self, id ):
# This method returns Segment ID fo specific Attack ID
#
def get_attack_segment(self, id):
r = requests.get(self.rest._url('/rest/iad/attacks/{}'.format(id)),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
segment = json.loads(r.content)['segment']['id']
return str(segment)
else:
self.app.log.error(
'Cannot get information about attack {}: {} - {}'.format(id, r.status_code, r.content))
# end def get_attack_segment( self, id ):
# This method returns Segment ID fo specific Attack ID
#
def get_attack(self, id):
r = requests.get(self.rest._url('/rest/iad/attacks/{}'.format(id)),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot get information about attack {}: {} - {}'.format(id, r.status_code, r.content))
# end def get_attack_segment( self, id ):
def get_routers(self):
r = requests.get(self.rest._url('/rest/iad/routers/'),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
elif r.status_code == 204:
self.app.log.info(
'Routers section is empty on {}.'.format(self.rest.hostname))
return 0
else:
self.app.log.error(
'Cannot get information about routers {} - {}'.format(r.status_code, r.content))
return 0
# end def get_routers( self ):
def get_alerts(self):
r = requests.get(self.rest._url('/rest/iad/alerts/'),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
elif r.status_code == 204:
self.app.log.info(
'Alerts section is empty on {}.'.format(self.rest.hostname))
return False
else:
self.app.log.error(
'Cannot get information about alerts {} - {}'.format(r.status_code, r.content))
return False
# end def get_alerts( self ):
def get_scrubbing(self):
r = requests.get(self.rest._url('/rest/iad/scrubbing-centers/'),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
elif r.status_code == 204:
self.app.log.info(
'Scrubbing section is empty on {}.'.format(self.rest.hostname))
return 0
else:
self.app.log.error(
'Cannot get information about scrubbing-centers {} - {}'.format(r.status_code, r.content))
return 0
# end def get_scrubbings( self ):
def get_scrubbing_parameters(self):
r = requests.get(self.rest._url('/rest/iad/scrubbing-center-parameters/'),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot get information about scrubbing-center parameters {} - {}'.format(r.status_code, r.content))
# end def get_scrubbings_parameters( self ):
def get_report_chapters(self):
r = requests.get(self.rest._url('/rest/iad/report-chapter/'),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
elif r.status_code == 204:
self.app.log.info(
'Report chapters section is empty on {}.'.format(self.rest.hostname))
return 0
else:
self.app.log.error(
'Cannot get information about report-chapters HTTP CODE {} - {}'.format(r.status_code, r.content))
# end def get_report_chapters( self ):
def get_email_templates(self):
r = requests.get(self.rest._url('/rest/iad/email-templates/'),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
elif r.status_code == 204:
self.app.log.info(
'Email templates section is empty on {}.'.format(self.rest.hostname))
return 0
else:
self.app.log.error(
'Cannot get information about email-templates HTTP CODE {} - {}'.format(r.status_code, r.content))
# end def get_email_templates( self ):
def get_rules(self):
r = requests.get(self.rest._url('/rest/iad/rules/'),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
elif r.status_code == 204:
self.app.log.info(
'Rules section is empty on {}.'.format(self.rest.hostname))
return 0
else:
self.app.log.error(
'Cannot get information about rules HTTP CODE {} - {}'.format(r.status_code, r.content))
# end def get_rules( self ):
def get_segments(self):
r = requests.get(self.rest._url('/rest/iad/segments/'),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
elif r.status_code == 204:
self.app.log.info(
'Segments section is empty on {}.'.format(self.rest.hostname))
return 0
else:
self.app.log.error(
'Cannot get information about segments HTTP CODE {} - {}'.format(r.status_code, r.content))
# end def get_segments( self ):
def add_router(self, router):
payload = {'entity': router}
payload = json.dumps(payload)
r = requests.post(self.rest._url('/rest/iad/routers/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 201:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot add new router ID {} HTTP CODE {} - {}'.format(router['id'], r.status_code, r.content))
return 0
def change_router(self, router):
payload = {'entity': router}
payload = json.dumps(payload)
r = requests.put(self.rest._url('/rest/iad/routers/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot modify router ID {} HTTP CODE {} - {}'.format(router['id'], r.status_code, r.content))
return 0
def delete_router(self, router):
r = requests.delete(self.rest._url('/rest/iad/routers/{}'.format(router)),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 204:
return 1
else:
self.app.log.error(
'Cannot delete router ID {} HTTP CODE {} - {}'.format(router, r.status_code, r.content))
return 0
def add_scrubbing(self, scrubbing):
payload = {'entity': scrubbing}
payload = json.dumps(payload)
r = requests.post(self.rest._url('/rest/iad/scrubbing-centers/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 201:
return json.loads(r.content)
else:
self.app.log.error('Cannot add new scrubbing-center ID {} HTTP CODE {} - {}'.format(
scrubbing['id'], r.status_code, r.content))
return 0
def change_scrubbing(self, scrubbing):
payload = {'entity': scrubbing}
payload = json.dumps(payload)
r = requests.put(self.rest._url('/rest/iad/scrubbing-centers/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
else:
self.app.log.error('Cannot modify scrubbing-center ID {} HTTP CODE {} - {}'.format(
scrubbing['id'], r.status_code, r.content))
return 0
def delete_scrubbing(self, scrubbing):
r = requests.delete(self.rest._url('/rest/iad/scrubbing-centers/{}'.format(
scrubbing)), headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 204:
return 1
else:
self.app.log.error(
'Cannot delete scrubbing-center ID {} HTTP CODE {} - {}'.format(scrubbing, r.status_code, r.content))
return 0
def add_alert(self, data):
payload = {'entity': data}
payload = json.dumps(payload)
r = requests.post(self.rest._url('/rest/iad/alerts/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 201:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot add new alert ID {} HTTP CODE {} - {}'.format(data['id'], r.status_code, r.content))
return 0
def change_alert(self, data):
payload = {'entity': data}
payload = json.dumps(payload)
r = requests.put(self.rest._url('/rest/iad/alerts/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot modify alert ID {} HTTP CODE {} - {}'.format(data['id'], r.status_code, r.content))
return 0
def delete_alert(self, data):
r = requests.delete(self.rest._url('/rest/iad/alerts/{}'.format(data)),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 204:
return 1
else:
self.app.log.error(
'Cannot delete alert ID {} HTTP CODE {} - {}'.format(data, r.status_code, r.content))
return 0
def change_scrubbing_parametres(self, data):
payload = {'entity': data}
payload = json.dumps(payload)
r = requests.put(self.rest._url('/rest/iad/scrubbing-center-parameters'),
data=payload, headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot modify alert HTTP CODE {} - {}'.format(r.status_code, r.content))
return 0
def add_chapter(self, data):
payload = {'entity': data}
payload = json.dumps(payload)
r = requests.post(self.rest._url('/rest/iad/report-chapter/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 201:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot add new chapter ID {} HTTP CODE {} - {}'.format(data['id'], r.status_code, r.content))
return 0
def change_chapter(self, data):
payload = {'entity': data}
payload = json.dumps(payload)
r = requests.put(self.rest._url('/rest/iad/report-chapter/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot modify chapter ID {} HTTP CODE {} - {}'.format(data['id'], r.status_code, r.content))
return 0
def delete_chapter(self, data):
r = requests.delete(self.rest._url('/rest/iad/report-chapter/{}'.format(data)),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 204:
return 1
else:
self.app.log.error(
'Cannot delete chapter ID {} HTTP CODE {} - {}'.format(data, r.status_code, r.content))
return 0
def add_template(self, data):
payload = {'entity': data}
payload = json.dumps(payload)
r = requests.post(self.rest._url('/rest/iad/email-templates/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 201:
return json.loads(r.content)
else:
self.app.log.error('Cannot add new email template ID {} HTTP CODE {} - {}'.format(
data['id'], r.status_code, r.content))
return 0
def change_template(self, data):
payload = {'entity': data}
payload = json.dumps(payload)
r = requests.put(self.rest._url('/rest/iad/email-templates/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
else:
self.app.log.error('Cannot modify email template ID {} HTTP CODE {} - {}'.format(
data['id'], r.status_code, r.content))
return 0
def delete_template(self, data):
r = requests.delete(self.rest._url('/rest/iad/email-templates/{}'.format(data)),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 204:
return 1
else:
self.app.log.error(
'Cannot delete email template ID {} HTTP CODE {} - {}'.format(data, r.status_code, r.content))
return 0
def add_rule(self, data):
payload = {'entity': data}
payload = json.dumps(payload)
r = requests.post(self.rest._url('/rest/iad/rules/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 201:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot add new rule {} HTTP CODE {} - {}'.format(data['name'], r.status_code, r.content))
return 0
def change_rule(self, data):
payload = {'entity': data}
payload = json.dumps(payload)
r = requests.put(self.rest._url('/rest/iad/rules/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot modify rule ID {} HTTP CODE {} - {}'.format(data['id'], r.status_code, r.content))
return 0
def delete_rule(self, data):
r = requests.delete(self.rest._url('/rest/iad/rules/{}'.format(data)),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 204:
return 1
else:
self.app.log.error(
'Cannot delete rule ID {} HTTP CODE {} - {}'.format(data, r.status_code, r.content))
return 0
def add_segment(self, data):
payload = {'entity': data}
payload = json.dumps(payload)
r = requests.post(self.rest._url('/rest/iad/segments/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 201:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot add new segment ID {} HTTP CODE {} - {}'.format(data['id'], r.status_code, r.content))
return 0
def change_segment(self, data):
payload = {'entity': data}
payload = json.dumps(payload)
r = requests.put(self.rest._url('/rest/iad/segments/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot modify segment ID {} HTTP CODE {} - {}'.format(data['id'], r.status_code, r.content))
return 0
def delete_segment(self, data):
r = requests.delete(self.rest._url('/rest/iad/segments/{}'.format(data)),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 204:
return 1
else:
self.app.log.error(
'Cannot delete segment ID {} HTTP CODE {} - {}'.format(data, r.status_code, r.content))
return 0
# Return information about Whitelists
def get_whitelists(self):
r = requests.get(self.rest._url('/rest/iad/whitelists'),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
elif r.status_code == 204:
self.app.log.info(
'DDD: No whitelistst configured on {}.'.format(self.rest.hostname))
else:
self.app.log.error(
'DDD: Cannot get information about whitelists : {} - {}'.format(r.status_code, r.content))
def add_whitelist(self, data):
payload = {'entity': data}
payload = json.dumps(payload)
r = requests.post(self.rest._url('/rest/iad/whitelists/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 201:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot add new whitelist name {} HTTP CODE {} - {}'.format(data['name'], r.status_code, r.content))
return 0
def change_whitelist(self, data):
payload = {'entity': data}
payload = json.dumps(payload)
r = requests.put(self.rest._url('/rest/iad/whitelists/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot modify whitelist ID {} HTTP CODE {} - {}'.format(data['id'], r.status_code, r.content))
return 0
def delete_whitelist(self, data):
r = requests.delete(self.rest._url('/rest/iad/whitelists/{}'.format(data)),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 204:
return 1
else:
self.app.log.error(
'Cannot delete whitelist ID {} HTTP CODE {} - {}'.format(data, r.status_code, r.content))
return 0
# Return information about traffic types
def get_traffictypes(self):
r = requests.get(self.rest._url('/rest/iad/whitelists/traffic-types'),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
else:
self.app.log.error(
'DDD: Cannot get information about traffic types: {} - {}'.format(r.status_code, r.content))
# Return information about Custom baselines
def get_custombaselines(self):
r = requests.get(self.rest._url('/rest/iad/custom-baselines'),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
elif r.status_code == 204:
self.app.log.info(
'DDD: There are no custom baselines on {}.'.format(self.rest.hostname))
else:
self.app.log.error(
'DDD: Cannot get information about custom baselines : {} - {}'.format(r.status_code, r.content))
def add_custombaseline(self, data):
payload = {'entity': data}
payload = json.dumps(payload)
r = requests.post(self.rest._url('/rest/iad/custom-baselines/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 201:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot add new custom baseline ID {} HTTP CODE {} - {}'.format(data['id'], r.status_code, r.content))
return 0
def change_custombaseline(self, data):
payload = {'entity': data}
payload = json.dumps(payload)
r = requests.put(self.rest._url('/rest/iad/custom-baselines/'), data=payload,
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 200:
return json.loads(r.content)
else:
self.app.log.error(
'Cannot modify custom baseline ID {} HTTP CODE {} - {}'.format(data['id'], r.status_code, r.content))
return 0
def delete_custombaseline(self, data):
r = requests.delete(self.rest._url('/rest/iad/custom-baselines/{}'.format(data)),
headers=self.rest.get_header(), verify=self.rest.get_verify())
if r.status_code == 204:
return 1
else:
self.app.log.error(
'Cannot delete custom baseline ID {} HTTP CODE {} - {}'.format(data, r.status_code, r.content))
return 0
| 43.89769 | 118 | 0.561311 |
3bf3f21eb50a90d8cfa10a90860924be67a47fc5 | 7,649 | py | Python | maskrcnn_benchmark/modeling/rpn/inference.py | huttzza/maskrcnn-benchmark | d15971e4b602bb71e5494ce8973293fedf202e58 | [
"MIT"
] | 236 | 2019-02-26T03:18:31.000Z | 2022-03-30T07:02:59.000Z | maskrcnn_benchmark/modeling/rpn/inference.py | huttzza/maskrcnn-benchmark | d15971e4b602bb71e5494ce8973293fedf202e58 | [
"MIT"
] | 35 | 2019-10-15T14:45:12.000Z | 2022-03-31T10:10:56.000Z | maskrcnn_benchmark/modeling/rpn/inference.py | huttzza/maskrcnn-benchmark | d15971e4b602bb71e5494ce8973293fedf202e58 | [
"MIT"
] | 60 | 2019-10-08T00:34:37.000Z | 2022-03-17T14:29:58.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms
from maskrcnn_benchmark.structures.boxlist_ops import remove_small_boxes
from ..utils import cat
from .utils import permute_and_flatten
class RPNPostProcessor(torch.nn.Module):
    """
    Performs post-processing on the outputs of the RPN boxes, before feeding the
    proposals to the heads
    """

    def __init__(
        self,
        pre_nms_top_n,
        post_nms_top_n,
        nms_thresh,
        min_size,
        box_coder=None,
        fpn_post_nms_top_n=None,
    ):
        """
        Arguments:
            pre_nms_top_n (int)
            post_nms_top_n (int)
            nms_thresh (float)
            min_size (int)
            box_coder (BoxCoder)
            fpn_post_nms_top_n (int)
        """
        super(RPNPostProcessor, self).__init__()
        self.pre_nms_top_n = pre_nms_top_n
        self.post_nms_top_n = post_nms_top_n
        self.nms_thresh = nms_thresh
        self.min_size = min_size

        if box_coder is None:
            # default: unit weights for the (dx, dy, dw, dh) box deltas
            box_coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
        self.box_coder = box_coder

        if fpn_post_nms_top_n is None:
            # without FPN the per-image cap doubles as the global cap
            fpn_post_nms_top_n = post_nms_top_n
        self.fpn_post_nms_top_n = fpn_post_nms_top_n

    def add_gt_proposals(self, proposals, targets):
        """
        Arguments:
            proposals: list[BoxList]
            targets: list[BoxList]
        """
        # Get the device we're operating on
        device = proposals[0].bbox.device

        gt_boxes = [target.copy_with_fields(['is_source']) for target in targets]

        # later cat of bbox requires all fields to be present for all bbox
        # so we need to add a dummy for objectness that's missing
        for gt_box in gt_boxes:
            gt_box.add_field("objectness", torch.ones(len(gt_box), device=device))

        # NOTE(review): GT boxes are appended only for images whose targets are
        # flagged 'is_source' -- looks like a domain-adaptation variant of the
        # stock maskrcnn-benchmark code; confirm against the training setup.
        new_proposals = []
        for proposal, gt_box in zip(proposals, gt_boxes):
            if gt_box.get_field('is_source').any():
                new_proposals.append(cat_boxlist((proposal, gt_box.copy_with_fields(['objectness']))))
            else:
                new_proposals.append(proposal)

        return new_proposals

    def forward_for_single_feature_map(self, anchors, objectness, box_regression):
        """
        Arguments:
            anchors: list[BoxList]
            objectness: tensor of size N, A, H, W
            box_regression: tensor of size N, A * 4, H, W
        """
        device = objectness.device
        N, A, H, W = objectness.shape

        # put in the same format as anchors
        objectness = permute_and_flatten(objectness, N, A, 1, H, W).view(N, -1)
        objectness = objectness.sigmoid()

        box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)

        num_anchors = A * H * W

        # keep only the highest-scoring anchors before running NMS
        pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
        objectness, topk_idx = objectness.topk(pre_nms_top_n, dim=1, sorted=True)

        batch_idx = torch.arange(N, device=device)[:, None]
        box_regression = box_regression[batch_idx, topk_idx]

        image_shapes = [box.size for box in anchors]
        concat_anchors = torch.cat([a.bbox for a in anchors], dim=0)
        concat_anchors = concat_anchors.reshape(N, -1, 4)[batch_idx, topk_idx]

        # apply the predicted deltas to the selected anchors
        proposals = self.box_coder.decode(
            box_regression.view(-1, 4), concat_anchors.view(-1, 4)
        )

        proposals = proposals.view(N, -1, 4)

        result = []
        for proposal, score, im_shape in zip(proposals, objectness, image_shapes):
            boxlist = BoxList(proposal, im_shape, mode="xyxy")
            boxlist.add_field("objectness", score)
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, self.min_size)
            boxlist = boxlist_nms(
                boxlist,
                self.nms_thresh,
                max_proposals=self.post_nms_top_n,
                score_field="objectness",
            )
            result.append(boxlist)
        return result

    def forward(self, anchors, objectness, box_regression, targets=None):
        """
        Arguments:
            anchors: list[list[BoxList]]
            objectness: list[tensor]
            box_regression: list[tensor]

        Returns:
            boxlists (list[BoxList]): the post-processed anchors, after
                applying box decoding and NMS
        """
        sampled_boxes = []
        num_levels = len(objectness)
        # transpose from per-image/per-level to per-level/per-image
        anchors = list(zip(*anchors))
        for a, o, b in zip(anchors, objectness, box_regression):
            sampled_boxes.append(self.forward_for_single_feature_map(a, o, b))

        # regroup back to per-image and merge the FPN levels
        boxlists = list(zip(*sampled_boxes))
        boxlists = [cat_boxlist(boxlist) for boxlist in boxlists]

        if num_levels > 1:
            boxlists = self.select_over_all_levels(boxlists)

        # append ground-truth bboxes to proposals
        if self.training and targets is not None:
            boxlists = self.add_gt_proposals(boxlists, targets)

        return boxlists

    def select_over_all_levels(self, boxlists):
        num_images = len(boxlists)
        # different behavior during training and during testing:
        # during training, post_nms_top_n is over *all* the proposals combined, while
        # during testing, it is over the proposals for each image
        # TODO resolve this difference and make it consistent. It should be per image,
        # and not per batch
        if self.training:
            objectness = torch.cat(
                [boxlist.get_field("objectness") for boxlist in boxlists], dim=0
            )
            box_sizes = [len(boxlist) for boxlist in boxlists]
            post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness))
            _, inds_sorted = torch.topk(objectness, post_nms_top_n, dim=0, sorted=True)
            # NOTE(review): uint8 mask indexing is deprecated in newer PyTorch
            # in favor of torch.bool; kept for compatibility with the pinned
            # torch version this repo targets.
            inds_mask = torch.zeros_like(objectness, dtype=torch.uint8)
            inds_mask[inds_sorted] = 1
            inds_mask = inds_mask.split(box_sizes)
            for i in range(num_images):
                boxlists[i] = boxlists[i][inds_mask[i]]
        else:
            for i in range(num_images):
                objectness = boxlists[i].get_field("objectness")
                post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness))
                _, inds_sorted = torch.topk(
                    objectness, post_nms_top_n, dim=0, sorted=True
                )
                boxlists[i] = boxlists[i][inds_sorted]
        return boxlists
def make_rpn_postprocessor(config, rpn_box_coder, is_train):
    """Build an RPNPostProcessor from the config's MODEL.RPN section.

    Training and testing use different top-N caps; the NMS threshold and
    minimum box size are shared between the two modes.
    """
    rpn_cfg = config.MODEL.RPN
    if is_train:
        fpn_post_nms_top_n = rpn_cfg.FPN_POST_NMS_TOP_N_TRAIN
        pre_nms_top_n = rpn_cfg.PRE_NMS_TOP_N_TRAIN
        post_nms_top_n = rpn_cfg.POST_NMS_TOP_N_TRAIN
    else:
        fpn_post_nms_top_n = rpn_cfg.FPN_POST_NMS_TOP_N_TEST
        pre_nms_top_n = rpn_cfg.PRE_NMS_TOP_N_TEST
        post_nms_top_n = rpn_cfg.POST_NMS_TOP_N_TEST

    return RPNPostProcessor(
        pre_nms_top_n=pre_nms_top_n,
        post_nms_top_n=post_nms_top_n,
        nms_thresh=rpn_cfg.NMS_THRESH,
        min_size=rpn_cfg.MIN_SIZE,
        box_coder=rpn_box_coder,
        fpn_post_nms_top_n=fpn_post_nms_top_n,
    )
| 37.312195 | 102 | 0.6359 |
65a4c461e75b03102fe2a747926ce0cd3ee2903e | 242 | py | Python | src/netbox_example/__init__.py | steffann/netbox-example-plugin | 9f1c22ae60bb21e7d4685219eb76687c62f0161e | [
"Apache-2.0"
] | 4 | 2020-01-19T04:14:07.000Z | 2020-06-03T17:36:19.000Z | src/netbox_example/__init__.py | steffann/netbox-example-plugin | 9f1c22ae60bb21e7d4685219eb76687c62f0161e | [
"Apache-2.0"
] | null | null | null | src/netbox_example/__init__.py | steffann/netbox-example-plugin | 9f1c22ae60bb21e7d4685219eb76687c62f0161e | [
"Apache-2.0"
] | 1 | 2020-09-17T17:02:43.000Z | 2020-09-17T17:02:43.000Z | default_app_config = 'netbox_example.apps.NetboxExampleConfig'
# NetBox plugin options
netbox_version = '~= 2.6.9'
extra_installed_apps = []
extra_menus = ['example']
required_configuration_settings = []
default_configuration_settings = {}
| 24.2 | 62 | 0.785124 |
95ac1f8df3d0220aec4629ca930112f77e5762ee | 4,731 | py | Python | gmail_yaml_filters/main.py | spacezorro/gmail-yaml-filters | f1652b77397b39319bec5f86e0ccba6159efff56 | [
"MIT"
] | null | null | null | gmail_yaml_filters/main.py | spacezorro/gmail-yaml-filters | f1652b77397b39319bec5f86e0ccba6159efff56 | [
"MIT"
] | null | null | null | gmail_yaml_filters/main.py | spacezorro/gmail-yaml-filters | f1652b77397b39319bec5f86e0ccba6159efff56 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
from lxml import etree
import argparse
import os
import re
import sys
import yaml
from .ruleset import RuleSet
from .ruleset import ruleset_to_etree
from .upload import get_gmail_credentials
from .upload import get_gmail_service
from .upload import upload_ruleset
from .upload import prune_filters_not_in_ruleset
from .upload import prune_labels_not_in_ruleset
"""
Produces Gmail filter XML files based on a more human-readable YAML spec.
"""
# Unicode support. <http://stackoverflow.com/questions/2890146>
def construct_yaml_str(self, node):
    # Force every YAML string scalar to be built as a plain scalar, so the
    # loader does not guess between str/unicode (Python 2 compatibility).
    return self.construct_scalar(node)


# Register on both the full and the safe loader so yaml.load and
# yaml.safe_load behave the same for string scalars.
yaml.Loader.add_constructor('tag:yaml.org,2002:str', construct_yaml_str)
yaml.SafeLoader.add_constructor('tag:yaml.org,2002:str', construct_yaml_str)
def ruleset_to_xml(ruleset, pretty_print=True, encoding='utf8'):
    """Serialize *ruleset* to a Gmail filter XML document and return it as text."""
    serialized = etree.tostring(
        ruleset_to_etree(ruleset),
        encoding=encoding,
        pretty_print=pretty_print,
        xml_declaration=True,
    )
    # lxml returns bytes; decode back to a str in the requested encoding.
    return serialized.decode(encoding)
def create_parser():
    """Build the command-line parser; the default action is XML generation."""
    parser = argparse.ArgumentParser()
    parser.set_defaults(action='xml')
    add = parser.add_argument

    add('filename', metavar='FILTER_FILE', default='-')
    add('-n', '--dry-run', action='store_true', default=False,
        help='do not make any API calls to Gmail')
    add('--client-secret', metavar='CLIENT_SECRET_FILE', nargs='?',
        help='path to client_secret.json; default is wherever the configuration file is located')

    # Actions -- mutually overriding store_const flags; the last one given wins.
    add('--upload', dest='action', action='store_const', const='upload',
        help='create filters and labels in Gmail')
    add('--delete', dest='action', action='store_const', const='delete',
        help='Deletes all filters in Gmail')
    add('--prune', dest='action', action='store_const', const='prune',
        help='delete any Gmail filters that are not defined in the configuration file')
    add('--sync', dest='action', action='store_const', const='upload_prune',
        help='equivalent to --upload and --prune')

    # Options for --prune-labels
    add('--prune-labels', dest='action', action='store_const', const='prune_labels',
        help='delete any Gmail labels which are not used in the configuration file')
    add('--only-matching', default=r'.*', metavar='REGEX',
        help='only prune labels matching the given expression')
    add('--ignore-errors', action='store_true', default=False,
        help='ignore HTTP errors when deleting labels')

    return parser
def main():
    """Entry point: load the YAML filter spec and run the selected action.

    Reads from stdin when FILTER_FILE is '-'; otherwise the client secret
    defaults to a client_secret.json next to the configuration file.
    """
    args = create_parser().parse_args()

    if args.filename == '-':
        default_client_secret = 'client_secret.json'
        data = yaml.safe_load(sys.stdin)
    else:
        default_client_secret = os.path.join(os.path.dirname(args.filename), 'client_secret.json')
        with open(args.filename) as inputf:
            data = yaml.safe_load(inputf)

    # A single top-level mapping is treated as a one-rule list.
    if not isinstance(data, list):
        data = [data]

    # Rules flagged "ignore" are excluded from the generated rule set.
    ruleset = RuleSet.from_object(rule for rule in data if not rule.get('ignore'))

    if not args.client_secret:
        args.client_secret = default_client_secret

    if args.action == 'xml':
        print(ruleset_to_xml(ruleset))
        return

    # every command below this point involves the Gmail API
    credentials = get_gmail_credentials(client_secret_path=args.client_secret)
    gmail = get_gmail_service(credentials)

    if args.action == 'delete':
        # Pruning against an empty rule set removes every filter.
        emptyruleset = {}
        prune_filters_not_in_ruleset(emptyruleset, service=gmail, dry_run=args.dry_run)
    elif args.action == 'upload':
        upload_ruleset(ruleset, service=gmail, dry_run=args.dry_run)
    elif args.action == 'prune':
        prune_filters_not_in_ruleset(ruleset, service=gmail, dry_run=args.dry_run)
    elif args.action == 'upload_prune':
        upload_ruleset(ruleset, service=gmail, dry_run=args.dry_run)
        prune_filters_not_in_ruleset(ruleset, service=gmail, dry_run=args.dry_run)
    elif args.action == 'prune_labels':
        match = re.compile(args.only_matching).match if args.only_matching else None
        prune_labels_not_in_ruleset(ruleset, service=gmail, match=match, dry_run=args.dry_run,
                                    continue_on_http_error=args.ignore_errors)
    else:
        # BUG FIX: argparse.ArgumentError requires (argument, message); the old
        # one-argument call raised TypeError instead of the intended error.
        # This branch is unreachable with the current parser but kept defensive.
        raise ValueError('%r not recognized' % args.action)
if __name__ == '__main__':
main()
| 38.153226 | 113 | 0.686747 |
82099016a27029ec60f79ab18051557e53f0f36e | 16,641 | py | Python | mlrun/api/db/sqldb/models/models_mysql.py | AlxZed/mlrun | 4b8fe3dec07764672664f33ac6161e14078c822b | [
"Apache-2.0"
] | null | null | null | mlrun/api/db/sqldb/models/models_mysql.py | AlxZed/mlrun | 4b8fe3dec07764672664f33ac6161e14078c822b | [
"Apache-2.0"
] | null | null | null | mlrun/api/db/sqldb/models/models_mysql.py | AlxZed/mlrun | 4b8fe3dec07764672664f33ac6161e14078c822b | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pickle
import warnings
from datetime import datetime, timezone
import orjson
from sqlalchemy import (
BLOB,
JSON,
TIMESTAMP,
Column,
ForeignKey,
Integer,
String,
Table,
UniqueConstraint,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import class_mapper, relationship
from mlrun.api import schemas
from mlrun.api.utils.db.sql_collation import SQLCollationUtil
# Declarative base shared by every model in this module.
Base = declarative_base()
NULL = None  # Avoid flake8 issuing warnings when comparing in filter
# Timestamp string format for run times (ISO-8601 with microseconds, "Z" suffix).
run_time_fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
class BaseModel:
    def to_dict(self, exclude=None):
        """Serialize this row's mapped columns into a plain dict.

        NOTE - this function (currently) does not handle serializing
        relationships.  ``datetime`` values are rendered as ISO-8601 strings;
        all other values are passed through unchanged.
        """
        excluded = exclude or []
        mapper = class_mapper(self.__class__)
        result = {}
        for column in mapper.columns:
            if column.key in excluded:
                continue
            value = getattr(self, column.key)
            if isinstance(value, datetime):
                value = value.isoformat()
            result[column.key] = value
        return result
class HasStruct(BaseModel):
    """Mixin for models that persist a pickled Python object in the ``body`` column."""

    @property
    def struct(self):
        # Deserialize the pickled blob.  NOTE(review): pickle.loads on DB
        # content is only safe while the data is written exclusively by this
        # application — confirm no external writers exist.
        return pickle.loads(self.body)

    @struct.setter
    def struct(self, value):
        self.body = pickle.dumps(value)

    def to_dict(self, exclude=None):
        """
        NOTE - this function (currently) does not handle serializing relationships
        """
        # The raw pickled blob is not serializable, so always exclude it.
        # NOTE(review): mutates a caller-supplied ``exclude`` list in place.
        exclude = exclude or []
        exclude.append("body")
        return super().to_dict(exclude)
def make_label(table):
    """Build a per-table ``<table>_labels`` model: name/value pairs tied to a parent row."""

    class Label(Base, BaseModel):
        __tablename__ = f"{table}_labels"
        __table_args__ = (
            UniqueConstraint("name", "parent", name=f"_{table}_labels_uc"),
        )

        id = Column(Integer, primary_key=True)
        name = Column(String(255, collation=SQLCollationUtil.collation()))
        value = Column(String(255, collation=SQLCollationUtil.collation()))
        # Foreign key back to the owning row of ``table``.
        parent = Column(Integer, ForeignKey(f"{table}.id"))

    return Label
def make_tag(table):
    """Build a per-table ``<table>_tags`` model, unique per (project, name, obj_id)."""

    class Tag(Base, BaseModel):
        __tablename__ = f"{table}_tags"
        __table_args__ = (
            UniqueConstraint("project", "name", "obj_id", name=f"_{table}_tags_uc"),
        )

        id = Column(Integer, primary_key=True)
        project = Column(String(255, collation=SQLCollationUtil.collation()))
        name = Column(String(255, collation=SQLCollationUtil.collation()))
        obj_id = Column(Integer, ForeignKey(f"{table}.id"))

    return Tag
# TODO: don't want to refactor everything in one PR so splitting this function to 2 versions - eventually only this one
# should be used
def make_tag_v2(table):
    """Build a per-table ``<table>_tags`` model, unique per (project, name, obj_name)."""

    class Tag(Base, BaseModel):
        __tablename__ = f"{table}_tags"
        __table_args__ = (
            UniqueConstraint("project", "name", "obj_name", name=f"_{table}_tags_uc"),
        )

        id = Column(Integer, primary_key=True)
        project = Column(String(255, collation=SQLCollationUtil.collation()))
        name = Column(String(255, collation=SQLCollationUtil.collation()))
        obj_id = Column(Integer, ForeignKey(f"{table}.id"))
        # Name of the tagged object; part of the unique constraint above
        # (unlike make_tag, which is unique on the numeric obj_id).
        obj_name = Column(String(255, collation=SQLCollationUtil.collation()))

    return Tag
# quell SQLAlchemy warnings on duplicate class name (Label)
# Every model below creates its own nested Label/Tag class via
# make_label/make_tag, which would otherwise emit SAWarning noise.
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
class Artifact(Base, HasStruct):
    """Artifact record; the full artifact dict is pickled into ``body`` (HasStruct)."""

    __tablename__ = "artifacts"
    __table_args__ = (
        UniqueConstraint("uid", "project", "key", name="_artifacts_uc"),
    )

    Label = make_label(__tablename__)
    Tag = make_tag(__tablename__)

    id = Column(Integer, primary_key=True)
    key = Column(String(255, collation=SQLCollationUtil.collation()))
    project = Column(String(255, collation=SQLCollationUtil.collation()))
    uid = Column(String(255, collation=SQLCollationUtil.collation()))
    updated = Column(TIMESTAMP)
    # TODO: change to JSON, see mlrun/api/schemas/function.py::FunctionState for reasoning
    body = Column(BLOB)
    labels = relationship(Label, cascade="all, delete-orphan")
    tags = relationship(Tag, cascade="all, delete-orphan")

    def get_identifier_string(self) -> str:
        return f"{self.project}/{self.key}/{self.uid}"


class Function(Base, HasStruct):
    """Function record, versioned by uid; tagged via the v2 (obj_name) tag model."""

    __tablename__ = "functions"
    __table_args__ = (
        UniqueConstraint("name", "project", "uid", name="_functions_uc"),
    )

    Label = make_label(__tablename__)
    Tag = make_tag_v2(__tablename__)

    id = Column(Integer, primary_key=True)
    name = Column(String(255, collation=SQLCollationUtil.collation()))
    project = Column(String(255, collation=SQLCollationUtil.collation()))
    uid = Column(String(255, collation=SQLCollationUtil.collation()))
    # TODO: change to JSON, see mlrun/api/schemas/function.py::FunctionState for reasoning
    body = Column(BLOB)
    updated = Column(TIMESTAMP)
    labels = relationship(Label, cascade="all, delete-orphan")
    tags = relationship(Tag, cascade="all, delete-orphan")

    def get_identifier_string(self) -> str:
        return f"{self.project}/{self.name}/{self.uid}"


class Log(Base, BaseModel):
    """Raw run-log blob, keyed by run uid within a project."""

    __tablename__ = "logs"

    id = Column(Integer, primary_key=True)
    uid = Column(String(255, collation=SQLCollationUtil.collation()))
    project = Column(String(255, collation=SQLCollationUtil.collation()))
    # TODO: change to JSON, see mlrun/api/schemas/function.py::FunctionState for reasoning
    body = Column(BLOB)

    def get_identifier_string(self) -> str:
        return f"{self.project}/{self.uid}"


class Run(Base, HasStruct):
    """Run record; unique per (uid, project, iteration), body pickled via HasStruct."""

    __tablename__ = "runs"
    __table_args__ = (
        UniqueConstraint("uid", "project", "iteration", name="_runs_uc"),
    )

    Label = make_label(__tablename__)
    Tag = make_tag(__tablename__)

    id = Column(Integer, primary_key=True)
    uid = Column(String(255, collation=SQLCollationUtil.collation()))
    project = Column(String(255, collation=SQLCollationUtil.collation()))
    iteration = Column(Integer)
    state = Column(String(255, collation=SQLCollationUtil.collation()))
    # TODO: change to JSON, see mlrun/api/schemas/function.py::FunctionState for reasoning
    body = Column(BLOB)
    start_time = Column(TIMESTAMP)
    labels = relationship(Label, cascade="all, delete-orphan")
    tags = relationship(Tag, cascade="all, delete-orphan")

    def get_identifier_string(self) -> str:
        return f"{self.project}/{self.uid}/{self.iteration}"
class Schedule(Base, BaseModel):
    """Scheduled job; the scheduled payload is pickled into ``struct``."""

    __tablename__ = "schedules_v2"
    __table_args__ = (UniqueConstraint("project", "name", name="_schedules_v2_uc"),)

    Label = make_label(__tablename__)

    id = Column(Integer, primary_key=True)
    project = Column(
        String(255, collation=SQLCollationUtil.collation()), nullable=False
    )
    name = Column(
        String(255, collation=SQLCollationUtil.collation()), nullable=False
    )
    kind = Column(String(255, collation=SQLCollationUtil.collation()))
    desired_state = Column(String(255, collation=SQLCollationUtil.collation()))
    state = Column(String(255, collation=SQLCollationUtil.collation()))
    creation_time = Column(TIMESTAMP)
    # JSON-encoded cron trigger (see the cron_trigger property below).
    cron_trigger_str = Column(String(255, collation=SQLCollationUtil.collation()))
    last_run_uri = Column(String(255, collation=SQLCollationUtil.collation()))
    # TODO: change to JSON, see mlrun/api/schemas/function.py::FunctionState for reasoning
    struct = Column(BLOB)
    labels = relationship(Label, cascade="all, delete-orphan")
    concurrency_limit = Column(Integer, nullable=False)

    def get_identifier_string(self) -> str:
        return f"{self.project}/{self.name}"

    @property
    def scheduled_object(self):
        # Unpickle the stored schedule payload.
        return pickle.loads(self.struct)

    @scheduled_object.setter
    def scheduled_object(self, value):
        self.struct = pickle.dumps(value)

    @property
    def cron_trigger(self) -> schemas.ScheduleCronTrigger:
        # NOTE(review): this returns the decoded JSON (a dict), not a
        # ScheduleCronTrigger instance, despite the annotation — confirm
        # callers expect a dict.
        return orjson.loads(self.cron_trigger_str)

    @cron_trigger.setter
    def cron_trigger(self, trigger: schemas.ScheduleCronTrigger):
        # NOTE(review): orjson.dumps returns bytes, which are assigned to a
        # String column — confirm the driver coerces this as intended.
        self.cron_trigger_str = orjson.dumps(trigger.dict(exclude_unset=True))
# Define "many to many" users/projects
project_users = Table(
    "project_users",
    Base.metadata,
    Column("project_id", Integer, ForeignKey("projects.id")),
    Column("user_id", Integer, ForeignKey("users.id")),
)


class User(Base, BaseModel):
    """Application user; linked to projects through the project_users table."""

    __tablename__ = "users"
    __table_args__ = (UniqueConstraint("name", name="_users_uc"),)

    id = Column(Integer, primary_key=True)
    name = Column(String(255, collation=SQLCollationUtil.collation()))
class Project(Base, BaseModel):
    """Project record; the full project spec is pickled into the legacy ``spec`` column."""

    __tablename__ = "projects"
    # For now since we use project name a lot
    __table_args__ = (UniqueConstraint("name", name="_projects_uc"),)

    id = Column(Integer, primary_key=True)
    name = Column(String(255, collation=SQLCollationUtil.collation()))
    description = Column(String(255, collation=SQLCollationUtil.collation()))
    owner = Column(String(255, collation=SQLCollationUtil.collation()))
    source = Column(String(255, collation=SQLCollationUtil.collation()))
    # the attribute name used to be _spec which is just a wrong naming, the attribute was renamed to _full_object
    # leaving the column as is to prevent redundant migration
    # TODO: change to JSON, see mlrun/api/schemas/function.py::FunctionState for reasoning
    _full_object = Column("spec", BLOB)
    # Callable default: evaluated on each INSERT.
    created = Column(TIMESTAMP, default=datetime.utcnow)
    state = Column(String(255, collation=SQLCollationUtil.collation()))
    users = relationship(User, secondary=project_users)

    Label = make_label(__tablename__)
    labels = relationship(Label, cascade="all, delete-orphan")

    def get_identifier_string(self) -> str:
        return f"{self.name}"

    @property
    def full_object(self):
        # Returns None (implicit) when the blob column is empty.
        if self._full_object:
            return pickle.loads(self._full_object)

    @full_object.setter
    def full_object(self, value):
        self._full_object = pickle.dumps(value)
class Feature(Base, BaseModel):
    """A single feature belonging to a feature set (reverse side of FeatureSet.features)."""

    __tablename__ = "features"

    id = Column(Integer, primary_key=True)
    feature_set_id = Column(Integer, ForeignKey("feature_sets.id"))
    name = Column(String(255, collation=SQLCollationUtil.collation()))
    value_type = Column(String(255, collation=SQLCollationUtil.collation()))

    Label = make_label(__tablename__)
    labels = relationship(Label, cascade="all, delete-orphan")

    def get_identifier_string(self) -> str:
        """Return a human-readable identifier for this feature.

        Fixed: the previous implementation referenced ``self.project``, but
        this model has no ``project`` column, so every call raised
        AttributeError.  The owning feature-set id is used instead.
        """
        return f"{self.feature_set_id}/{self.name}"
class Entity(Base, BaseModel):
    """A key/index entity of a feature set (reverse side of FeatureSet.entities)."""

    __tablename__ = "entities"

    id = Column(Integer, primary_key=True)
    feature_set_id = Column(Integer, ForeignKey("feature_sets.id"))
    name = Column(String(255, collation=SQLCollationUtil.collation()))
    value_type = Column(String(255, collation=SQLCollationUtil.collation()))

    Label = make_label(__tablename__)
    labels = relationship(Label, cascade="all, delete-orphan")

    def get_identifier_string(self) -> str:
        """Return a human-readable identifier for this entity.

        Fixed: the previous implementation referenced ``self.project``, but
        this model has no ``project`` column, so every call raised
        AttributeError.  The owning feature-set id is used instead.
        """
        return f"{self.feature_set_id}/{self.name}"
class FeatureSet(Base, BaseModel):
    """Feature-set record; the full object is stored as JSON in the ``object`` column."""

    __tablename__ = "feature_sets"
    __table_args__ = (
        UniqueConstraint("name", "project", "uid", name="_feature_set_uc"),
    )

    id = Column(Integer, primary_key=True)
    name = Column(String(255, collation=SQLCollationUtil.collation()))
    project = Column(String(255, collation=SQLCollationUtil.collation()))
    # Fixed: ``default`` must be a callable so the timestamp is taken per
    # INSERT.  Passing datetime.now(timezone.utc) directly evaluated once at
    # import time, stamping every row with the module-load timestamp.
    created = Column(TIMESTAMP, default=lambda: datetime.now(timezone.utc))
    updated = Column(TIMESTAMP, default=lambda: datetime.now(timezone.utc))
    state = Column(String(255, collation=SQLCollationUtil.collation()))
    uid = Column(String(255, collation=SQLCollationUtil.collation()))

    _full_object = Column("object", JSON)

    Label = make_label(__tablename__)
    Tag = make_tag_v2(__tablename__)

    labels = relationship(Label, cascade="all, delete-orphan")
    tags = relationship(Tag, cascade="all, delete-orphan")
    features = relationship(Feature, cascade="all, delete-orphan")
    entities = relationship(Entity, cascade="all, delete-orphan")

    def get_identifier_string(self) -> str:
        return f"{self.project}/{self.name}/{self.uid}"

    @property
    def full_object(self):
        # Returns None (implicit) when the JSON column is empty.
        if self._full_object:
            return json.loads(self._full_object)

    @full_object.setter
    def full_object(self, value):
        self._full_object = json.dumps(value)
class FeatureVector(Base, BaseModel):
    """Feature-vector record; the full object is stored as JSON in the ``object`` column."""

    __tablename__ = "feature_vectors"
    __table_args__ = (
        UniqueConstraint("name", "project", "uid", name="_feature_vectors_uc"),
    )

    id = Column(Integer, primary_key=True)
    name = Column(String(255, collation=SQLCollationUtil.collation()))
    project = Column(String(255, collation=SQLCollationUtil.collation()))
    # Fixed: ``default`` must be a callable so the timestamp is taken per
    # INSERT.  Passing datetime.now(timezone.utc) directly evaluated once at
    # import time, stamping every row with the module-load timestamp.
    created = Column(TIMESTAMP, default=lambda: datetime.now(timezone.utc))
    updated = Column(TIMESTAMP, default=lambda: datetime.now(timezone.utc))
    state = Column(String(255, collation=SQLCollationUtil.collation()))
    uid = Column(String(255, collation=SQLCollationUtil.collation()))

    _full_object = Column("object", JSON)

    Label = make_label(__tablename__)
    Tag = make_tag_v2(__tablename__)

    labels = relationship(Label, cascade="all, delete-orphan")
    tags = relationship(Tag, cascade="all, delete-orphan")

    def get_identifier_string(self) -> str:
        return f"{self.project}/{self.name}/{self.uid}"

    @property
    def full_object(self):
        # Returns None (implicit) when the JSON column is empty.
        if self._full_object:
            return json.loads(self._full_object)

    @full_object.setter
    def full_object(self, value):
        self._full_object = json.dumps(value)
class MarketplaceSource(Base, BaseModel):
    """Ordered marketplace source; the full object is stored as JSON in ``object``."""

    __tablename__ = "marketplace_sources"
    __table_args__ = (UniqueConstraint("name", name="_marketplace_sources_uc"),)

    id = Column(Integer, primary_key=True)
    name = Column(String(255, collation=SQLCollationUtil.collation()))
    index = Column(Integer)
    # Fixed: ``default`` must be a callable so the timestamp is taken per
    # INSERT.  Passing datetime.now(timezone.utc) directly evaluated once at
    # import time, stamping every row with the module-load timestamp.
    created = Column(TIMESTAMP, default=lambda: datetime.now(timezone.utc))
    updated = Column(TIMESTAMP, default=lambda: datetime.now(timezone.utc))

    _full_object = Column("object", JSON)

    def get_identifier_string(self) -> str:
        """Return a human-readable identifier for this source.

        Fixed: the previous implementation referenced ``self.project``, but
        this model has no ``project`` column, so every call raised
        AttributeError.  Sources are global and unique by name.
        """
        return f"{self.name}"

    @property
    def full_object(self):
        # Returns None (implicit) when the JSON column is empty.
        if self._full_object:
            return json.loads(self._full_object)

    @full_object.setter
    def full_object(self, value):
        self._full_object = json.dumps(value)
class DataVersion(Base, BaseModel):
    """Tracks the data-migration version applied to the database."""

    __tablename__ = "data_versions"
    __table_args__ = (UniqueConstraint("version", name="_versions_uc"),)

    id = Column(Integer, primary_key=True)
    version = Column(String(255, collation=SQLCollationUtil.collation()))
    # Fixed: ``default`` must be a callable so the timestamp is taken per
    # INSERT rather than once at import time.
    created = Column(TIMESTAMP, default=lambda: datetime.now(timezone.utc))
# Must be after all table definitions
# Convenience registries over every declarative model in this module:
_tagged = [cls for cls in Base.__subclasses__() if hasattr(cls, "Tag")]  # models with a Tag table
_labeled = [cls for cls in Base.__subclasses__() if hasattr(cls, "Label")]  # models with a Label table
_classes = [cls for cls in Base.__subclasses__()]
_table2cls = {cls.__table__.name: cls for cls in Base.__subclasses__()}  # table name -> model class
| 37.820455 | 119 | 0.658073 |
ca2c845fb49890b2270a65c3ff5fe76bc4c20d31 | 1,103 | py | Python | server/quizes/views.py | nickdotreid/opioid-mat-decision-aid | bbc2a0d8931d59cd6ab64b0b845e88c8dc1af5d1 | [
"MIT"
] | null | null | null | server/quizes/views.py | nickdotreid/opioid-mat-decision-aid | bbc2a0d8931d59cd6ab64b0b845e88c8dc1af5d1 | [
"MIT"
] | 27 | 2018-09-30T07:59:21.000Z | 2020-11-05T19:25:41.000Z | server/quizes/views.py | nickdotreid/opioid-mat-decision-aid | bbc2a0d8931d59cd6ab64b0b845e88c8dc1af5d1 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from rest_framework import serializers
from rest_framework.response import Response
from rest_framework.views import APIView
from .models import Quiz
class QuizSerializer(serializers.ModelSerializer):
    """Serialize a Quiz plus its questions, each with a fixed yes/no option pair."""

    class Meta:
        model = Quiz
        fields = ('slug', 'title', 'description')

    def to_representation(self, instance):
        data = super().to_representation(instance)
        data['questions'] = self.serialize_questions(instance)
        return data

    def serialize_questions(self, instance):
        # Every question carries the same two-option answer set; fresh dicts
        # are built per question so consumers may mutate them independently.
        return [
            {
                'text': question.text,
                'options': [
                    {'name': 'Yes', 'slug': 'yes', 'value': 1},
                    {'name': 'No', 'slug': 'no', 'value': 0},
                ],
            }
            for question in instance.questions.all()
        ]
d4a2cffc4ff29d5cb5b66d9083b2e9d36b441377 | 3,200 | py | Python | examples/pybullet/gym/pybullet_utils/system_state.py | caffett/bullet3 | 1e125425141967f8a7c99be46216ddeed7acf24d | [
"Zlib"
] | null | null | null | examples/pybullet/gym/pybullet_utils/system_state.py | caffett/bullet3 | 1e125425141967f8a7c99be46216ddeed7acf24d | [
"Zlib"
] | null | null | null | examples/pybullet/gym/pybullet_utils/system_state.py | caffett/bullet3 | 1e125425141967f8a7c99be46216ddeed7acf24d | [
"Zlib"
] | null | null | null | # -*- coding: utf-8 -*-
# -------------------------------
# Author: Zikang Xiong
# Email: zikangxiong@gmail.com
# Date: 2020-03-15 13:32:20
# Last Modified by: Zikang Xiong
# Last Modified time: 2020-03-16 00:15:14
# -------------------------------
# Get states of all bodies and joints and reset them to specific state
# Note, this cannot ensure the deterministic of a system,
# because the contact(collision) information is not stored.
# pybullet.saveState and pybullet.restoreState do store the contact information.
# A way to get deterministic excution is store a state with pybullet.saveState and modify the state with this util.
# Note, please call pybullet.setPhysicsEngineParameter(deterministicOverlappingPairs=1)
# to make sure the collision tree deterministic.
import numpy as np
def _iterable(obj):
try:
iter(obj)
return True
except TypeError:
return False
def _flatten(state):
flatten_state = []
def flatten_helper(state):
for item in state:
if _iterable(item):
flatten_helper(item)
else:
flatten_state.append(item)
flatten_helper(state)
return flatten_state
def _fill(template, flatten_state):
sys_state = template
global flatten_index
flatten_index = 0
def fill_helper(state):
global flatten_index
for i in range(len(state)):
if _iterable(state[i]):
fill_helper(state[i])
else:
state[i] = flatten_state[flatten_index]
flatten_index += 1
fill_helper(sys_state)
return sys_state
def get_current_system_state(client, flatten=False):
    """Collect joint and base states for every body in the simulation.

    For each body: one entry per joint ([position, velocity, reaction
    forces, applied motor torque]) followed by the base state
    ([position, orientation, linear velocity, angular velocity]).
    Returns a flat 1-D array when ``flatten`` is True, otherwise the
    nested structure wrapped in np.array.
    """
    # print("call get_current_system_state")
    state = []
    num_bodies = client.getNumBodies()
    for i in range(num_bodies):
        body_state = []
        for j in range(client.getNumJoints(i)):
            jointPosition, jointVelocity, jointReaction, \
                ForcesappliedJointMotorTorque = client.getJointState(i, j)
            joint_state = [jointPosition, jointVelocity,
                           list(jointReaction), ForcesappliedJointMotorTorque]
            body_state.append(joint_state)
        pos, orn = client.getBasePositionAndOrientation(i)
        linVel, angVel = client.getBaseVelocity(i)
        body_state.append([list(pos), list(orn), list(linVel), list(angVel)])
        state.append(body_state)
    # NOTE(review): the non-flatten branch builds a ragged nested list;
    # NumPy >= 1.24 raises on ragged input unless dtype=object is passed —
    # confirm the NumPy version this targets.
    return np.array(_flatten(state)) if flatten else np.array(state)
def reset_current_system_state(client, state):
    """Restore joint and base states captured by get_current_system_state.

    ``state`` may be the nested structure or a flat vector; a flat vector is
    first poured back into the nested layout of the live scene via _fill.
    Per the module header, contact information is NOT restored, so this does
    not guarantee fully deterministic resumption.
    """
    # print("call reset_current_system_state")
    if not _iterable(state[-1]):
        # Flat vector: rebuild the nested layout from the current simulation.
        template = get_current_system_state(client)
        state = _fill(template, state)
    for i in range(client.getNumBodies()):
        for j in range(client.getNumJoints(i)):
            pos = state[i][j][0]
            vel = state[i][j][1]
            # Why does this function just provide reseting position and
            # velocity
            client.resetJointState(i, j, pos, vel)
        pos, orn, linVel, angVel = state[i][-1]
        client.resetBasePositionAndOrientation(i, pos, orn)
        client.resetBaseVelocity(i, linVel, angVel)
    return client
f10315d4c25b3c98a93abcd0bdced843f3e3593c | 6,317 | py | Python | taskmon/dbmodels.py | fractal520/dbops | 20c6b6b5669e09b43cd19e6f3fa0448bc7d5eaac | [
"MIT"
] | null | null | null | taskmon/dbmodels.py | fractal520/dbops | 20c6b6b5669e09b43cd19e6f3fa0448bc7d5eaac | [
"MIT"
] | null | null | null | taskmon/dbmodels.py | fractal520/dbops | 20c6b6b5669e09b43cd19e6f3fa0448bc7d5eaac | [
"MIT"
] | null | null | null | import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.mysql.base import INTEGER
from sqlalchemy.orm import sessionmaker, relationship
from datetime import datetime
# Declarative base for all taskmon models.
Base = declarative_base()
# NOTE(review): database credentials are hard-coded in source — move them to
# configuration/secrets.  create_engine() itself is lazy; no connection is
# opened until the session below is first used.
conn = sa.create_engine('mysql+pymysql://opr:Opr*1234@127.0.0.1/dbops')
Session = sessionmaker(bind=conn)
# Module-level session shared by model helpers (e.g. Ip_address.true_ip).
session = Session()
class Ip_address(Base):
    """IPv4 address stored as an unsigned integer (MySQL INET_ATON form)."""

    __tablename__ = 'ip_addresses'
    ip_id = sa.Column(sa.Integer, primary_key=True)
    ip_address = sa.Column(INTEGER(display_width=11, unsigned=True))
    instances = relationship('Instance', backref='ip_address', lazy='dynamic')
    hosts = relationship('Host', backref='ip_address', lazy='dynamic')

    @property
    def true_ip(self):
        # Dotted-quad form of the stored integer.  Note: every access issues
        # a live query through the shared module-level session.
        return session.query(sa.func.INET_NTOA(Ip_address.ip_address)).filter_by(ip_id=self.ip_id).first()[0]

    @true_ip.setter
    def true_ip(self, true_ip):
        # Convert dotted-quad text to an integer via MySQL's INET_ATON.
        self.ip_address = session.query(sa.func.INET_ATON(true_ip)).first()[0]
class Host(Base):
    """A host machine; may run several database instances."""

    __tablename__ = 'hosts'
    host_id = sa.Column(sa.Integer, primary_key=True)
    host_name = sa.Column(sa.String(100))
    host_ip_id = sa.Column(sa.Integer, sa.ForeignKey('ip_addresses.ip_id'))
    instances = relationship('Instance', backref='host', lazy='dynamic')


class Dbinst_role(Base):
    """Role an instance plays within a database type (names in dbinst_role_name)."""

    __tablename__ = 'dbinst_roles'
    dbinst_role_id = sa.Column(sa.SmallInteger, primary_key=True)
    db_type_id = sa.Column(sa.Integer, sa.ForeignKey('dbtypes.db_type_id'))
    dbinst_role_name = sa.Column(sa.String(100))
    specify_role_instes = relationship('Instance', backref='inst_role', lazy='dynamic')


class Instance(Base):
    """One running database instance, reachable at (access IP, access port)."""

    __tablename__ = 'instances'
    instance_id = sa.Column(sa.Integer, primary_key=True)
    instance_name = sa.Column(sa.String(100))
    access_ip_id = sa.Column(sa.Integer, sa.ForeignKey('ip_addresses.ip_id'))
    access_port = sa.Column(sa.Integer)
    dbinst_role_id = sa.Column(sa.SmallInteger, sa.ForeignKey('dbinst_roles.dbinst_role_id'))
    db_id = sa.Column(sa.Integer, sa.ForeignKey('dbinfos.db_id'))
    host_id = sa.Column(sa.Integer, sa.ForeignKey('hosts.host_id'))
class Dbtype(Base):
    """Database engine type; the name is unique and required."""

    __tablename__ = 'dbtypes'
    db_type_id = sa.Column(sa.Integer, primary_key=True)
    db_type_name = sa.Column(sa.String(20), unique=True, nullable=False)
    specify_type_dbs = relationship('Dbinfo', backref='dbtype', lazy='dynamic')
    specify_type_arches = relationship('Db_arch', backref='dbtype', lazy='dynamic')


class Db_arch(Base):
    """Deployment architecture available for a given database type."""

    __tablename__ = 'db_arches'
    db_arch_id = sa.Column(sa.SmallInteger, primary_key=True)
    db_type_id = sa.Column(sa.Integer, sa.ForeignKey('dbtypes.db_type_id'))
    db_arch_name = sa.Column(sa.String(100))
    specify_arch_dbs = relationship('Dbinfo', backref='db_arch', lazy='dynamic')


class Dbinfo(Base):
    """A monitored database: its type/architecture, instances, schemas and alarms."""

    __tablename__ = 'dbinfos'
    db_id = sa.Column(sa.Integer, primary_key=True)
    dbname = sa.Column(sa.String(100))
    db_type_id = sa.Column(sa.Integer, sa.ForeignKey('dbtypes.db_type_id'))
    db_arch_id = sa.Column(sa.SmallInteger, sa.ForeignKey('db_arches.db_arch_id'))
    # Callable default: evaluated per INSERT.
    add_time = sa.Column(sa.DateTime, index=True, default=datetime.utcnow)
    instances = relationship('Instance', backref='dbinfo', lazy='dynamic')
    alarm_logs = relationship('Alarm_log', backref='dbinfo', lazy='dynamic')
    alarm_thresholds = relationship('Alarm_threshold', backref='dbinfo', lazy='dynamic')
    schemas = relationship('Db_schema', backref='dbinfo', lazy='dynamic')

    def __repr__(self):
        return '<Dbinfo %r>' % self.dbname


class Db_schema(Base):
    """A schema inside a monitored database."""

    __tablename__ = 'db_schemas'
    schema_id = sa.Column(sa.Integer, primary_key=True)
    schema_name = sa.Column(sa.String(100))
    db_id = sa.Column(sa.Integer, sa.ForeignKey('dbinfos.db_id'))
class Alarm_level(Base):
    """Severity level referenced by alarm logs and thresholds."""

    __tablename__ = 'alarm_levels'
    level_id = sa.Column(sa.Integer, primary_key=True)
    level_name = sa.Column(sa.String(20))
    level_desc = sa.Column(sa.Text)
    alarm_logs = relationship('Alarm_log', backref='alarm_level', lazy='dynamic')
    alarm_thresholds = relationship('Alarm_threshold', backref='alarm_level', lazy='dynamic')


class Alarm_log(Base):
    """One raised alarm for a database, with open/finish lifecycle timestamps."""

    __tablename__ = 'alarm_logs'
    id = sa.Column(sa.Integer, primary_key=True)
    db_id = sa.Column(sa.Integer, sa.ForeignKey('dbinfos.db_id'))
    alarm_message = sa.Column(sa.Text)
    # NOTE(review): level_name duplicates data reachable via level_id and the
    # two can drift apart — confirm the denormalisation is intentional.
    level_name = sa.Column(sa.String(20))
    level_id = sa.Column(sa.Integer, sa.ForeignKey('alarm_levels.level_id'))
    check_id = sa.Column(sa.Integer, sa.ForeignKey('check_items.check_id'))
    create_time = sa.Column(sa.DateTime, index=True, default=datetime.utcnow)
    # NOTE(review): the meaning of the default status value 2 is not defined
    # in this module — confirm against the consuming code.
    status = sa.Column(sa.SmallInteger, default=2)
    finish_time = sa.Column(sa.DateTime)


class Check_connectivity_log(Base):
    """Result of a single connectivity check against a database."""

    __tablename__ = 'check_connectivity_logs'
    id = sa.Column(sa.Integer, primary_key=True)
    db_id = sa.Column(sa.Integer, sa.ForeignKey('dbinfos.db_id'))
    status = sa.Column(sa.String(20))
    check_time = sa.Column(sa.DateTime, index=True, default=datetime.utcnow)


class Check_connect_num_log(Base):
    """Snapshot of a database's connection count versus its configured maximum."""

    __tablename__ = 'check_connect_num_logs'
    id = sa.Column(sa.Integer, primary_key=True)
    db_id = sa.Column(sa.Integer, sa.ForeignKey('dbinfos.db_id'))
    connect_num = sa.Column(sa.Integer)
    max_num = sa.Column(sa.Integer)
    check_time = sa.Column(sa.DateTime, index=True, default=datetime.utcnow)


class Check_item(Base):
    """Definition of a monitoring check: name, run frequency and active flag."""

    __tablename__ = 'check_items'
    check_id = sa.Column(sa.Integer, primary_key=True)
    check_name = sa.Column(sa.String(100))
    frequency = sa.Column(sa.SmallInteger)
    active = sa.Column(sa.Boolean, default=True)
    description = sa.Column(sa.Text)
    # NOTE(review): presumably the name of the log model class used to record
    # this check's results — confirm against the task runner.
    class_of_log = sa.Column(sa.String(50))
    alarm_logs = relationship('Alarm_log', backref='check_item', lazy='dynamic')
    alarm_thresholds = relationship('Alarm_threshold', backref='check_item', lazy='dynamic')


class Alarm_threshold(Base):
    """Per-database threshold mapping a check to an alarm level."""

    __tablename__ = 'alarm_thresholds'
    id = sa.Column(sa.Integer, primary_key=True)
    db_id = sa.Column(sa.Integer, sa.ForeignKey('dbinfos.db_id'))
    check_id = sa.Column(sa.Integer, sa.ForeignKey('check_items.check_id'))
    level_id = sa.Column(sa.Integer, sa.ForeignKey('alarm_levels.level_id'))
    threshold = sa.Column(sa.Numeric(3, 2))
    active = sa.Column(sa.Boolean, default=True)
| 40.754839 | 109 | 0.723286 |
c4a435ab4e16392d2888929b94d0cc4a041cfdce | 2,677 | py | Python | ObstacleAvoidance/server_depth.py | alexvbogdan/Assistant-for-People-with-Low-Vision | 2c8d60a857a63ce516f33263e61313a3bad0695f | [
"MIT"
] | 1 | 2020-08-21T07:35:54.000Z | 2020-08-21T07:35:54.000Z | ObstacleAvoidance/server_depth.py | alexvbogdan/Assistant-for-People-with-Low-Vision | 2c8d60a857a63ce516f33263e61313a3bad0695f | [
"MIT"
] | null | null | null | ObstacleAvoidance/server_depth.py | alexvbogdan/Assistant-for-People-with-Low-Vision | 2c8d60a857a63ce516f33263e61313a3bad0695f | [
"MIT"
] | null | null | null | from __future__ import print_function
from flask import Flask, request, redirect, url_for, jsonify, send_from_directory
from time import time
import time
import hashlib
import os
import sys
import imp
import pdb
import cv2
from build import Depth_extraction
# NOTE(review): absolute deployment-specific paths hard-coded in source —
# consider moving to configuration.
UPLOAD_FOLDER = '/home/oleg/server_assembly/depth/uploads/original/'
UPLOAD_FOLDER_LEFT = '/home/oleg/server_assembly/depth/uploads/left/'
UPLOAD_FOLDER_RIGHT = '/home/oleg/server_assembly/depth/uploads/right/'
# Stereo-calibration parameters consumed by Depth_extraction.mainRoutine.
paramPath = '/home/oleg/server_assembly/depth/StereoParams.yml'
ALLOWED_EXTENSIONS = set(['jpg', 'jpeg', 'JPG', 'JPEG', 'png', 'PNG'])

# global variables
app = Flask(__name__, static_url_path='')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# helpers
def setup():
    """Create the base upload directory if it does not exist yet."""
    # uploads
    # NOTE(review): only UPLOAD_FOLDER is created here; the left/right
    # sub-folders used by upload_all are assumed to exist — confirm.
    if not os.path.exists(UPLOAD_FOLDER):
        os.makedirs(UPLOAD_FOLDER)
def allowed_file(filename):
    """Return True if ``filename`` carries an extension in ALLOWED_EXTENSIONS."""
    stem, sep, extension = filename.rpartition('.')
    return bool(sep) and extension in ALLOWED_EXTENSIONS
# routes
@app.route('/', methods=['GET'])
def index():
    """Simple liveness endpoint; returns a fixed marker string."""
    return "GET REQUEST ENTERED"
    #app.send_static_file('demo2.html')
@app.route('/api/upload_all', methods=['POST'])
def upload_all():
    """Accept a side-by-side stereo image, split it, and run depth extraction.

    Expects a multipart upload under the "image" field.  Saves the original
    and the left/right halves under content-hash filenames, then calls
    Depth_extraction.mainRoutine.  Responds with JSON:
    {'distance': <str>, 'side': 'left'|'front'|'Right'|'none'} or an
    {'error': ...} payload for bad uploads.
    """
    print("new request!")
    file = request.files['image']
    if not file:
        print("no file!!!!")
        return jsonify({'error': 'No file was uploaded.'})
    print("file ok")
    if not allowed_file(file.filename):
        print("format is not allowed")
        return jsonify({'error': 'Please upload a JPG or PNG.'})

    # Name the files by content hash so identical uploads overwrite in place.
    file_hash = hashlib.md5(file.read()).hexdigest()
    save_path_original = UPLOAD_FOLDER + file_hash + '.png'
    file.seek(0)  # the md5 read above consumed the stream; rewind before saving
    file.save(save_path_original)

    save_path_left = UPLOAD_FOLDER_LEFT + file_hash + '.png'
    save_path_right = UPLOAD_FOLDER_RIGHT + file_hash + '.png'
    orgImage = cv2.imread(save_path_original)
    # Fixed: slice indices must be ints — the old "/" division produced a
    # float under Python 3 and broke the crop.
    half_width = orgImage.shape[1] // 2
    crop_img_L = orgImage[0:orgImage.shape[0], 0:half_width]
    cv2.imwrite(save_path_left, crop_img_L)
    crop_img_R = orgImage[0:orgImage.shape[0], half_width:orgImage.shape[1]]
    cv2.imwrite(save_path_right, crop_img_R)
    print("saved files")

    try:
        depth, side_int = Depth_extraction.mainRoutine(save_path_left, save_path_right, paramPath)
    except Exception:
        # Depth extraction failed; report "no obstacle" rather than a 500.
        # (Previously a bare except:, which also swallowed KeyboardInterrupt.)
        return jsonify({'distance': '0.0', 'side': 'none'})
    print("finished processing")
    print(depth)

    if side_int == 0:
        side = "left"
    elif side_int == 1:
        side = "front"
    elif side_int == 2:
        # NOTE(review): capitalisation differs from 'left'/'front' — kept
        # as-is for client compatibility.
        side = "Right"
    else:
        # Fixed: an unexpected side_int previously raised NameError on the
        # return below; fall back to the same payload as the failure path.
        side = "none"
    return jsonify({'distance': str(depth), 'side': side})
if __name__ == '__main__':
    setup()
    # NOTE(review): binds to all interfaces (0.0.0.0) — confirm this service
    # is meant to be network-exposed.
    app.run(host='0.0.0.0', port=5001, debug=False)
| 30.770115 | 98 | 0.688457 |
a7069da47227cb14699b241dd2ac885815ad64e6 | 1,734 | py | Python | code/actions/github.py | lpmatos/corona-tracker | ac4e28e6d2f91b2b5859b9900e3e92a5f1cdeacc | [
"MIT"
] | 1 | 2020-03-19T18:34:30.000Z | 2020-03-19T18:34:30.000Z | code/actions/github.py | lpmatos/corona-tracker | ac4e28e6d2f91b2b5859b9900e3e92a5f1cdeacc | [
"MIT"
] | null | null | null | code/actions/github.py | lpmatos/corona-tracker | ac4e28e6d2f91b2b5859b9900e3e92a5f1cdeacc | [
"MIT"
] | null | null | null | # =============================================================================
# IMPORTS
# =============================================================================
import csv
import requests
from cachetools import cached, TTLCache
# =============================================================================
# GLOBAL
# =============================================================================
# Raw CSV endpoints of the wcota/covid19br dataset (per-city, per-state, totals).
CASOS_POR_CIDADE = "https://raw.githubusercontent.com/wcota/covid19br/master/cases-brazil-cities.csv"
CASOS_POR_ESTADO = "https://raw.githubusercontent.com/wcota/covid19br/master/cases-brazil-states.csv"
CASOS_TOTAIS = "https://raw.githubusercontent.com/wcota/covid19br/master/cases-brazil-total.csv"
# Template filled with the lower-cased "vision" name by get_brazil_information.
BASE_URL = "https://raw.githubusercontent.com/wcota/covid19br/master/cases-brazil-%s.csv"
# =============================================================================
# FUNCTIONS
# =============================================================================
@cached(cache=TTLCache(maxsize=1024, ttl=3600))
def get_brazil_information(vision):
    """Fetch the raw CSV text for a dataset view (e.g. "cities", "states", "total").

    Returns the response body on HTTP 200, or False on any non-200 status or
    request failure.  Raises Exception when ``vision`` is not a string.
    NOTE(review): False results are cached for the full one-hour TTL as well —
    confirm that is intended.
    """
    if isinstance(vision, str):
        vision = vision.lower()
        try:
            request = requests.get(BASE_URL % vision)
            if request.status_code == 200:
                return request.text
            else:
                return False
        except Exception as error:
            return False
    else:
        raise Exception("We need a string.")
def parse_to_csv(information):
    """Parse raw CSV text into a list of row dicts; returns False on failure."""
    try:
        rows = csv.DictReader(information.splitlines())
        return [row for row in rows]
    except Exception:
        return False
def find_estado(estado, information):
    """Return the first parsed row whose "state" column equals ``estado`` (upper-cased).

    Raises IndexError when no row matches, and TypeError when parse_to_csv()
    returned False (i.e. the CSV could not be parsed).
    """
    return [elemento for elemento in parse_to_csv(information) if elemento["state"] == estado.upper()][0]
| 38.533333 | 105 | 0.502307 |
c1bbb0f54468169c736a51e88e56c5a7c092294c | 578 | py | Python | db/do_sqlite.py | LaurenceYang/learn-python | 819994039abd3af298f73b1a73976eaa95071096 | [
"Apache-2.0"
] | 2 | 2018-01-20T03:38:58.000Z | 2019-07-21T11:33:24.000Z | db/do_sqlite.py | LaurenceYang/learn-python | 819994039abd3af298f73b1a73976eaa95071096 | [
"Apache-2.0"
] | null | null | null | db/do_sqlite.py | LaurenceYang/learn-python | 819994039abd3af298f73b1a73976eaa95071096 | [
"Apache-2.0"
] | null | null | null | import sqlite3
conn = sqlite3.connect('test.db')
cursor = conn.cursor()
cursor.execute('create table if not EXISTS user (id VARCHAR(20) PRIMARY key, name VARCHAR(20))')
cursor.execute('insert into user (id, name) VALUES (\'3\', \'xiaoxiao\')')
print('rowcount =', cursor.rowcount)
cursor.close()
conn.commit()
conn.close()
# 查询记录:
conn = sqlite3.connect('test.db')
cursor = conn.cursor()
# 执行查询语句:
# cursor.execute('select * from user where id=?', '1')
cursor.execute('select * from user')
# 获得查询结果集:
values = cursor.fetchall()
print(values)
cursor.close()
conn.close() | 20.642857 | 96 | 0.693772 |
5e4d6445dc7b246adc52993f9f8fcb1777ef3118 | 1,699 | py | Python | tests/test_extend_trace_selectively.py | bcliang/dash-extendable-graph | 5163f551d0e4b1c8ec283d903d58691241c84e51 | [
"MIT"
] | 13 | 2019-03-14T21:21:17.000Z | 2021-12-23T08:32:41.000Z | tests/test_extend_trace_selectively.py | bcliang/dash-extendable-graph | 5163f551d0e4b1c8ec283d903d58691241c84e51 | [
"MIT"
] | 43 | 2019-01-27T17:33:07.000Z | 2022-01-20T21:07:10.000Z | tests/test_extend_trace_selectively.py | bcliang/dash-extendable-graph | 5163f551d0e4b1c8ec283d903d58691241c84e51 | [
"MIT"
] | 1 | 2020-06-19T14:38:26.000Z | 2020-06-19T14:38:26.000Z | import dash_extendable_graph as deg
import dash
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import dash_html_components as html
import dash_core_components as dcc
import json
# extending a specific trace (trace indices)
def test_extg004_extend_trace_selectively(dash_duo):
    """End-to-end check: extendData with a trace-index list extends only trace 1."""
    app = dash.Dash(__name__)
    app.layout = html.Div(
        [
            deg.ExtendableGraph(
                id="extend_trace_selectively",
                figure=dict(data=[dict(y=[0]), dict(y=[1])]),
            ),
            html.Div(id="output"),
            # Fires once (max_intervals=1) to trigger the extend callback.
            dcc.Interval(
                id="interval_extendablegraph_update",
                interval=100,
                n_intervals=0,
                max_intervals=1,
            ),
        ]
    )

    @app.callback(
        Output("extend_trace_selectively", "extendData"),
        [Input("interval_extendablegraph_update", "n_intervals")],
        [State("extend_trace_selectively", "figure")],
    )
    def trace_will_extend_selectively(n_intervals, figure):
        # Append y=[2] to the trace at index 1 only; skip the initial tick.
        if n_intervals is None or n_intervals < 1:
            raise PreventUpdate
        return [dict(y=[2])], [1]

    @app.callback(
        Output("output", "children"),
        [Input("extend_trace_selectively", "extendData")],
        [State("extend_trace_selectively", "figure")],
    )
    def display_data(trigger, figure):
        # Mirror the current figure data into #output for the assertion below.
        if figure is None:
            raise PreventUpdate
        return json.dumps(figure["data"])

    dash_duo.start_server(app)
    dash_duo.find_element("#extend_trace_selectively")
    # Trace 0 untouched, trace 1 extended with the new point.
    comparison = json.dumps([dict(y=[0]), dict(y=[1, 2])])
    dash_duo.wait_for_text_to_equal("#output", comparison)
2c1150a3238057491c5cae7cb0e00ec4942b3592 | 933 | py | Python | 4 - manipulating dataframes with pandas/Grouping data/computing multiple aggregates of multiple columns.py | Baidaly/datacamp-samples | 09b3e253ec2c503df936298fedc3902413c987b0 | [
"MIT"
] | null | null | null | 4 - manipulating dataframes with pandas/Grouping data/computing multiple aggregates of multiple columns.py | Baidaly/datacamp-samples | 09b3e253ec2c503df936298fedc3902413c987b0 | [
"MIT"
] | null | null | null | 4 - manipulating dataframes with pandas/Grouping data/computing multiple aggregates of multiple columns.py | Baidaly/datacamp-samples | 09b3e253ec2c503df936298fedc3902413c987b0 | [
"MIT"
] | null | null | null | '''
The .agg() method can be used with a tuple or list of aggregations as input. When applying multiple aggregations on multiple columns, the aggregated DataFrame has a multi-level column index.
In this exercise, you're going to group passengers on the Titanic by 'pclass' and aggregate the 'age' and 'fare' columns by the functions 'max' and 'median'. You'll then use multi-level selection to find the oldest passenger per class and the median fare price per class.
The DataFrame has been pre-loaded as titanic.
'''
# Group titanic by 'pclass': by_class
by_class = titanic.groupby('pclass')
# Select 'age' and 'fare'
by_class_sub = by_class[['age','fare']]
# Aggregate by_class_sub by 'max' and 'median': aggregated
aggregated = by_class_sub.agg(['max', 'median'])
# Print the maximum age in each class
print(aggregated.loc[:, ('age','max')])
# Print the median fare in each class
print(aggregated.loc[:, ('fare', 'median')])
| 42.409091 | 271 | 0.73955 |
6a05ac9e0f9263a9267a3e7b85141fca121ab7a0 | 1,168 | py | Python | bootstrapvz/providers/virtualbox/__init__.py | qqshfox/bootstrap-vz | 38fc7c52407d015d3c75867bfea266d0aec6d7e2 | [
"Apache-2.0"
] | 1 | 2019-06-11T17:36:37.000Z | 2019-06-11T17:36:37.000Z | bootstrapvz/providers/virtualbox/__init__.py | qqshfox/bootstrap-vz | 38fc7c52407d015d3c75867bfea266d0aec6d7e2 | [
"Apache-2.0"
] | 3 | 2017-05-10T15:04:10.000Z | 2017-06-02T18:14:50.000Z | bootstrapvz/providers/virtualbox/__init__.py | qqshfox/bootstrap-vz | 38fc7c52407d015d3c75867bfea266d0aec6d7e2 | [
"Apache-2.0"
] | 14 | 2016-12-15T09:29:10.000Z | 2021-01-28T13:06:14.000Z | from bootstrapvz.common import task_groups
import tasks.packages
import tasks.boot
from bootstrapvz.common.tasks import image
from bootstrapvz.common.tasks import loopback
def validate_manifest(data, validator, error):
    # Validate the manifest data against this provider's schema.
    # NOTE(review): `error` is accepted for interface parity with other
    # providers but is unused here -- presumably `validator` raises on
    # schema violations itself; confirm against the common tooling.
    from bootstrapvz.common.tools import rel_path
    validator(data, rel_path(__file__, 'manifest-schema.yml'))
def resolve_tasks(taskset, manifest):
    # Assemble the build taskset: standard groups plus VirtualBox specifics
    # (loopback image creation, grub virtual-console output, final image move).
    taskset.update(task_groups.get_standard_groups(manifest))
    taskset.update([tasks.packages.DefaultPackages,
                    tasks.boot.AddVirtualConsoleGrubOutputDevice,
                    loopback.AddRequiredCommands,
                    loopback.Create,
                    image.MoveImage,
                    ])
    if manifest.provider.get('guest_additions', False):
        # Optional: install VirtualBox guest additions when the manifest
        # supplies a guest-additions ISO path.
        from tasks import guest_additions
        taskset.update([guest_additions.CheckGuestAdditionsPath,
                        guest_additions.AddGuestAdditionsPackages,
                        guest_additions.InstallGuestAdditions,
                        ])
def resolve_rollback_tasks(taskset, manifest, completed, counter_task):
    # Roll back by scheduling the standard rollback counterparts of whatever
    # tasks completed before the failure.  `manifest` and `counter_task` are
    # part of the provider interface but unused by this provider.
    taskset.update(task_groups.get_standard_rollback_tasks(completed))
| 35.393939 | 71 | 0.695205 |
6b5c5576ebe707b3e567bd57c13d786b8448e7b3 | 12,148 | py | Python | fem/utilities/tables/empty_table/_empty_table.py | mjredmond/FEMApp | dd8cc53acf80d0a1bb83ce9c89bcfd51e85c6be8 | [
"MIT"
] | 1 | 2019-08-03T21:40:26.000Z | 2019-08-03T21:40:26.000Z | fem/utilities/tables/empty_table/_empty_table.py | mjredmond/FEMApp | dd8cc53acf80d0a1bb83ce9c89bcfd51e85c6be8 | [
"MIT"
] | null | null | null | fem/utilities/tables/empty_table/_empty_table.py | mjredmond/FEMApp | dd8cc53acf80d0a1bb83ce9c89bcfd51e85c6be8 | [
"MIT"
] | null | null | null | """
fem.utilities.tables.abstract_table._abstract_table
Author: Michael Redmond
"""
from __future__ import print_function, absolute_import
from six.moves import range
from qtpy import QtCore, QtWidgets, QtGui
from fem.utilities import MrSignal
from operator import itemgetter
import numpy as np
class EmptyTable(QtWidgets.QWidget):
    """Reusable table widget: a QTableView plus Add/Insert/Delete buttons,
    up/down movers, and a row-count line edit.

    Emits framework-agnostic MrSignal signals (selection_changed, paste,
    undo/redo, set_rows, ...) so a controller can wire in behavior without
    subclassing.  Ctrl+wheel zooms the table font.
    """

    def __init__(self, parent=None, *args):
        super(EmptyTable, self).__init__(parent, *args)
        self.setLayout(QtWidgets.QVBoxLayout())

        #### buttons ####
        self.pushButton_add = QtWidgets.QPushButton('Add', self)
        self.pushButton_insert = QtWidgets.QPushButton('Insert', self)
        self.pushButton_delete = QtWidgets.QPushButton('Delete', self)

        self.button_spacer = QtWidgets.QSpacerItem(
            100, 10, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
        )

        self.button_layout = QtWidgets.QHBoxLayout()
        self.button_layout.addWidget(self.pushButton_add)
        self.button_layout.addWidget(self.pushButton_insert)
        self.button_layout.addWidget(self.pushButton_delete)
        self.button_layout.addItem(self.button_spacer)

        #### table_2 ####
        self.table = QtWidgets.QTableView(self)
        # Monkey-patch view methods so zooming and resizing route through
        # this widget's handlers (which honor _scroll_factor).
        self.table.wheelEvent = self._wheel_event
        self.table.resizeRowsToContents = self._resize_rows
        self.table.resizeColumnsToContents = self._resize_columns

        # self.table_2.setModel(QtCore.QAbstractTableModel())

        #### bottom buttons ####
        self.pushButton_up = QtWidgets.QPushButton('^', self)
        self.pushButton_down = QtWidgets.QPushButton('v', self)

        self.lineEdit_rows = QtWidgets.QLineEdit(self)
        self.lineEdit_rows.setAlignment(QtCore.Qt.AlignCenter)
        self.lineEdit_rows.setCursor(QtCore.Qt.ArrowCursor)
        self.lineEdit_rows.setMaximumWidth(50)
        self.lineEdit_rows.editingFinished.connect(self._set_rows)
        self.lineEdit_rows.mousePressEvent = self._rows_mouse_press
        self.lineEdit_rows.setStyleSheet(
            """
            QLineEdit::hover{
            background-color: Lightcyan
            }
            """
        )

        self.bottom_spacer = QtWidgets.QSpacerItem(
            100, 10, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
        )

        self.bottom_layout = QtWidgets.QHBoxLayout()
        self.bottom_layout.addWidget(self.pushButton_up)
        self.bottom_layout.addWidget(self.pushButton_down)
        self.bottom_layout.addItem(self.bottom_spacer)
        self.bottom_layout.addWidget(self.lineEdit_rows)

        #### add to layout ####
        self.layout().addItem(self.button_layout)
        self.layout().addWidget(self.table)
        self.layout().addItem(self.bottom_layout)

        # self.table_2.selectionModel().selectionChanged.connect(self._selection_changed)

        # Track previous selection so _selection_changed can emit only on
        # actual row/column transitions.
        self._old_row = -1
        self._old_column = -1

        # True while Enter/Return is held; forwarded with set_data so the
        # model can distinguish enter-commits from other edits.
        self._enter_down = False

        # Outward-facing signals (MrSignal: framework-independent observers).
        self.selection_changed = MrSignal()
        self.row_changed = MrSignal()
        self.column_changed = MrSignal()
        self.undo = MrSignal()
        self.redo = MrSignal()
        self.paste = MrSignal()
        self.set_data = MrSignal()
        self.set_rows = MrSignal()
        self.data_changed = MrSignal()

        self.table.keyPressEvent = self._keyPressEvent
        self.table.keyReleaseEvent = self._keyReleaseEvent
        # self.table.mousePressEvent = self._mousePressEvent

        # Font-zoom ratio applied during a Ctrl+wheel resize; 1.0 otherwise.
        self._scroll_factor = 1.

    def _wheel_event(self, event):
        # Plain wheel scrolls; Ctrl+wheel zooms the table font by one point.
        if not QtWidgets.QApplication.keyboardModifiers() & QtCore.Qt.ControlModifier:
            return QtWidgets.QTableView.wheelEvent(self.table, event)

        font = self.table.font()

        new_size = old_size = font.pointSize()

        if event.angleDelta().y() > 0:
            new_size += 1
        else:
            new_size -= 1

        # Never shrink to zero; keep the old size instead.
        if new_size == 0:
            new_size = old_size

        self._scroll_factor = new_size / old_size

        font.setPointSize(new_size)

        self.table.setFont(font)
        self.table.horizontalHeader().setFont(font)
        self.table.verticalHeader().setFont(font)

        # Scale row heights and column widths by the same ratio as the font.
        self._resize_rows()
        self._resize_columns()

        self._scroll_factor = 1.

        # return QtWidgets.QTableView.wheelEvent(self.table, event)

    def _resize_rows(self):
        # Rescale every row height by the current zoom factor.
        header = self.table.verticalHeader()
        for i in range(self.table.model().rowCount()):
            header.resizeSection(i, self.table.rowHeight(i) * self._scroll_factor)

    def _resize_columns(self):
        # Rescale every column width by the current zoom factor.
        header = self.table.horizontalHeader()
        for i in range(self.table.model().columnCount()):
            header.resizeSection(i, self.table.columnWidth(i) * self._scroll_factor)

    def _rows_mouse_press(self, event):
        # Clicking the rows box selects its whole contents for easy overwrite.
        event.accept()
        self.lineEdit_rows.selectAll()

    def _set_rows(self, *args):
        # Parse the requested row count; -1 signals "invalid input" downstream.
        try:
            rows = int(self.lineEdit_rows.text())
        except (ValueError, TypeError):
            rows = -1

        self.lineEdit_rows.clearFocus()

        self.set_rows.emit(rows)

    def hide_buttons(self):
        # Hide all chrome, leaving only the bare table view.
        self.pushButton_add.hide()
        self.pushButton_insert.hide()
        self.pushButton_delete.hide()
        self.pushButton_up.hide()
        self.pushButton_down.hide()
        self.lineEdit_rows.hide()

    def show_buttons(self):
        # Restore the chrome hidden by hide_buttons().
        self.pushButton_add.show()
        self.pushButton_insert.show()
        self.pushButton_delete.show()
        self.pushButton_up.show()
        self.pushButton_down.show()
        self.lineEdit_rows.show()

    def set_model(self, model):
        # Attach the table model and wire its signals through to ours.
        # NOTE(review): assumes the model exposes MrSignal attributes
        # set_data and a Qt dataChanged signal -- confirm model contract.
        self.table.setModel(model)
        self.table.selectionModel().selectionChanged.connect(self._selection_changed)
        self.table.model().set_data.connect(self._set_data)
        self.table.model().dataChanged.connect(self._data_changed)

    def _data_changed(self, *args):
        self.data_changed.emit()

    def _set_data(self, *args):
        # Prepend the enter-key state so observers know how the edit was committed.
        self.set_data.emit(self._enter_down, *args)

    def set_selection(self, selections):
        # Select the given (row, column) pairs; returns the first QModelIndex
        # selected (or None for an empty list).
        try:
            QtWidgets.QApplication.instance().focusWidget().clearFocus()
        except AttributeError:
            # No widget currently has focus.
            pass

        selection_model = self.table.selectionModel()
        selection_model.clearSelection()

        first_index = None

        model = self.table.model()

        for i in range(len(selections)):
            selection = selections[i]
            index = model.index(selection[0], selection[1])
            selection_model.select(index, QtCore.QItemSelectionModel.Select)
            if first_index is None:
                first_index = index

        self.table.setFocus()

        return first_index

    def set_selection_and_index(self, selections):
        # Like set_selection, but also moves the current index to the first cell.
        self.table.setCurrentIndex(self.set_selection(selections))

    def select_last_row(self):
        data_len = self.table.model().rowCount()
        self.set_selection_and_index([[data_len - 1, 0]])

    def select_and_edit(self, row, column):
        # Select a single cell and open an editor on it.
        # NOTE(review): QWidget has no edit(); this looks like it should be
        # self.table.edit(index) -- confirm before relying on this method.
        index = self.table.model().index(row, column)
        selection_model = self.table.selectionModel()
        selection_model.clear()
        selection_model.select(index, QtCore.QItemSelectionModel.Select)
        self.edit(index)

    def selection_range(self):
        # Bounding box of the current selection as [(min_row, min_col),
        # (max_row, max_col)]; sentinel values if nothing is selected.
        selection = self.table.selectionModel().selectedIndexes()

        min_row = 9999999
        max_row = -1
        min_col = 9999999
        max_col = -1

        for index in selection:
            row = index.row()
            col = index.column()

            min_row = min(min_row, row)
            max_row = max(max_row, row)
            min_col = min(min_col, col)
            max_col = max(max_col, col)

        return [(min_row, min_col), (max_row, max_col)]

    def selection(self):
        # All selected (row, column) pairs, sorted by row.
        selection = self.table.selectionModel().selectedIndexes()

        tmp = []

        for index in selection:
            row = index.row()
            col = index.column()
            tmp.append((row, col))

        return sorted(tmp, key=itemgetter(0))

    def update_all(self):
        # Ask the model to refresh everything; silently skip models that
        # don't implement update_all (or when no model is set).
        try:
            self.table.model().update_all()
        except AttributeError:
            pass

    def _selection_changed(self, current, previous):
        """
        Emit row_changed / column_changed / selection_changed when the lead
        selected cell actually moves.

        :type current: QtCore.QItemSelection
        :type previous: QtCore.QItemSelection
        """
        try:
            first_index = current.indexes()[0]
        except IndexError:
            # Selection was cleared; nothing to report.
            return

        new_row = first_index.row()
        new_column = first_index.column()

        old_row = self._old_row
        old_column = self._old_column

        selection_changed = False

        if new_row != old_row:
            self._old_row = new_row
            self.row_changed.emit(self._old_row)
            selection_changed = True

        if new_column != old_column:
            self._old_column = new_column
            self.column_changed.emit(self._old_column)
            selection_changed = True

        if selection_changed:
            self.selection_changed.emit((new_row, new_column))

    def _keyPressEvent(self, event):
        # Let the view handle navigation first, then intercept the shortcuts
        # we translate into MrSignal events.
        QtWidgets.QTableView.keyPressEvent(self.table, event)

        if event.key() in [QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return]:
            self._enter_down = True
            # event.accept()
        elif event.key() == QtCore.Qt.Key_Tab:
            event.accept()

        if event.matches(QtGui.QKeySequence.Copy):
            self._copy()
            event.accept()
        elif event.matches(QtGui.QKeySequence.Paste):
            # Forward clipboard text plus the target range to observers.
            paste_data = str(QtWidgets.QApplication.clipboard().text())
            self.paste.emit(self.selection_range(), paste_data)
            event.accept()
        elif event.matches(QtGui.QKeySequence.Undo):
            event.accept()
            self.undo.emit()
        elif event.matches(QtGui.QKeySequence.Redo):
            event.accept()
            self.redo.emit()

    def _keyReleaseEvent(self, event):
        if event.key() in [QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return]:
            self._enter_down = False

        QtWidgets.QTableView.keyReleaseEvent(self.table, event)

    def mousePressEvent(self, event):
        # Clicking anywhere outside the rows box commits/cancels its edit.
        self.lineEdit_rows.clearFocus()
        QtWidgets.QWidget.mousePressEvent(self, event)

    def current_index(self):
        # Current cell as a plain (row, column) tuple.
        index = self.table.currentIndex()
        return index.row(), index.column()

    def _copy(self):
        # Serialize the selected cells to the clipboard as tab/newline
        # separated text (Excel-compatible); unselected cells inside the
        # bounding box become empty strings.
        model = self.table.model()

        selection = self.table.selectionModel().selectedIndexes()

        edit_role = QtCore.Qt.EditRole

        try:
            row1 = selection[0].row()
            col1 = selection[0].column()
        except IndexError:
            # Nothing selected.
            return

        copy_data = []

        min_row = 9999999
        max_row = -1
        min_col = 9999999
        max_col = -1

        for index in selection:
            row = index.row()
            col = index.column()

            min_row = min(min_row, row)
            max_row = max(max_row, row)
            min_col = min(min_col, col)
            max_col = max(max_col, col)

            data = str(model.data(index, edit_role))

            copy_data.append([row, col, data])

        rows = max_row - min_row + 1
        columns = max_col - min_col + 1

        if rows == 0 or columns == 0:
            return

        # Dense grid covering the selection's bounding box.
        np_data = np.zeros((rows, columns), dtype=object)

        for tmp in copy_data:
            row, col, data = tmp
            row -= min_row
            col -= min_col
            np_data[row, col] = data

        text = []

        for i in range(np_data.shape[0]):
            _text = []
            for j in range(np_data.shape[1]):
                _text.append(np_data[i, j])
            text.append('\t'.join(_text))

        text = '\n'.join(text)

        # noinspection PyArgumentList
        clipboard = QtWidgets.QApplication.clipboard()
        """:type: QtGui.QClipboard"""

        clipboard.setText(text)

    def row_count(self):
        return self.table.model().rowCount()
if __name__ == '__main__':
    # Manual smoke test: show a standalone, empty table widget.
    import sys

    qt_app = QtWidgets.QApplication([])
    demo = EmptyTable()
    demo.show()
    sys.exit(qt_app.exec_())
| 27.990783 | 89 | 0.615904 |
99679b8e5d081577d74f03ebf3e963ad1f790475 | 3,277 | py | Python | mvn/models/loss.py | kristijanbartol/learnable-triangulation-pytorch | a8475847f9c71119fa557aad4328b326be09c136 | [
"MIT"
] | null | null | null | mvn/models/loss.py | kristijanbartol/learnable-triangulation-pytorch | a8475847f9c71119fa557aad4328b326be09c136 | [
"MIT"
] | null | null | null | mvn/models/loss.py | kristijanbartol/learnable-triangulation-pytorch | a8475847f9c71119fa557aad4328b326be09c136 | [
"MIT"
] | null | null | null | import numpy as np
import torch
from torch import nn
def expand_tensor(t, n_view_comb):
    """Tile a 4-D tensor ``n_view_comb`` times along dim 0, then swap the
    first two axes so the tiled axis ends up as dim 1."""
    tiled = t.repeat((n_view_comb, 1, 1, 1))
    return tiled.transpose(0, 1)
class KeypointsMSELoss(nn.Module):
    """Validity-masked mean squared error over keypoint coordinates.

    Normalized by (coordinate dimension * number of valid joints), with the
    joint count clamped to at least 1 to avoid division by zero.
    """

    def __init__(self):
        super().__init__()

    def forward(self, keypoints_pred, keypoints_gt, keypoints_binary_validity):
        n_dims = keypoints_pred.shape[-1]
        squared_error = (keypoints_gt - keypoints_pred) ** 2
        total = torch.sum(squared_error * keypoints_binary_validity)
        n_valid = max(1, torch.sum(keypoints_binary_validity).item())
        return total / (n_dims * n_valid)
class KeypointsMSESmoothLoss(nn.Module):
    """Confidence-weighted MSE with large errors flattened.

    Squared errors above ``threshold`` are compressed via
    ``err ** 0.1 * threshold ** 0.9`` so outliers do not dominate; each
    joint's error is weighted by its mean per-view confidence.
    """

    def __init__(self, threshold=400):
        super().__init__()
        self.threshold = threshold

    def forward(self, keypoints_pred, conf_pred, keypoints_gt, keypoints_binary_validity):
        # Average confidence across views -> one weight per joint.
        conf_weight = torch.mean(conf_pred, dim=1).unsqueeze(2)
        n_dims = keypoints_pred.shape[-1]
        diff = (keypoints_gt - keypoints_pred) ** 2 * keypoints_binary_validity
        # Soften errors beyond the threshold.
        large = diff > self.threshold
        diff[large] = torch.pow(diff[large], 0.1) * (self.threshold ** 0.9)
        n_valid = max(1, torch.sum(keypoints_binary_validity).item())
        return torch.sum(diff * conf_weight) / (n_dims * n_valid)
class KeypointsMAELoss(nn.Module):
    """Validity-masked mean absolute error over keypoint coordinates."""

    def __init__(self):
        super().__init__()

    def forward(self, keypoints_pred, keypoints_gt, keypoints_binary_validity):
        n_dims = keypoints_pred.shape[-1]
        abs_error = torch.abs(keypoints_gt - keypoints_pred) * keypoints_binary_validity
        n_valid = max(1, torch.sum(keypoints_binary_validity).item())
        return torch.sum(abs_error) / (n_dims * n_valid)
class KeypointsL2Loss(nn.Module):
    """Mean per-joint Euclidean distance, masked by joint validity."""

    def __init__(self):
        super().__init__()

    def forward(self, keypoints_pred, keypoints_gt, keypoints_binary_validity):
        masked_sq = (keypoints_gt - keypoints_pred) ** 2 * keypoints_binary_validity
        per_joint_dist = torch.sqrt(torch.sum(masked_sq, dim=2))
        n_valid = max(1, torch.sum(keypoints_binary_validity).item())
        return torch.sum(per_joint_dist) / n_valid
class VolumetricCELoss(nn.Module):
    """Cross-entropy over predicted 3-D probability volumes: per joint, the
    loss is -log of the predicted value at the voxel nearest the GT keypoint.
    """

    def __init__(self):
        super().__init__()

    def forward(self, coord_volumes_batch, volumes_batch_pred, keypoints_gt, keypoints_binary_validity):
        loss = 0.0
        n_losses = 0

        batch_size = volumes_batch_pred.shape[0]
        for batch_i in range(batch_size):
            coord_volume = coord_volumes_batch[batch_i]
            keypoints_gt_i = keypoints_gt[batch_i]

            # Broadcast voxel centers against GT joints to get per-voxel,
            # per-joint distances.
            # assumes coord_volume is (X, Y, Z, 3) and keypoints_gt_i is
            # (n_joints, 3) -- TODO confirm against callers.
            coord_volume_unsq = coord_volume.unsqueeze(0)
            keypoints_gt_i_unsq = keypoints_gt_i.unsqueeze(1).unsqueeze(1).unsqueeze(1)

            dists = torch.sqrt(((coord_volume_unsq - keypoints_gt_i_unsq) ** 2).sum(-1))
            # Flatten spatial dims so argmin gives one linear index per joint.
            dists = dists.view(dists.shape[0], -1)

            min_indexes = torch.argmin(dists, dim=-1).detach().cpu().numpy()
            # Convert linear indices back to (x, y, z) voxel coordinates.
            min_indexes = np.stack(np.unravel_index(min_indexes, volumes_batch_pred.shape[-3:]), axis=1)

            for joint_i, index in enumerate(min_indexes):
                validity = keypoints_binary_validity[batch_i, joint_i]
                # 1e-6 epsilon guards log(0) on zero-probability voxels.
                loss += validity[0] * (-torch.log(volumes_batch_pred[batch_i, joint_i, index[0], index[1], index[2]] + 1e-6))
                n_losses += 1

        # NOTE(review): no guard for n_losses == 0; an empty batch would raise
        # ZeroDivisionError -- confirm callers never pass one.
        return loss / n_losses
| 38.104651 | 125 | 0.67043 |
2cba0711b36025d07e50f93603ca709439bc6527 | 7,796 | py | Python | python_modules/dagster/dagster/core/types/runtime/config_schema.py | david-alexander-white/dagster | 1c341500bb2380e14873b59b7e25503270188bda | [
"Apache-2.0"
] | 1 | 2022-02-07T18:07:36.000Z | 2022-02-07T18:07:36.000Z | python_modules/dagster/dagster/core/types/runtime/config_schema.py | david-alexander-white/dagster | 1c341500bb2380e14873b59b7e25503270188bda | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/types/runtime/config_schema.py | david-alexander-white/dagster | 1c341500bb2380e14873b59b7e25503270188bda | [
"Apache-2.0"
] | null | null | null | from dagster import check
from dagster.core.decorator_utils import (
split_function_parameters,
validate_decorated_fn_positionals,
)
from dagster.core.errors import DagsterInvalidDefinitionError
from dagster.core.types.config.config_type import ConfigType, ConfigTypeKind, List, Nullable
from dagster.core.types.wrapping.builtin_enum import BuiltinEnum
from dagster.core.types.wrapping.wrapping import WrappingListType, WrappingNullableType
from dagster.utils import ensure_single_item
class InputHydrationConfig(object):
    '''Base class for input hydration configs: objects that describe how a
    parsed config value becomes a runtime value for a custom type.'''

    @property
    def schema_type(self):
        # Subclasses must return the ConfigType describing the expected config.
        check.not_implemented(
            'Must override schema_type in {klass}'.format(klass=type(self).__name__)
        )

    def construct_from_config_value(self, _context, config_value):
        '''
        How to create a runtime value from config data.
        Default behavior: pass the config value through unchanged.
        '''
        return config_value
def _resolve_config_schema_type(dagster_type):
    '''Resolve a user-supplied type (ConfigType instance, builtin, or
    wrapping List/Nullable) into a concrete ConfigType.'''
    # This replicates a subset of resolve_to_config_type
    # Including resolve_to_config_type directly has a nasty circular
    # dependency.
    if isinstance(dagster_type, ConfigType):
        return dagster_type

    if BuiltinEnum.contains(dagster_type):
        return ConfigType.from_builtin_enum(dagster_type)
    elif isinstance(dagster_type, WrappingListType):
        return List(dagster_type.inner_type)
    elif isinstance(dagster_type, WrappingNullableType):
        return Nullable(dagster_type.inner_type)

    # Unsupported input: fail loudly rather than returning None.
    check.failed('should not reach. got {dagster_type}'.format(dagster_type=dagster_type))
class BareInputSchema(InputHydrationConfig):
    '''Pass-through hydration config: the parsed config value is used as the
    runtime value unchanged (inherits the default construct_from_config_value).'''

    def __init__(self, config_type):
        self.config_type = check.inst_param(config_type, 'config_type', ConfigType)

    @property
    def schema_type(self):
        return self.config_type
def make_bare_input_schema(config_cls):
    '''Build a pass-through input schema for the given config type/class.'''
    return BareInputSchema(_resolve_config_schema_type(config_cls))
class OutputMaterializationConfig(object):
    '''Base class for output materialization configs: objects that describe
    how a runtime value is written out according to config data.'''

    @property
    def schema_type(self):
        # Subclasses must return the ConfigType describing the expected config.
        check.not_implemented(
            'Must override schema_type in {klass}'.format(klass=type(self).__name__)
        )

    def materialize_runtime_value(self, _context, _config_value, _runtime_value):
        '''
        How to materialize a runtime value given configuration.
        '''
        check.not_implemented('Must implement')
class InputSchemaFromDecorator(InputHydrationConfig):
    '''Hydration config backed by a user function supplied via the
    @input_hydration_config (or @input_selector_schema) decorator.'''

    def __init__(self, config_type, func):
        self._config_type = check.inst_param(config_type, 'config_type', ConfigType)
        self._func = check.callable_param(func, 'func')

    @property
    def schema_type(self):
        return self._config_type

    def construct_from_config_value(self, context, config_value):
        # Delegate hydration to the decorated user function.
        return self._func(context, config_value)
def _create_input_schema_for_decorator(config_type, func):
    # Thin factory; kept as a function for symmetry with _create_output_schema.
    schema = InputSchemaFromDecorator(config_type, func)
    return schema
def input_hydration_config(config_cls):
    '''Create an input hydration config that maps config data to a runtime value.

    The decorated function should take the execution context and parsed config value and return the
    appropriate runtime value.

    Args:
        config_cls (Any): The type of the config data expected by the decorated function. Users
            should provide one of the :ref:`built-in types <builtin>`, or a composite constructed
            using :py:func:`Selector` or :py:func:`PermissiveDict`.

    Examples:

    .. code-block:: python

        @input_hydration_config(PermissiveDict())
        def _dict_input(_context, value):
            return value

    '''
    config_type = _resolve_config_schema_type(config_cls)
    # The decorated function must accept a leading 'context' positional; '*'
    # allows any further parameters after it.
    EXPECTED_POSITIONALS = ['context', '*']

    def wrapper(func):
        # Validate the decorated function's signature up front so misuse
        # fails at definition time with a clear message.
        fn_positionals, _ = split_function_parameters(func, EXPECTED_POSITIONALS)
        missing_positional = validate_decorated_fn_positionals(fn_positionals, EXPECTED_POSITIONALS)
        if missing_positional:
            raise DagsterInvalidDefinitionError(
                "@input_hydration_config '{solid_name}' decorated function does not have required positional "
                "parameter '{missing_param}'. Solid functions should only have keyword arguments "
                "that match input names and a first positional parameter named 'context'.".format(
                    solid_name=func.__name__, missing_param=missing_positional
                )
            )
        return _create_input_schema_for_decorator(config_type, func)

    return wrapper
def input_selector_schema(config_cls):
    '''
    A decorator for annotating a function that can take the selected properties
    from a ``config_value`` in to an instance of a custom type.

    Args:
        config_cls (Selector)
    '''
    config_type = _resolve_config_schema_type(config_cls)
    check.param_invariant(config_type.kind == ConfigTypeKind.SELECTOR, 'config_cls')

    def _wrap(func):
        def _selector(context, config_value):
            # A selector config always holds exactly one entry: unpack it and
            # forward the selected key alongside its value.
            key, value = ensure_single_item(config_value)
            return func(context, key, value)

        return _create_input_schema_for_decorator(config_type, _selector)

    return _wrap
class OutputSchemaForDecorator(OutputMaterializationConfig):
    '''Materialization config backed by a user function supplied via the
    @output_materialization_config (or @output_selector_schema) decorator.'''

    def __init__(self, config_type, func):
        self._config_type = check.inst_param(config_type, 'config_type', ConfigType)
        self._func = check.callable_param(func, 'func')

    @property
    def schema_type(self):
        return self._config_type

    def materialize_runtime_value(self, context, config_value, runtime_value):
        # Delegate materialization to the decorated user function.
        return self._func(context, config_value, runtime_value)
def _create_output_schema(config_type, func):
    # Thin factory; kept as a function for symmetry with the input-side helper.
    schema = OutputSchemaForDecorator(config_type, func)
    return schema
def output_materialization_config(config_cls):
    '''Create an output materialization hydration config that configurably materializes a runtime
    value.

    The decorated function should take the execution context, the parsed config value, and the
    runtime value and the parsed config data, should materialize the runtime value, and should
    return an appropriate :py:class:`Materialization`.

    Args:
        config_cls (Any): The type of the config data expected by the decorated function. Users
            should provide one of the :ref:`built-in types <builtin>`, or a composite constructed
            using :py:func:`Selector` or :py:func:`PermissiveDict`.

    Examples:

    .. code-block:: python

        # Takes a path from config and a list of dicts (such as might be read
        # in using csv.DictReader), and writes the dicts out as CSV.
        @output_materialization_config(Path)
        def df_output_schema(_context, path, value):
            with open(path, 'w') as fd:
                writer = csv.DictWriter(fd, fieldnames=value[0].keys())
                writer.writeheader()
                writer.writerows(rowdicts=value)

            return Materialization.file(path)

    '''
    config_type = _resolve_config_schema_type(config_cls)
    return lambda func: _create_output_schema(config_type, func)
def output_selector_schema(config_cls):
    '''
    A decorator for a annotating a function that can take the selected properties
    of a ``config_value`` and an instance of a custom type and materialize it.

    Args:
        config_cls (Selector):
    '''
    config_type = _resolve_config_schema_type(config_cls)
    check.param_invariant(config_type.kind == ConfigTypeKind.SELECTOR, 'config_cls')

    def _wrap(func):
        def _selector(context, config_value, runtime_value):
            # A selector config always holds exactly one entry: unpack it and
            # forward the selected key alongside its value and the runtime value.
            key, value = ensure_single_item(config_value)
            return func(context, key, value, runtime_value)

        return _create_output_schema(config_type, _selector)

    return _wrap
| 35.926267 | 110 | 0.719985 |
8b9953f8af109d260aad67e3f80469b66ec2f05a | 640 | py | Python | tests/test_cli.py | roll/packspec-py | 373e8ab7ca82e76548fb8daa6c136818bdef96f6 | [
"MIT"
] | 2 | 2017-05-10T23:38:58.000Z | 2017-09-09T15:13:55.000Z | tests/test_cli.py | packspec/packspec-py | 373e8ab7ca82e76548fb8daa6c136818bdef96f6 | [
"MIT"
] | null | null | null | tests/test_cli.py | packspec/packspec-py | 373e8ab7ca82e76548fb8daa6c136818bdef96f6 | [
"MIT"
] | null | null | null | from packspec import cli
# Tests
def test_packspec():
    # A pristine spec file should validate end to end.
    specs = cli.parse_specs('tests/packspec.yml')
    assert cli.test_specs(specs)
def test_packspec_assertion_fail():
    # Corrupt an expected result so one feature's assertion must fail.
    specs = cli.parse_specs('tests/packspec.yml')
    specs[0]['features'] = specs[0]['features'][:3]
    specs[0]['features'][2]['result'] = 'FAIL'
    assert not cli.test_specs(specs)
def test_packspec_exception_fail():
    # Replace a call with a non-callable so executing the feature raises.
    specs = cli.parse_specs('tests/packspec.yml')
    specs[0]['features'] = specs[0]['features'][:3]
    specs[0]['features'][2]['call'] = True
    assert not cli.test_specs(specs)
| 24.615385 | 52 | 0.664063 |
294f801435ab10fed45896c9c5d1e519baf78009 | 20,356 | py | Python | django/db/migrations/autodetector.py | schuyler1d/django | a91799a30c79ebbf0e5d9cad35362b4960c030fb | [
"BSD-3-Clause"
] | null | null | null | django/db/migrations/autodetector.py | schuyler1d/django | a91799a30c79ebbf0e5d9cad35362b4960c030fb | [
"BSD-3-Clause"
] | null | null | null | django/db/migrations/autodetector.py | schuyler1d/django | a91799a30c79ebbf0e5d9cad35362b4960c030fb | [
"BSD-3-Clause"
] | null | null | null | import re
import sys
from django.utils import datetime_safe
from django.utils.six.moves import input
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.models.loading import cache
class MigrationAutodetector(object):
"""
Takes a pair of ProjectStates, and compares them to see what the
first would need doing to make it match the second (the second
usually being the project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
    def __init__(self, from_state, to_state, questioner=None):
        # from_state/to_state: ProjectStates to diff (from -> to).
        # questioner resolves ambiguities (renames, NOT NULL defaults);
        # presumably MigrationQuestioner is defined later in this module --
        # it is not among the imports visible here; verify.
        self.from_state = from_state
        self.to_state = to_state
        self.questioner = questioner or MigrationQuestioner()
def changes(self, graph, trim_to_apps=None):
"""
Main entry point to produce a list of appliable changes.
Takes a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes()
changes = self._arrange_for_graph(changes, graph)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
def _detect_changes(self):
"""
Returns a dict of migration plans which will achieve the
change from from_state to to_state. The dict has app labels
as keys and a list of migrations as values.
The resulting migrations aren't specially named, but the names
do matter for dependencies inside the set.
"""
# We'll store migrations as lists by app names for now
self.migrations = {}
old_app_cache = self.from_state.render()
new_app_cache = self.to_state.render()
# Adding models. Phase 1 is adding models with no outward relationships.
added_models = set(self.to_state.models.keys()) - set(self.from_state.models.keys())
pending_add = {}
for app_label, model_name in added_models:
model_state = self.to_state.models[app_label, model_name]
# Are there any relationships out from this model? if so, punt it to the next phase.
related_fields = []
for field in new_app_cache.get_model(app_label, model_name)._meta.fields:
if field.rel:
if field.rel.to:
related_fields.append((field.name, field.rel.to._meta.app_label.lower(), field.rel.to._meta.object_name.lower()))
if hasattr(field.rel, "through") and not field.rel.though._meta.auto_created:
related_fields.append((field.name, field.rel.through._meta.app_label.lower(), field.rel.through._meta.object_name.lower()))
if related_fields:
pending_add[app_label, model_name] = related_fields
else:
self.add_to_migration(
app_label,
operations.CreateModel(
name = model_state.name,
fields = model_state.fields,
options = model_state.options,
bases = model_state.bases,
)
)
# Phase 2 is progressively adding pending models, splitting up into two
# migrations if required.
pending_new_fks = []
while pending_add:
# Is there one we can add that has all dependencies satisfied?
satisfied = [(m, rf) for m, rf in pending_add.items() if all((al, mn) not in pending_add for f, al, mn in rf)]
if satisfied:
(app_label, model_name), related_fields = sorted(satisfied)[0]
model_state = self.to_state.models[app_label, model_name]
self.add_to_migration(
app_label,
operations.CreateModel(
name = model_state.name,
fields = model_state.fields,
options = model_state.options,
bases = model_state.bases,
)
)
for field_name, other_app_label, other_model_name in related_fields:
self.add_dependency(app_label, other_app_label)
del pending_add[app_label, model_name]
# Ah well, we'll need to split one. Pick deterministically.
else:
(app_label, model_name), related_fields = sorted(pending_add.items())[0]
model_state = self.to_state.models[app_label, model_name]
# Work out the fields that need splitting out
bad_fields = dict((f, (al, mn)) for f, al, mn in related_fields if (al, mn) in pending_add)
# Create the model, without those
self.add_to_migration(
app_label,
operations.CreateModel(
name = model_state.name,
fields = [(n, f) for n, f in model_state.fields if n not in bad_fields],
options = model_state.options,
bases = model_state.bases,
)
)
# Add the bad fields to be made in a phase 3
for field_name, (other_app_label, other_model_name) in bad_fields.items():
pending_new_fks.append((app_label, model_name, field_name, other_app_label))
del pending_add[app_label, model_name]
# Phase 3 is adding the final set of FKs as separate new migrations
for app_label, model_name, field_name, other_app_label in pending_new_fks:
model_state = self.to_state.models[app_label, model_name]
self.add_to_migration(
app_label,
operations.AddField(
model_name = model_name,
name = field_name,
field = model_state.get_field_by_name(field_name),
),
new = True,
)
self.add_dependency(app_label, other_app_label)
# Removing models
removed_models = set(self.from_state.models.keys()) - set(self.to_state.models.keys())
for app_label, model_name in removed_models:
model_state = self.from_state.models[app_label, model_name]
self.add_to_migration(
app_label,
operations.DeleteModel(
model_state.name,
)
)
# Changes within models
kept_models = set(self.from_state.models.keys()).intersection(self.to_state.models.keys())
for app_label, model_name in kept_models:
old_model_state = self.from_state.models[app_label, model_name]
new_model_state = self.to_state.models[app_label, model_name]
# New fields
old_field_names = set(x for x, y in old_model_state.fields)
new_field_names = set(x for x, y in new_model_state.fields)
for field_name in new_field_names - old_field_names:
field = new_model_state.get_field_by_name(field_name)
# Scan to see if this is actually a rename!
field_dec = field.deconstruct()[1:]
found_rename = False
for removed_field_name in (old_field_names - new_field_names):
if old_model_state.get_field_by_name(removed_field_name).deconstruct()[1:] == field_dec:
if self.questioner.ask_rename(model_name, removed_field_name, field_name, field):
self.add_to_migration(
app_label,
operations.RenameField(
model_name = model_name,
old_name = removed_field_name,
new_name = field_name,
)
)
old_field_names.remove(removed_field_name)
new_field_names.remove(field_name)
found_rename = True
break
if found_rename:
continue
# You can't just add NOT NULL fields with no default
if not field.null and not field.has_default():
field.default = self.questioner.ask_not_null_addition(field_name, model_name)
self.add_to_migration(
app_label,
operations.AddField(
model_name = model_name,
name = field_name,
field = field,
)
)
# Old fields
for field_name in old_field_names - new_field_names:
self.add_to_migration(
app_label,
operations.RemoveField(
model_name = model_name,
name = field_name,
)
)
# The same fields
for field_name in old_field_names.intersection(new_field_names):
# Did the field change?
old_field_dec = old_model_state.get_field_by_name(field_name).deconstruct()
new_field_dec = new_model_state.get_field_by_name(field_name).deconstruct()
if old_field_dec != new_field_dec:
self.add_to_migration(
app_label,
operations.AlterField(
model_name = model_name,
name = field_name,
field = new_model_state.get_field_by_name(field_name),
)
)
# unique_together changes
if old_model_state.options.get("unique_together", set()) != new_model_state.options.get("unique_together", set()):
self.add_to_migration(
app_label,
operations.AlterUniqueTogether(
name = model_name,
unique_together = new_model_state.options.get("unique_together", set()),
)
)
# Alright, now add internal dependencies
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# Clean up dependencies
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
return self.migrations
def add_to_migration(self, app_label, operation, new=False):
migrations = self.migrations.setdefault(app_label, [])
if not migrations or new:
subclass = type("Migration", (Migration,), {"operations": [], "dependencies": []})
instance = subclass("auto_%i" % (len(migrations) + 1), app_label)
migrations.append(instance)
migrations[-1].operations.append(operation)
def add_dependency(self, app_label, other_app_label):
"""
Adds a dependency to app_label's newest migration on
other_app_label's latest migration.
"""
if self.migrations.get(other_app_label, []):
dependency = (other_app_label, self.migrations[other_app_label][-1].name)
else:
dependency = (other_app_label, "__first__")
self.migrations[app_label][-1].dependencies.append(dependency)
def _arrange_for_graph(self, changes, graph):
"""
Takes in a result from changes() and a MigrationGraph,
and fixes the names and dependencies of the changes so they
extend the graph from the leaf nodes for each app.
"""
leaves = graph.leaf_nodes()
name_map = {}
for app_label, migrations in list(changes.items()):
if not migrations:
continue
# Find the app label's current leaf node
app_leaf = None
for leaf in leaves:
if leaf[0] == app_label:
app_leaf = leaf
break
# Do they want an initial migration for this app?
if app_leaf is None and not self.questioner.ask_initial(app_label):
# They don't.
for migration in migrations:
name_map[(app_label, migration.name)] = (app_label, "__first__")
del changes[app_label]
# Work out the next number in the sequence
if app_leaf is None:
next_number = 1
else:
next_number = (self.parse_number(app_leaf[1]) or 0) + 1
# Name each migration
for i, migration in enumerate(migrations):
if i == 0 and app_leaf:
migration.dependencies.append(app_leaf)
if i == 0 and not app_leaf:
new_name = "0001_initial"
else:
new_name = "%04i_%s" % (next_number, self.suggest_name(migration.operations))
name_map[(app_label, migration.name)] = (app_label, new_name)
migration.name = new_name
# Now fix dependencies
for app_label, migrations in changes.items():
for migration in migrations:
migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
return changes
def _trim_to_apps(self, changes, app_labels):
"""
Takes changes from arrange_for_graph and set of app labels and
returns a modified set of changes which trims out as many migrations
that are not in app_labels as possible.
Note that some other migrations may still be present, as they may be
required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
for app_label in list(required_apps):
required_apps.update(app_dependencies.get(app_label, set()))
# Remove all migrations that aren't needed
for app_label in list(changes.keys()):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def suggest_name(cls, ops):
"""
Given a set of operations, suggests a name for the migration
they might represent. Names not guaranteed to be unique; they
must be prefixed by a number or date.
"""
if len(ops) == 1:
if isinstance(ops[0], operations.CreateModel):
return ops[0].name.lower()
elif isinstance(ops[0], operations.DeleteModel):
return "delete_%s" % ops[0].name.lower()
elif isinstance(ops[0], operations.AddField):
return "%s_%s" % (ops[0].model_name.lower(), ops[0].name.lower())
elif isinstance(ops[0], operations.RemoveField):
return "remove_%s_%s" % (ops[0].model_name.lower(), ops[0].name.lower())
elif all(isinstance(o, operations.CreateModel) for o in ops):
return "_".join(sorted(o.name.lower() for o in ops))
return "auto"
@classmethod
def parse_number(cls, name):
"""
Given a migration name, tries to extract a number from the
beginning of it. If no number found, returns None.
"""
if re.match(r"^\d+_", name):
return int(name.split("_")[0])
return None
class MigrationQuestioner(object):
    """
    Gives the autodetector responses to questions it might have.
    This base class has a built-in noninteractive mode, but the
    interactive subclass is what the command-line arguments will use.
    """

    def __init__(self, defaults=None):
        # Canned answers used when no interaction is possible.
        self.defaults = defaults or {}

    def ask_initial(self, app_label):
        """Should we create an initial migration for the app?"""
        return self.defaults.get("ask_initial", False)

    def ask_not_null_addition(self, field_name, model_name):
        """Adding a NOT NULL field to a model; None means quit."""
        return None

    def ask_rename(self, model_name, old_name, new_name, field_instance):
        """Was this field really renamed?"""
        return self.defaults.get("ask_rename", False)
class InteractiveMigrationQuestioner(MigrationQuestioner):
    """
    Questioner that prompts the user on stdin/stdout instead of only
    consulting the defaults dictionary.
    """

    def __init__(self, specified_apps=None):
        # BUG FIX: the default value used to be the mutable literal
        # ``set()``, created once and shared by every instance; use None
        # as the sentinel instead. Also chain to the base class so
        # ``self.defaults`` always exists for inherited behavior.
        super(InteractiveMigrationQuestioner, self).__init__()
        self.specified_apps = set() if specified_apps is None else specified_apps

    def _boolean_input(self, question, default=None):
        """Ask a yes/no question; empty input returns *default* if given."""
        result = input("%s " % question)
        if not result and default is not None:
            return default
        while len(result) < 1 or result[0].lower() not in "yn":
            result = input("Please answer yes or no: ")
        return result[0].lower() == "y"

    def _choice_input(self, question, choices):
        """Show numbered *choices*; return the 1-based index selected."""
        print(question)
        for i, choice in enumerate(choices):
            print(" %s) %s" % (i + 1, choice))
        result = input("Select an option: ")
        while True:
            try:
                value = int(result)
                if 0 < value <= len(choices):
                    return value
            except ValueError:
                pass
            result = input("Please select a valid option: ")

    def ask_initial(self, app_label):
        "Should we create an initial migration for the app?"
        # Don't ask for django.contrib apps
        app = cache.get_app(app_label)
        if app.__name__.startswith("django.contrib"):
            return False
        # If it was specified on the command line, definitely true
        if app_label in self.specified_apps:
            return True
        # Now ask
        return self._boolean_input("Do you want to enable migrations for app '%s'? [y/N]" % app_label, False)

    def ask_not_null_addition(self, field_name, model_name):
        "Adding a NOT NULL field to a model"
        choice = self._choice_input(
            "You are trying to add a non-nullable field '%s' to %s without a default;\n" % (field_name, model_name) +
            "this is not possible. Please select a fix:",
            [
                "Provide a one-off default now (will be set on all existing rows)",
                "Quit, and let me add a default in models.py",
            ]
        )
        if choice == 2:
            sys.exit(3)
        print("Please enter the default value now, as valid Python")
        print("The datetime module is available, so you can do e.g. datetime.date.today()")
        while True:
            code = input(">>> ")
            if not code:
                print("Please enter some code, or 'exit' (with no quotes) to exit.")
            elif code == "exit":
                sys.exit(1)
            else:
                try:
                    # NOTE: eval of user-typed code is intentional -- this
                    # is an interactive developer prompt for a default.
                    return eval(code, {}, {"datetime": datetime_safe})
                except (SyntaxError, NameError) as e:
                    print("Invalid input: %s" % e)
                # The original had an unreachable ``else: break`` here
                # (the try always returns on success); it was removed.

    def ask_rename(self, model_name, old_name, new_name, field_instance):
        "Was this field really renamed?"
        return self._boolean_input("Did you rename %s.%s to %s.%s (a %s)? [y/N]" % (model_name, old_name, model_name, new_name, field_instance.__class__.__name__), False)
| 46.15873 | 170 | 0.574376 |
79162166f30cbb0a1913750bea5118e02e4fa714 | 443 | py | Python | kolibri/core/device/upgrade.py | MBKayro/kolibri | 0a38a5fb665503cf8f848b2f65938e73bfaa5989 | [
"MIT"
] | 545 | 2016-01-19T19:26:55.000Z | 2022-03-20T00:13:04.000Z | kolibri/core/device/upgrade.py | MBKayro/kolibri | 0a38a5fb665503cf8f848b2f65938e73bfaa5989 | [
"MIT"
] | 8,329 | 2016-01-19T19:32:02.000Z | 2022-03-31T21:23:12.000Z | kolibri/core/device/upgrade.py | MBKayro/kolibri | 0a38a5fb665503cf8f848b2f65938e73bfaa5989 | [
"MIT"
] | 493 | 2016-01-19T19:26:48.000Z | 2022-03-28T14:35:05.000Z | """
A file to contain specific logic to handle version upgrades in Kolibri.
"""
from shutil import rmtree
from django.conf import settings
from kolibri.core.upgrade import version_upgrade
# Before 0.15 we copied static files to the KOLIBRI_HOME directory.
# After 0.15 we read them directly from their source directories.
@version_upgrade(old_version="<0.15.0")
def clear_static_dir():
    # One-shot upgrade hook: delete the now-obsolete copied static files.
    # ignore_errors covers the common case where the directory is absent.
    rmtree(settings.STATIC_ROOT, ignore_errors=True)
| 27.6875 | 71 | 0.78781 |
043aff0397ce16a7ec713f89cd2aa17550cdd7ab | 2,942 | py | Python | src/auv_qualification_imu_no_depth.py | heyuhang0/SAUVC2019 | 111a2ac5936b95c75930394a3df63536a47d61e9 | [
"Apache-2.0"
] | 1 | 2018-11-16T13:05:48.000Z | 2018-11-16T13:05:48.000Z | src/auv_qualification_imu_no_depth.py | heyuhang0/SAUVC2019 | 111a2ac5936b95c75930394a3df63536a47d61e9 | [
"Apache-2.0"
] | null | null | null | src/auv_qualification_imu_no_depth.py | heyuhang0/SAUVC2019 | 111a2ac5936b95c75930394a3df63536a47d61e9 | [
"Apache-2.0"
] | 2 | 2020-02-28T02:51:51.000Z | 2021-03-23T06:17:37.000Z | import time
import argparse
from tracking import CVManager
from tracking import GateTrackerV3
from mcu import MCU
from imu import IMU
from pid import *
def add_list(list1, list2, list3, list4):
    """Element-wise sum of four motor-command lists.

    Callers pass five-element lists (one PID contribution per thruster);
    entry i of the result is list1[i] + list2[i] + list3[i] + list4[i].

    Generalized from the original hard-coded ``range(5)`` loop: any
    common length now works (zip stops at the shortest input instead of
    raising IndexError on short lists).
    """
    return [a + b + c + d for a, b, c, d in zip(list1, list2, list3, list4)]
def main():
    """Main AUV control loop: fuse IMU/depth readings through four PID
    controllers and drive the five thrusters until interrupted.

    NOTE(review): hardware-facing code; exact call order against the MCU
    and IMU subprocesses is assumed to matter -- left byte-identical.
    """
    # read arguments: -s/--speed sets the forward thrust offset
    ap = argparse.ArgumentParser()
    ap.add_argument("-s", "--speed")
    args = vars(ap.parse_args())
    set_speed = args.get('speed', 0)
    if set_speed is None:
        set_speed = 0
    set_speed = float(set_speed)
    speed = 0
    # inits MCU (motor/sensor controller, listening on port 2222)
    mcu = MCU(2222)
    # inits IMU on its serial device
    imu = IMU("/dev/ttyUSB_IMU")
    # start subprocess threads and wait until the MCU is ready
    imu.start()
    mcu.start()
    mcu.wait()
    start_time = time.time()
    depth_speed = 0
    # PID gains (Kp, Ki, Kd); trailing comments keep earlier tunings
    pidR = pidRoll(0.4, 0, 0)  # previously 1, 0, 0
    pidP = pidPitch(0.6, 0, 0)  # previously 5, 0.1, 8
    pidD = pidDepth(0, 0, 0)
    pidY = pidYaw(1, 0.4, 0)
    motor_fl, motor_fr, motor_bl, motor_br, motor_t = 0, 0, 0, 0, 0
    try:
        motor_fl, motor_fr, motor_bl, motor_br, motor_t = 0, 0, 0, 0, 0
        counter = 0
        while True:
            counter += 1
            # Read sensors; depth setpoint below is 70 (units per MCU).
            depth = mcu.get_depth()
            pinger = mcu.get_angle()
            pitch = imu.get_pitch()
            roll = imu.get_roll()
            yaw = imu.get_yaw2()
            # Feed current attitude/depth errors into the controllers.
            pidR.getSetValues(roll)
            pidP.getSetValues(pitch)
            pidD.getSetValues(70-depth)
            pidY.getSetValues(-yaw)
            # Sum the four controllers' per-motor outputs.
            finalPidValues = add_list(pidR.start(), pidP.start(), pidD.start(), pidY.start())
            sentValues = []
            for values in finalPidValues:
                subValues = values
                sentValues.append(subValues)
            # Pulse extra vertical thrust for ~1s out of every 5s window.
            if abs((time.time() - start_time) % 5) < 1:
                depth_speed = 0.4
            else:
                depth_speed = 0
            # Front pair + top get the depth pulse; back pair get forward speed.
            motor_fl = sentValues[0] + depth_speed
            motor_fr = sentValues[1] + depth_speed
            motor_bl = sentValues[2] + set_speed
            motor_br = sentValues[3] + set_speed
            motor_t = sentValues[4] + depth_speed
            mcu.set_motors(motor_fl, motor_fr, motor_bl, motor_br, motor_t)
            # Telemetry every 5th iteration (~0.5s at the 0.1s loop rate).
            if counter % 5 == 0:
                print('Depth:', depth)
                print('Pinger:', pinger)
                print('Pitch:', pitch)
                print('Roll:', roll)
                print('Yaw:', imu.get_yaw2())
                print('Yaw_sent:', yaw)
                print('Motors: %.2f %.2f %.2f %.2f %.2f'%(motor_fl, motor_fr, motor_bl, motor_br, motor_t))
                print()
            time.sleep(0.1)
    except KeyboardInterrupt:
        pass
    finally:
        # Always shut the worker threads down, even on Ctrl-C.
        print("Stopping remaining threads...")
        imu.stop()
        mcu.stop()
# Entry point: run the control loop only when executed as a script.
if __name__ == '__main__':
    main()
| 27.240741 | 107 | 0.532971 |
0f58b2c204b59eef3e9b860a7a571bc9c89d4f16 | 5,802 | py | Python | consoleiotools.py | kyan001/PyPrintTool | 00ebad8235c4d5237543479435d5e3b4882866c5 | [
"MIT"
] | 1 | 2018-10-21T16:43:08.000Z | 2018-10-21T16:43:08.000Z | consoleiotools.py | kyan001/PyPrintTool | 00ebad8235c4d5237543479435d5e3b4882866c5 | [
"MIT"
] | null | null | null | consoleiotools.py | kyan001/PyPrintTool | 00ebad8235c4d5237543479435d5e3b4882866c5 | [
"MIT"
] | 1 | 2017-05-21T20:41:33.000Z | 2017-05-21T20:41:33.000Z | from functools import wraps
import os
import colorama
from colorama import Fore, Back, Style
colorama.init()
__version__ = "2.8.5"
def as_session(name_or_func):  # decorator
    """Wrap a function so a session frame is printed around each call.

    Usable both as ``@as_session`` (title derived from the function name,
    CamelCase and underscores turned into spaces) and as
    ``@as_session("custom title")``.
    """
    if callable(name_or_func):  # bare @as_session: derive a title from the name
        func = name_or_func
        pretty = "".join((" " + ch) if ch.isupper() else ch for ch in func.__name__)
        pretty = pretty.replace('_', ' ')
        return as_session(pretty)(func)  # re-enter with an explicit name
    name = name_or_func

    def get_func(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            start()
            title(name)
            result = func(*args, **kwargs)
            end()
            return result
        return wrapper
    return get_func
def start():
    """Print the session-opening marker line."""
    marker = '*'
    print(marker)
def end():
    """Print the session-closing marker line."""
    marker = '`'
    print(marker)
def br(count=1):
    """Print *count* blank lines (one by default)."""
    # print() appends the final newline itself, so emit count-1 here.
    blank_block = '\n' * (count - 1)
    print(blank_block)
def echo(msg, pre=""):
    """Print *msg* as a framed console line, optionally tagged with *pre*."""
    if pre:
        tag = Style.DIM + Fore.WHITE + '({}) '.format(pre.capitalize()) + Fore.RESET + Style.RESET_ALL
    else:
        tag = ''
    body = "{pf}{msg}".format(pf=tag, msg=msg)
    print("| " + Back.BLACK + body + Back.RESET + Fore.RESET + Style.RESET_ALL)
def title(msg, **options):
    """print something like a title"""
    # Uppercase the message and frame it with underscores; extra keyword
    # options (e.g. pre=) are forwarded to echo().
    return echo(Style.BRIGHT + Fore.CYAN + "__{}__________________________".format(msg.upper().strip()) + Style.RESET_ALL + Fore.RESET, **options)
def ask(msg, **options):
    """Print *msg* as a yellow question line tagged "(?)"."""
    return echo(Fore.YELLOW + msg, "?", **options)
def info(msg, **options):
    """Print *msg* tagged "(Info)"."""
    return echo(msg, "info", **options)
def warn(msg, **options):
    """Print *msg* in red, tagged "(Warning)"."""
    return echo(Fore.RED + msg, "warning", **options)
def err(msg, **options):
    """Print *msg* bright white on red, tagged "(Error)"."""
    return echo(Back.RED + Fore.WHITE + Style.BRIGHT + msg, "error", **options)
def dim(msg, **options):
    """Print *msg* in a dimmed style (no tag)."""
    return echo(Style.DIM + Fore.WHITE + msg, **options)
def pause(msg="Press Enter to Continue..."):
    """press to continue"""
    # Show the prompt on a fresh line without a trailing newline, then
    # block until the user hits Enter (any typed text is discarded).
    print('\n' + Fore.YELLOW + msg + Fore.RESET, end='')
    input()
def bye(msg=''):
    """Terminate the process, printing *msg* (as the exit status) first.

    Raises SystemExit directly instead of calling the interactive-only
    ``exit()`` helper injected by the ``site`` module: ``exit`` is not
    guaranteed to exist under ``python -S`` or in frozen/embedded
    interpreters. The interpreter prints a non-empty *msg* to stderr and
    uses it as the exit status, same as before.
    """
    raise SystemExit(msg)
def get_input(question='', prompt='> '):
    """Read one line from stdin, optionally printing *question* first.

    Returns the input with surrounding whitespace stripped.
    """
    if question:
        ask(question)
    return str(input(prompt)).strip()
def get_choice(choices, exitable: bool = False):
    """Get user choice from a given list

    Args:
        choices: list. The list that user can choose from.
        exitable: bool. Does `exit` is an option for user to select.

    Returns the chosen item (by index or literal match), or None when
    the user picks the EXIT option and *exitable* is true.
    """
    # "0" doubles as the exit shortcut unless it is itself a choice.
    EXIT_WORD = "exit" if "0" in choices else "0"
    for index, item in enumerate(choices, start=1):
        assemble_print = "{Fore.YELLOW}{num:>2}){Fore.RESET} {Fore.WHITE}{itm}{Fore.RESET}".format(Fore=Fore, num=index, itm=item)
        echo(assemble_print)
    if exitable:
        echo("{Fore.YELLOW}{word:>2}) ** EXIT **{Fore.RESET}".format(Fore=Fore, word=EXIT_WORD))
    user_choice = get_input().strip()
    if exitable and user_choice == EXIT_WORD:
        return None
    if user_choice in choices:
        return user_choice
    if user_choice.isdigit():
        index = int(user_choice) - 1
        if 0 <= index < len(choices):
            return choices[index]
    err("Please enter a valid choice.")
    # BUG FIX: the retry used to call get_choice(choices) without
    # forwarding `exitable`, so the EXIT option silently disappeared
    # after the first invalid answer.
    return get_choice(choices, exitable)
def get_choices(choices, allable: bool = False, exitable: bool = False) -> list:
    """Interactively toggle-select multiple items from *choices*.

    Loops re-printing the menu until the user confirms with DONE (or EXIT
    when nothing is selected and *exitable* is true); returns the list of
    selected items. *allable* adds an ALL toggle.
    """
    def toggle_listitem(itm, lst: list):
        # Add *itm* if absent, remove it if present (mutates and returns lst).
        if itm in lst:
            lst.remove(itm)
        else:
            lst.append(itm)
        return lst
    # "0"/"a" are shortcut keys unless they collide with real choices.
    EXIT_WORD = "exit" if "0" in choices else "0"
    DONE_WORD = "done" if "0" in choices else "0"
    ALL_WORD = "all" if "a" in choices else "a"
    user_choices = []
    while True:
        if allable:
            echo("{Fore.YELLOW}{word:>2}) ** ALL **{Fore.RESET}".format(Fore=Fore, word=ALL_WORD))
        for index, item in enumerate(choices, start=1):
            mark = "[+]" if item in user_choices else "[ ]"  # item is selected or not
            assemble_print = "{Fore.YELLOW}{num:>2}){Fore.RESET} {mark} {Fore.WHITE}{itm}{Fore.RESET}".format(Fore=Fore, num=index, itm=item, mark=mark)
            echo(assemble_print)
        if user_choices:  # user selections > 0
            echo("{Fore.YELLOW}{word:>2}) ** DONE **{Fore.RESET}".format(Fore=Fore, word=DONE_WORD))
        elif exitable:  # no user selection, but exitable is on.
            echo("{Fore.YELLOW}{word:>2}) ** EXIT **{Fore.RESET}".format(Fore=Fore, word=EXIT_WORD))
        user_choice = get_input().strip()
        if (user_choice == DONE_WORD or user_choice == EXIT_WORD):
            if exitable or len(user_choices) > 0:  # keep looping when not exitable and no user choices.
                return user_choices
        if allable and user_choice == ALL_WORD:
            # ALL toggles between everything selected and nothing selected.
            if len(user_choices) == len(choices):
                user_choices = []
            else:
                user_choices = choices.copy()
        elif user_choice in choices:
            user_choices = toggle_listitem(user_choice, user_choices)
        elif user_choice.isdigit() and 0 < int(user_choice) <= len(choices):
            index = int(user_choice) - 1
            user_choices = toggle_listitem(choices[index], user_choices)
        else:
            err("Please enter a valid choice.")
def read_file(path: str, with_encoding: bool = False, **kwargs):
    """Read a text file, trying a list of common encodings in order.

    Returns the file content as str (or a ``(content, encoding)`` tuple
    when *with_encoding* is true), or None when every candidate encoding
    fails (original behavior preserved, made explicit).

    BUG FIX: the original ``open()`` handles were never closed -- one
    leaked per failed decode attempt; a context manager now closes each
    deterministically.
    """
    for enc in ("utf-8", 'gbk', 'cp1252', 'windows-1252', 'latin-1'):
        try:
            with open(path, mode='r', encoding=enc, **kwargs) as f:
                content = f.read()
            return (content, enc) if with_encoding else content
        except UnicodeDecodeError:
            pass
    return None
def write_file(path: str, content: str, overwrite: bool = False, **kwargs):
    """Append (default) or overwrite *content* to the UTF-8 file at *path*.

    Returns the number of characters written.
    """
    file_mode = 'w' if overwrite else 'a'
    with open(path, mode=file_mode, encoding='utf-8', **kwargs) as out:
        return out.write(content)
| 32.595506 | 152 | 0.599276 |
f7edd0c680f9aebe037c2fc3660479acac6bee40 | 220 | py | Python | ENV/lib/python3.6/site-packages/pyramid/tests/test_config/pkgs/scannable/subpackage/notinit.py | captain-c00keys/pyramid-stocks | 0acf3363a6a7ee61cd41b855f43c9d6f9582ae6a | [
"MIT"
] | null | null | null | ENV/lib/python3.6/site-packages/pyramid/tests/test_config/pkgs/scannable/subpackage/notinit.py | captain-c00keys/pyramid-stocks | 0acf3363a6a7ee61cd41b855f43c9d6f9582ae6a | [
"MIT"
] | null | null | null | ENV/lib/python3.6/site-packages/pyramid/tests/test_config/pkgs/scannable/subpackage/notinit.py | captain-c00keys/pyramid-stocks | 0acf3363a6a7ee61cd41b855f43c9d6f9582ae6a | [
"MIT"
] | null | null | null | from pyramid.view import view_config
from pyramid.renderers import null_renderer
@view_config(name='subpackage_notinit', renderer=null_renderer)
def subpackage_notinit(context, request):
    # Scan-target view used by the config scanning tests; it simply
    # returns its own name so the test can verify it was registered.
    return 'subpackage_notinit'
b992bc624c692f14b15a862d34af24e17bf72e6a | 4,664 | py | Python | src/settings.py | cp4cds/cmip6_qc | 0015e7a62b80ea577a664cea5f4a24de28ae5b99 | [
"BSD-3-Clause"
] | null | null | null | src/settings.py | cp4cds/cmip6_qc | 0015e7a62b80ea577a664cea5f4a24de28ae5b99 | [
"BSD-3-Clause"
] | 4 | 2020-04-30T12:12:04.000Z | 2020-05-01T10:51:51.000Z | src/settings.py | cp4cds/cmip6_qc | 0015e7a62b80ea577a664cea5f4a24de28ae5b99 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env Python
# Abort the QC run after this many failures (effectively "never" by default).
EXIT_AFTER_N_FAILURES = 1000000
# Root of the CMIP6 archive on the BADC/JASMIN filesystem.
CMIP6_ARCHIVE_DIR = "/badc/cmip6/data/"
# QC checker selection accepted on the command line.
qc_choices = ["all", "cfchecker", "prepare", "nctime"]
SETUP_ENV_FILE = "/gws/nopw/j04/cp4cds1_vol3/c3s_34g/cmip6_qc/setup-env.sh"
#
# START_DATE = '1900-01-01'
#
# END_DATE = '2000-01-01'
# lotus settings: batch queue name and per-job wallclock limit
QUEUE = 'short-serial'
WALLCLOCK = '24:00:00'
# Output path templates (filled in with str.format by the callers)
#LOTUS_OUTPUT_PATH_TMPL = "{current_directory}/lotus-slurm-logs/{cmip6}/{mip}/{inst}/{model}/{experiment}"
LOTUS_OUTPUT_PATH_TMPL = "{current_directory}/lotus-slurm-logs/{cmip6}/{mip}/{inst}/{model}"
CF_OUTPUT_PATH_TMPL = "{current_directory}/qc_logs/cf/{cmip6}/{mip}/{inst}/{model}/{experiment}/{ensemble}/{table}"
# SUCCESS_PATH_TMPL = "{current_directory}/ALL_OUTPUTS/success/{stat}/{model}/{ensemble}"
# BAD_DATA_PATH_TMPL = "{current_directory}/ALL_OUTPUTS/bad_data/{stat}/{model}/{ensemble}"
# BAD_NUM_PATH_TMPL = "{current_directory}/ALL_OUTPUTS/bad_num/{stat}/{model}/{ensemble}"
# NO_OUTPUT_PATH_TMPL = "{current_directory}/ALL_OUTPUTS/no_output/{stat}/{model}/{ensemble}"
# CMIP6 experiments in scope for this QC exercise.
EXPERIMENTS = ['amip', 'historical', 'piControl', 'ssp119', 'ssp126', 'ssp245', 'ssp370', 'ssp434', 'ssp460', 'ssp534-over', 'ssp585', 'dcppA-hindcast', 'dcppB-forecast' ]
# Characters that need escaping/stripping when error strings are matched.
SPEC_CHARS = [ '[', ']', '"', '\\']
# Severity classification for known cf-checker messages: "major" fails QC,
# "minor" is tolerated, "na" is ignored. Keys are matched against checker
# output (including their odd punctuation), so they are kept verbatim.
CF_ERROR_LEVEL = {}
CF_ERROR_LEVEL["(2.3): Invalid variable name'] on variable 3basin"] = "major"
CF_ERROR_LEVEL["(7.1): bounds attribute referencing non-existent variable"] = "major"
CF_ERROR_LEVEL["(5): co-ordinate variable not monotonic"] = "major"
CF_ERROR_LEVEL["(7.1): Incorrect number of dimensions for boundary variable: time_bounds"] = "major"
CF_ERROR_LEVEL["(4.3.3): ap_bnds is not declared as a variable"] = "na"
CF_ERROR_LEVEL["(4.3.3): b_bnds is not declared as a variable"] = "na"
CF_ERROR_LEVEL["(4.3.3): ps is not declared as a variable"] = "na"
CF_ERROR_LEVEL["(5): Dimensions must be a subset of dimension"] = "na"
CF_ERROR_LEVEL["(7.1): Boundary var lev_bnds has inconsistent standard_name to lev"] = "na"
CF_ERROR_LEVEL["(7.2): Invalid cell_measures syntax"] = "na"
CF_ERROR_LEVEL["(7.1): Boundary var time_bnds should not have attribute units"] = "minor"
# NOTE(review): the next key duplicates the previous one but with literal
# single quotes embedded in the key text -- presumably a paste slip kept
# so both message variants are matched; confirm against checker output.
CF_ERROR_LEVEL["'(7.1): Boundary var time_bnds should not have attribute units'"] = "minor"
CF_ERROR_LEVEL["Attribute missing_value of incorrect type (expecting 'Data Variable' type, got 'Numeric' type)"] = "minor"
CF_ERROR_LEVEL["external variable must not be present in this file"] = "minor"
CF_ERROR_LEVEL["Invalid attribute name: _CoordinateAxisType"] = "minor"
CF_ERROR_LEVEL["(2.6.3): Variable areacella named as an external variable must not be present in this file"] = "minor"
CF_ERROR_LEVEL["(3.1): Units are not consistent with those given in the standard_name table."] = "minor"
CF_ERROR_LEVEL["(3.3): Invalid standard_name: olevel"] = "minor"
CF_ERROR_LEVEL["(3.3): Invalid standard_name: Latitude"] = "minor"
CF_ERROR_LEVEL["(3.3): Invalid standard_name modifier"] = "minor"
CF_ERROR_LEVEL["(3.3): Invalid standard_name: bounds"] = "minor"
CF_ERROR_LEVEL["(3.3): Invalid syntax for 'standard_name' attribute: 'number of layers'"] = "minor"
CF_ERROR_LEVEL["(3.3): Invalid standard_name: alevel"] = "minor"
CF_ERROR_LEVEL["(3.3): Invalid standard_name: Vertical"] = "minor"
CF_ERROR_LEVEL["(3.3): Invalid standard_name: ocean_sigma_z"] = "minor"
CF_ERROR_LEVEL["(3.3): Invalid region name:"] = "minor"
CF_ERROR_LEVEL["(4.3.3): Formula term nsigma not present in formula for ocean_sigma_z_coordinate"] = "minor"
CF_ERROR_LEVEL["(4.3.3): formula_terms attribute only allowed on coordinate variables"] = "minor"
CF_ERROR_LEVEL["(4.3.3): No formula defined for standard name: ocean_sigma_z"] = "minor"
# NOTE(review): duplicate of the "Formula term nsigma" assignment above
# (same key, same value) -- harmless re-assignment, kept as-is.
CF_ERROR_LEVEL["(4.3.3): Formula term nsigma not present in formula for ocean_sigma_z_coordinate"] = "minor"
CF_ERROR_LEVEL["(5): coordinates attribute referencing non-existent variable"] = "minor"
CF_ERROR_LEVEL["(7.3): Invalid syntax for cell_methods attribute"] = "minor"
CF_ERROR_LEVEL["(7.3): Invalid 'name' in cell_methods attribute"] = "minor"
CF_ERROR_LEVEL["(7.1): Incorrect dimensions for boundary variable: lat_bnds"] = "minor"
CF_ERROR_LEVEL["(7.1): Boundary var lev_bnds has inconsistent units to lev"] = "minor"
CF_ERROR_LEVEL["(7.1): Boundary var time_bnds has inconsistent calendar to time"] = "minor"
CF_ERROR_LEVEL["(7.2): cell_measures variable areacello must either exist in this netCDF file or be named by the external_variables attribute"] = "minor"
CF_ERROR_LEVEL["(7.2): cell_measures variable areacella must either exist in this netCDF file or be named by the external_variables attribute"] = "minor"
CF_ERROR_LEVEL["(7.3): Invalid unit hours, in cell_methods comment"] = "minor"
7734df89ded67adcc850d12fe91c0b0402b488c1 | 3,592 | py | Python | src/modules/configs.py | tomzig16/adbepy | ddcad646296c8d4a7806a4a24e993d53d64d1971 | [
"MIT"
] | 3 | 2019-07-06T20:58:19.000Z | 2019-07-17T04:55:44.000Z | src/modules/configs.py | tomzig16/adbepy | ddcad646296c8d4a7806a4a24e993d53d64d1971 | [
"MIT"
] | 1 | 2019-07-09T19:58:31.000Z | 2019-07-09T19:58:31.000Z | src/modules/configs.py | tomzig16/adbepy | ddcad646296c8d4a7806a4a24e993d53d64d1971 | [
"MIT"
] | null | null | null | from subprocess import Popen, PIPE
import re
import os
from platform import system
# Resources directory that holds the generated config file, resolved
# relative to this module so the tool works from any working directory.
resFolder = os.path.join(os.path.dirname(__file__), "../res/")
configFileName = "adbepy.config"
configFilePath = os.path.join(resFolder, configFileName)
def GetDefaultSDKPath():
    """Best-effort discovery of the Android SDK root directory.

    Runs the `adb` binary and parses its "Installed as <path>" banner,
    walking two directories up (<sdk>/platform-tools/adb). Falls back to
    the conventional per-user SDK location when adb is not on PATH,
    writes to stderr, or prints no "Installed as" line.
    """
    def _conventional_sdk_path():
        # ~/Library/Android/sdk on macOS/Linux-style installs,
        # %LOCALAPPDATA%\Android\Sdk on Windows.
        userPath = os.path.expanduser("~")
        pathToSDK = "Library/Android/sdk"
        if system() == "Windows":
            pathToSDK = "AppData\\Local\\Android\\Sdk"
        return os.path.join(userPath, pathToSDK)

    try:
        proc = Popen(["adb"], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    except OSError:
        # BUG FIX: a missing adb executable used to raise
        # FileNotFoundError out of Popen instead of falling back.
        return _conventional_sdk_path()
    output, err = proc.communicate()
    if err.decode("UTF-8", errors="replace"):
        # adb wrote to stderr: treat as "not usable", use the default.
        return _conventional_sdk_path()
    matchObj = re.search(r"Installed as (.+(?=\n))", output.decode("ascii", errors="replace"))
    if matchObj is None:
        # BUG FIX: the original called .group() on None when the banner
        # did not contain an "Installed as" line.
        return _conventional_sdk_path()
    return os.path.abspath(os.path.join(matchObj.group(1), "../../"))
class ConfigDataFields:
    """Property names used as keys in the adbepy.config file."""
    # Android SDK install directory
    sdk_location = "SDK_FOLDER"
    # where screenshots are saved
    screenshot_location = "SCREENSHOT_LOC"
    # where screen recordings are saved
    recorded_video_location = "RECVID_LOC"
# Fallback values (and human-readable descriptions) for every known config
# field, used when adbepy.config is missing or a field's value is blank.
defaultConfigValues = {
    ConfigDataFields.sdk_location:{
        "value": GetDefaultSDKPath(),
        "description": "SDK folder location"
    },
    ConfigDataFields.screenshot_location: {
        "value": os.path.abspath(os.path.join(resFolder, "../../screenshots/")),
        "description": "Default screenshot location"
    },
    ConfigDataFields.recorded_video_location: {
        "value": os.path.abspath(os.path.join(resFolder, "../../videos/")),
        "description": "Default path for recorded videos"
    }
}
def GetFieldData(field):
    """
    field - expected any field from ConfigDataFields class. You can send a simple
    string with property name from adbepy.config file (useful if using custom added
    fields) but it is highly recommended to use ConfigDataFields in order to
    prevent mistakes and stay consistent

    Returns the configured value for *field*, the built-in default when the
    config file is missing or the field's value is blank, or None when the
    field does not appear in the file at all (original behavior preserved).
    """
    try:
        # BUG FIX: use a context manager so the file handle is closed on
        # every path -- the original leaked the open file object.
        with open(configFilePath, mode='r') as f:
            for line in f:
                if line[0] == "#":
                    continue
                if field in line:
                    value = line.split('=', 1)[1].rstrip()
                    if not value:
                        return defaultConfigValues[field]["value"]
                    return value
    except FileNotFoundError:
        # No config yet: create one and answer with the default.
        GenerateConfigFile("")
        return defaultConfigValues[field]["value"]
    return None
def GetConfigs():
    """
    Returns whole config file as a dictionary. Use ConfigDataFields variables
    as keys for returned dictionary

    When adbepy.config does not exist, a fresh file is generated and the
    built-in defaults are returned; blank values are also replaced by
    their defaults.
    """
    output = {}
    try:
        # BUG FIX: context manager closes the handle deterministically --
        # the original never closed the file.
        with open(configFilePath, mode='r') as f:
            for line in f:
                if line[0] == "#":
                    continue
                if "=" in line:
                    key, raw_value = line.split('=', 1)
                    output[key] = raw_value.rstrip()
                    if not output[key]:
                        # Blank entry: substitute the built-in default.
                        output[key] = defaultConfigValues[key]["value"]
    except FileNotFoundError:
        GenerateConfigFile("")
        for defKey in defaultConfigValues:
            output[defKey] = defaultConfigValues[defKey]["value"]
    return output
def GenerateConfigFile(args):
    """
    Generates new adbepy.config file (overwrites if such file already exists!)

    *args* is accepted (and ignored) so the function can be wired directly
    to a CLI sub-command handler.
    """
    if not os.path.exists(resFolder):
        os.mkdir(resFolder)
    # BUG FIX: the original opened the file and never closed it; the
    # context manager guarantees it is flushed and closed before the
    # confirmation message is printed.
    with open(configFilePath, mode='w') as f:
        for defKey in defaultConfigValues:
            # Each field is written as "# description" followed by "KEY=value".
            f.write("# " + defaultConfigValues[defKey]["description"] + "\n")
            f.write(defKey + "=" + defaultConfigValues[defKey]["value"] + "\n")
    print("New adbepy.config file has been generated. Path: %s" % (os.path.abspath(configFilePath)))
| 36.282828 | 100 | 0.633909 |
5825e7d6c228d7999a7816ed60040103e791da63 | 453 | py | Python | tests/test_main_window.py | pkuehne/phil | c06fd74e0f2ffb45eab72720b6ab9a50c7aa7b23 | [
"MIT"
] | null | null | null | tests/test_main_window.py | pkuehne/phil | c06fd74e0f2ffb45eab72720b6ab9a50c7aa7b23 | [
"MIT"
] | null | null | null | tests/test_main_window.py | pkuehne/phil | c06fd74e0f2ffb45eab72720b6ab9a50c7aa7b23 | [
"MIT"
] | 1 | 2020-08-17T19:40:11.000Z | 2020-08-17T19:40:11.000Z | """ Tests for the main window """
from PyQt5.QtWidgets import QFileDialog
from phil.main_window import MainWindow
def test_load_folder_does_nothin_if_aborted(qtbot, monkeypatch):
    """ If the user aborts opening a folder, nothing should happen """
    # Given
    window = MainWindow()
    qtbot.addWidget(window)
    # Simulate the user cancelling the directory picker: the patched
    # dialog returns an empty path, which load_folder() should ignore.
    monkeypatch.setattr(QFileDialog, "getExistingDirectory", lambda _, __, ___: "")
    # When
    window.load_folder()
    # Then
    # NOTE(review): no explicit assertion -- the test currently only
    # verifies load_folder() does not raise. Consider asserting the
    # window state is unchanged. (Name typo "nothin" kept on purpose to
    # avoid changing the collected test id.)
| 23.842105 | 83 | 0.715232 |
22ceb7e76cdaaa5427a4c31e24a84630e8486a25 | 2,355 | py | Python | examples/00_empty_scene/generate_jpg_from_exr.py | americast/ml-hypersim | 6cbaa80207f44a312654e288cf445016c84658a1 | [
"AML"
] | 1,246 | 2020-11-17T05:13:36.000Z | 2022-03-31T13:42:40.000Z | examples/00_empty_scene/generate_jpg_from_exr.py | mikeroberts3000/ml-hypersim | 75b363ee52fbdbd0cc9b554c34c1aadea404183e | [
"AML"
] | 44 | 2020-12-20T19:54:30.000Z | 2022-03-30T07:17:03.000Z | examples/00_empty_scene/generate_jpg_from_exr.py | mikeroberts3000/ml-hypersim | 75b363ee52fbdbd0cc9b554c34c1aadea404183e | [
"AML"
] | 101 | 2020-11-16T22:47:13.000Z | 2022-03-26T11:25:15.000Z | #
# For licensing see accompanying LICENSE.txt file.
# Copyright (C) 2020 Apple Inc. All Rights Reserved.
#
from pylab import *
import argparse
import cv2
import glob
import os
# CLI: glob of input EXR files, output dir, tone-mapping mode, optional gamma.
parser = argparse.ArgumentParser()
parser.add_argument("--in_files", required=True)
parser.add_argument("--out_dir", required=True)
parser.add_argument("--tone_mapping_mode", required=True)
parser.add_argument("--gamma_correction", action="store_true")
args = parser.parse_args()
assert args.tone_mapping_mode == "linear" or args.tone_mapping_mode == "exponential"
print("Begin...")
# A bare filename pattern means "current directory".
input_dir = os.path.dirname(args.in_files)
if input_dir == "":
    input_dir = "."
assert os.path.exists(input_dir)
if not os.path.exists(args.out_dir): os.makedirs(args.out_dir)
in_filenames = [ os.path.basename(f) for f in sort(glob.glob(args.in_files)) ]
for in_filename in in_filenames:
    in_file = os.path.join(input_dir, in_filename)
    in_filename_root = os.path.splitext(in_filename)[0]
    in_filename_ext = os.path.splitext(in_filename)[1]
    out_file = os.path.join(args.out_dir, in_filename_root + ".jpg")
    print("Saving " + out_file + "...")
    # load file; OpenCV reads BGR, so reorder channels to RGB
    in_rgb_color = cv2.imread(in_file, cv2.IMREAD_UNCHANGED)[:,:,[2,1,0]]
    # apply color mapping with sensible default parameters that are equivalent to V-Ray's "exponential mode"
    if args.tone_mapping_mode == "exponential":
        rgb_color = in_rgb_color.copy()
        dark_mult = 2.3
        bright_mult = 2.3
        # Blend the dark/bright multipliers by the clipped intensity.
        k = clip(rgb_color,0,1)
        k = dark_mult*(1.0-k) + bright_mult*k
        rgb_color_exp = 1.0 - exp(-rgb_color*k)
        out_rgb_color = rgb_color_exp
    if args.tone_mapping_mode == "linear":
        out_rgb_color = in_rgb_color.copy()
    in_rgb_color = out_rgb_color
    # apply gamma correction with sensible default parameters
    if args.gamma_correction:
        rgb_color = in_rgb_color.copy()
        gamma = 1.0/2.2
        rgb_color_gamma = np.power(np.maximum(rgb_color,0), gamma)
        out_rgb_color = rgb_color_gamma
        in_rgb_color = out_rgb_color
    # clip to [0,1] before saving as 8-bit JPEG
    rgb_color = in_rgb_color.copy()
    rgb_color_clip = clip(rgb_color,0,1)
    out_rgb_color = rgb_color_clip
    imsave(out_file, out_rgb_color)
print("Finished.")
| 25.597826 | 108 | 0.665817 |
d970f9d7726a379b29e4f86a10aabec6e7fead04 | 3,774 | py | Python | oslo_log/watchers.py | kaptnemo/oslo.log | b97137fb8e418b08067b7ad49c176f4bfc45e916 | [
"Apache-2.0"
] | 65 | 2015-01-23T05:49:09.000Z | 2022-01-09T06:54:33.000Z | oslo_log/watchers.py | kaptnemo/oslo.log | b97137fb8e418b08067b7ad49c176f4bfc45e916 | [
"Apache-2.0"
] | null | null | null | oslo_log/watchers.py | kaptnemo/oslo.log | b97137fb8e418b08067b7ad49c176f4bfc45e916 | [
"Apache-2.0"
] | 50 | 2015-02-02T14:26:24.000Z | 2021-03-12T06:13:16.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import logging
import logging.config
import logging.handlers
import os
import pyinotify
import stat
import time
try:
import syslog
except ImportError:
syslog = None
"""Linux specific pyinotify based logging handlers"""
class _FileKeeper(pyinotify.ProcessEvent):
    # Reacts to inotify events by asking the owning handler to reopen
    # its log file when that specific file is moved away or deleted
    # (e.g. by logrotate).
    def my_init(self, watched_handler, watched_file):
        # pyinotify's ProcessEvent forwards constructor kwargs to
        # my_init() instead of expecting an overridden __init__.
        self._watched_handler = watched_handler
        self._watched_file = watched_file
    def process_default(self, event):
        # Only react to events for the exact file we guard.
        if event.name == self._watched_file:
            self._watched_handler.reopen_file()
class _EventletThreadedNotifier(pyinotify.ThreadedNotifier):
    def loop(self):
        """Eventlet friendly ThreadedNotifier
        EventletFriendlyThreadedNotifier contains additional time.sleep()
        call insude loop to allow switching to other thread when eventlet
        is used.
        It can be used with eventlet and native threads as well.
        """
        while not self._stop_event.is_set():
            self.process_events()
            # time.sleep(0) yields control so a monkey-patched (eventlet)
            # scheduler can switch greenthreads between event batches.
            time.sleep(0)
            ref_time = time.time()
            if self.check_events():
                self._sleep(ref_time)
                self.read_events()
class FastWatchedFileHandler(logging.handlers.WatchedFileHandler, object):
    # NOTE(review): the docstring below actually documents READ_FREQ;
    # the class itself is a WatchedFileHandler variant that reopens the
    # log file on inotify move/delete events instead of stat()ing the
    # path on every emit.
    """Frequency of reading events.
    Watching thread sleeps max(0, READ_FREQ - (TIMEOUT / 1000)) seconds.
    """
    READ_FREQ = 5
    """Poll timeout in milliseconds.
    See https://docs.python.org/2/library/select.html#select.poll.poll"""
    TIMEOUT = 5
    def __init__(self, logpath, *args, **kwargs):
        # Remember the file name and directory separately: the directory
        # is what gets watched (a deleted file can't be watched itself).
        self._log_file = os.path.basename(logpath)
        self._log_dir = os.path.dirname(logpath)
        super(FastWatchedFileHandler, self).__init__(logpath, *args, **kwargs)
        self._watch_file()
    def _watch_file(self):
        # Watch the log directory for moves/deletions of our file and
        # trigger reopen_file() via _FileKeeper on a daemon thread.
        mask = pyinotify.IN_MOVED_FROM | pyinotify.IN_DELETE
        watch_manager = pyinotify.WatchManager()
        handler = _FileKeeper(watched_handler=self,
                              watched_file=self._log_file)
        notifier = _EventletThreadedNotifier(
            watch_manager,
            default_proc_fun=handler,
            read_freq=FastWatchedFileHandler.READ_FREQ,
            timeout=FastWatchedFileHandler.TIMEOUT)
        notifier.daemon = True
        watch_manager.add_watch(self._log_dir, mask)
        notifier.start()
    def reopen_file(self):
        try:
            # stat the file by path, checking for existence
            sres = os.stat(self.baseFilename)
        except OSError as err:
            if err.errno == errno.ENOENT:
                sres = None
            else:
                raise
        # compare file system stat with that of our stream file handle
        if (not sres or
                sres[stat.ST_DEV] != self.dev or
                sres[stat.ST_INO] != self.ino):
            if self.stream is not None:
                # we have an open file handle, clean it up
                self.stream.flush()
                self.stream.close()
                self.stream = None
            # open a new file handle and get new stat info from that fd
            self.stream = self._open()
            self._statstream()
| 33.696429 | 78 | 0.641229 |
aa65b9dba167cadb9a8d36af3fa72c62814ba83f | 7,526 | py | Python | main.py | jralha/binance-trading-bot-new-coins | 0a00ffd3199329f5d97ce2b560ad69d51859c3bd | [
"MIT"
] | 1 | 2021-08-28T21:07:00.000Z | 2021-08-28T21:07:00.000Z | main.py | TiagoDS7/binance-trading-bot-new-coins | 5b28c91dcb91e6e6d3848a984e222e85546984d2 | [
"MIT"
] | null | null | null | main.py | TiagoDS7/binance-trading-bot-new-coins | 5b28c91dcb91e6e6d3848a984e222e85546984d2 | [
"MIT"
] | null | null | null | from trade_client import *
from store_order import *
from load_config import *
from datetime import datetime, time
import time
import json
import os.path
# loads local configuration
config = load_config('config.yml')
def get_all_coins():
    """Return the full ticker list from Binance.

    NOTE(review): relies on a module-level ``client`` that is not defined in
    this file -- presumably provided by ``from trade_client import *``;
    confirm. get_all_tickers() is expected to return a list of dicts with at
    least a ``'symbol'`` key (callers index it that way).
    """
    return client.get_all_tickers()
def get_new_coins(all_coins):
    """Return newly listed coins plus a refreshed full ticker list.

    Args:
        all_coins: previously fetched ticker list (dicts with a 'symbol' key).

    Returns:
        Tuple of (new_coins, all_coins_recheck) where ``new_coins`` are the
        tickers present in the fresh fetch but absent from ``all_coins``, and
        ``all_coins_recheck`` is the fresh list (for the caller to keep).
    """
    all_coins_recheck = get_all_coins()
    # Build the known-symbol set once. The original rebuilt an inner list of
    # symbols for every ticker, making the comparison O(n^2).
    known_symbols = {coin['symbol'] for coin in all_coins}
    new_coins = [ticker for ticker in all_coins_recheck
                 if ticker['symbol'] not in known_symbols]
    return new_coins, all_coins_recheck
def get_price(coin, pairing):
    """Get the latest traded price for ``coin`` against ``pairing``.

    Returns the ``'lastPrice'`` field of the ticker response -- presumably a
    string (callers wrap it in float()); confirm against the client library.
    """
    return client.get_ticker(symbol=coin+pairing)['lastPrice']
def main():
    """Run the trading loop forever.

    Each iteration:
      * if ``order.json`` exists, walks every open position and either
        trails its TP/SL thresholds upward (when ENABLE_TSL and the take
        profit was reached) or closes the position (selling live unless
        TEST mode), persisting closed trades to ``sold.json``;
      * polls Binance for newly listed symbols and opens a position on each
        new listing whose symbol contains the configured PAIRING.

    State lives in ``order.json`` / ``sold.json`` so it survives restarts.
    All errors are printed and the loop continues.
    """
    # store config deets
    tp = config['TRADE_OPTIONS']['TP']
    sl = config['TRADE_OPTIONS']['SL']
    enable_tsl = config['TRADE_OPTIONS']['ENABLE_TSL']
    tsl = config['TRADE_OPTIONS']['TSL']
    ttp = config['TRADE_OPTIONS']['TTP']
    pairing = config['TRADE_OPTIONS']['PAIRING']
    qty = config['TRADE_OPTIONS']['QUANTITY']
    # NOTE(review): RUN_EVERY is loaded but never used -- there is no sleep
    # in the loop, so this busy-polls the API. Presumably a time.sleep based
    # on `frequency` was intended; confirm before adding one.
    frequency = config['TRADE_OPTIONS']['RUN_EVERY']
    test_mode = config['TRADE_OPTIONS']['TEST']

    all_coins = get_all_coins()

    while True:
        try:
            # check if the order file exists and load the current orders
            # basically the sell block and update TP and SL logic
            if os.path.isfile('order.json'):
                order = load_order('order.json')

                # list() so entries can be popped while iterating.
                for coin in list(order):
                    # store some necesarry trade info for a sell
                    stored_price = float(order[coin]['price'])
                    coin_tp = order[coin]['tp']
                    coin_sl = order[coin]['sl']
                    volume = order[coin]['volume']
                    symbol = coin.split(pairing)[0]

                    last_price = get_price(symbol, pairing)

                    # update stop loss and take profit values if threshold is reached
                    if float(last_price) > stored_price + (stored_price*coin_tp /100) and enable_tsl:
                        # increase as absolute value for TP
                        new_tp = float(last_price) + (float(last_price)*ttp /100)
                        # convert back into % difference from when the coin was bought
                        new_tp = float( (new_tp - stored_price) / stored_price*100)

                        # same deal as above, only applied to trailing SL
                        new_sl = float(last_price) - (float(last_price)*tsl /100)
                        new_sl = float((new_sl - stored_price) / stored_price*100)

                        # new values to be added to the json file
                        order[coin]['tp'] = new_tp
                        order[coin]['sl'] = new_sl
                        store_order('order.json', order)

                        print(f'updated tp: {round(new_tp, 3)} and sl: {round(new_sl, 3)}')

                    # close trade if tsl is reached or trail option is not enabled.
                    # Parses as: SL_hit or (TP_hit and not enable_tsl).
                    # NOTE(review): this tests the *config* sl/tp, not the
                    # trailed coin_sl/coin_tp updated above, so the trailing
                    # values are never consulted on sell -- confirm intent.
                    elif float(last_price) < stored_price - (stored_price*sl /100) or float(last_price) > stored_price + (stored_price*tp /100) and not enable_tsl:
                        try:
                            # sell for real if test mode is set to false.
                            # FIX: was create_order(coin, coin['volume'], ...);
                            # `coin` is the symbol string here, so indexing it
                            # with 'volume' raised TypeError. Use the local.
                            if not test_mode:
                                sell = create_order(coin, volume, 'SELL')

                            print(f"sold {coin} at {(float(last_price) - stored_price) / float(stored_price)*100}")

                            # remove order from json file
                            order.pop(coin)
                            store_order('order.json', order)

                        except Exception as e:
                            print(e)
                        # store sold trades data (only when the sell succeeded)
                        else:
                            if os.path.isfile('sold.json'):
                                sold_coins = load_order('sold.json')
                            else:
                                sold_coins = {}

                            if not test_mode:
                                sold_coins[coin] = sell
                                store_order('sold.json', sold_coins)
                            else:
                                sold_coins[coin] = {
                                    'symbol':coin,
                                    'price':last_price,
                                    'volume':volume,
                                    'time':datetime.timestamp(datetime.now()),
                                    'profit': float(last_price) - stored_price,
                                    'relative_profit': round((float(last_price) - stored_price) / stored_price*100, 3)
                                }
                                store_order('sold.json', sold_coins)
            else:
                order = {}

            # store new coins and rechecked coins list here
            new_coins, all_coins_recheck = get_new_coins(all_coins)

            # the buy block and logic pass
            if len(new_coins) > 0:
                all_coins = all_coins_recheck
                print(f'New coins detected: {new_coins}')

                for coin in new_coins:
                    # buy if the coin hasn't already been bought
                    if coin['symbol'] not in order and pairing in coin['symbol']:
                        symbol_only = coin['symbol'].split(pairing)[0]
                        print(f"Preparing to buy {coin['symbol']}")
                        price = get_price(symbol_only, pairing)
                        volume = convert_volume(coin['symbol'], qty, price)

                        try:
                            # Run a test trade if true
                            if config['TRADE_OPTIONS']['TEST']:
                                order[coin['symbol']] = {
                                    'symbol':symbol_only+pairing,
                                    'price':price,
                                    'volume':volume,
                                    'time':datetime.timestamp(datetime.now()),
                                    'tp': tp,
                                    'sl': sl
                                }
                                print('PLACING TEST ORDER')
                            # place a live order if False
                            else:
                                order[coin['symbol']] = create_order(symbol_only+pairing, volume, 'BUY')
                                order[coin['symbol']]['tp'] = tp
                                order[coin['symbol']]['sl'] = sl

                        except Exception as e:
                            print(e)
                        else:
                            print(f"Order created with {volume} on {coin['symbol']}")
                            store_order('order.json', order)
                    else:
                        print(f"New coin detected, but {coin['symbol']} is currently in portfolio, or {pairing} does not match")
            else:
                pass
        except Exception as e:
            print(e)
# Script entry point: announce startup, then run the trading loop forever.
if __name__ == '__main__':
    print('working...')
    main()
| 38.010101 | 163 | 0.463859 |
710e4eb564650c40b5ae7a4b3dc0e1f0c0b56600 | 12,459 | py | Python | docs/conf.py | slowy07/python-secret-manager | e949b6a56815079d1ffd25d76cbee63cb4a79390 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | slowy07/python-secret-manager | e949b6a56815079d1ffd25d76cbee63cb4a79390 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | slowy07/python-secret-manager | e949b6a56815079d1ffd25d76cbee63cb4a79390 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-secret-manager documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "google-cloud-secret-manager"
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-secret-manager",
"github_user": "googleapis",
"github_repo": "python-secret-manager",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-secret-manager-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"google-cloud-secret-manager.tex",
"google-cloud-secret-manager Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"google-cloud-secret-manager",
"google-cloud-secret-manager Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"google-cloud-secret-manager",
"google-cloud-secret-manager Documentation",
author,
"google-cloud-secret-manager",
"google-cloud-secret-manager Library",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
# Maps external project names to their objects.inv locations so Sphinx can
# resolve cross-references into those docs.
intersphinx_mapping = {
    "python": ("https://python.readthedocs.org/en/latest/", None),
    "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
    "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
    "grpc": ("https://grpc.github.io/grpc/python/", None),
    "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
    "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}

# Napoleon settings: accept both Google- and NumPy-style docstrings.
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| 32.700787 | 88 | 0.706718 |
e2c0f5f02be015369128230d70893f17ead9902e | 870 | py | Python | tests/opcodes/cases/test_map_iter_173.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2020-08-11T02:31:24.000Z | 2020-08-11T02:31:24.000Z | tests/opcodes/cases/test_map_iter_173.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2020-12-30T16:44:56.000Z | 2020-12-30T16:44:56.000Z | tests/opcodes/cases/test_map_iter_173.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2022-03-20T19:01:00.000Z | 2022-03-20T19:01:00.000Z | from unittest import TestCase
from tests import abspath
from pytezos.repl.interpreter import Interpreter
from pytezos.michelson.converter import michelson_to_micheline
from pytezos.repl.parser import parse_expression
class OpcodeTestmap_iter_173(TestCase):
    """Interpreter test for the ``map_iter`` contract (case 173)."""

    def setUp(self):
        self.maxDiff = None
        self.i = Interpreter(debug=True)

    def test_opcode_map_iter_173(self):
        # Load the contract under test into the interpreter.
        include_res = self.i.execute(
            f'INCLUDE "{abspath("opcodes/contracts/map_iter.tz")}"')
        self.assertTrue(include_res['success'])
        # Run it with a two-entry map parameter and zeroed pair storage.
        run_res = self.i.execute('RUN { Elt 0 100 ; Elt 2 100 } (Pair 0 0)')
        self.assertTrue(run_res['success'])
        # Resulting storage must equal (Pair 2 200), parsed against the
        # storage's own Michelson type expression.
        storage = run_res['result']['storage']
        expected = parse_expression(
            michelson_to_micheline('(Pair 2 200)'), storage.type_expr)
        self.assertEqual(expected, storage._val)
| 33.461538 | 85 | 0.683908 |
3ce44ac93fec59ed957a0ea76e26467e7739a7d5 | 1,036 | py | Python | kubernetes/test/test_apps_v1beta1_deployment_list.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | 1 | 2018-10-20T19:37:57.000Z | 2018-10-20T19:37:57.000Z | kubernetes/test/test_apps_v1beta1_deployment_list.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_apps_v1beta1_deployment_list.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | 2 | 2018-07-27T19:39:34.000Z | 2020-12-25T02:48:27.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.apps_v1beta1_deployment_list import AppsV1beta1DeploymentList
class TestAppsV1beta1DeploymentList(unittest.TestCase):
    """ AppsV1beta1DeploymentList unit test stubs """
    # Auto-generated stub (swagger-codegen, per the file header); the test
    # body was never filled in -- see the FIXME below.

    def setUp(self):
        # No fixtures required for this stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testAppsV1beta1DeploymentList(self):
        """
        Test AppsV1beta1DeploymentList
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = kubernetes.client.models.apps_v1beta1_deployment_list.AppsV1beta1DeploymentList()
        pass
if __name__ == '__main__':
unittest.main()
| 23.022222 | 105 | 0.72973 |
881163159ed1444f00c9fbe722d68a5af5c5ecf4 | 8,341 | py | Python | rlpyt/replays/non_sequence/uniform.py | 2016choang/sfl | c99faa853f610455e8bb7709ac47f9d725499d91 | [
"MIT"
] | 2 | 2021-12-06T03:24:18.000Z | 2022-01-12T01:11:38.000Z | rlpyt/replays/non_sequence/uniform.py | 2016choang/sfl | c99faa853f610455e8bb7709ac47f9d725499d91 | [
"MIT"
] | 1 | 2021-12-06T13:28:44.000Z | 2022-01-06T07:34:24.000Z | rlpyt/replays/non_sequence/uniform.py | 2016choang/sfl | c99faa853f610455e8bb7709ac47f9d725499d91 | [
"MIT"
] | null | null | null |
import math
import numpy as np
from rlpyt.agents.base import AgentInputs
from rlpyt.replays.async_ import AsyncReplayBufferMixin
from rlpyt.replays.base import BaseReplayBuffer
from rlpyt.replays.non_sequence.n_step import NStepReturnBuffer
from rlpyt.utils.buffer import buffer_from_example, get_leading_dims, torchify_buffer
from rlpyt.utils.collections import namedarraytuple
TripletsFromReplay = namedarraytuple("TripletsFromReplay",
["anchor", "pos", "neg"])
class UniformReplay:
    """Mixin adding uniform random sampling over a circular replay buffer.

    Expects the host class to provide ``t`` (write cursor), ``T``/``B``
    (buffer dims), ``off_backward``/``off_forward`` (index margins),
    ``_buffer_full`` and ``extract_batch``.
    """

    def sample_batch(self, batch_B):
        """Draw ``batch_B`` uniformly random samples and extract them."""
        time_idxs, env_idxs = self.sample_idxs(batch_B)
        return self.extract_batch(time_idxs, env_idxs)

    def sample_idxs(self, batch_B):
        """Return (T_idxs, B_idxs) arrays of valid uniform random indices."""
        cursor = self.t
        back, fwd = self.off_backward, self.off_forward
        if self._buffer_full:
            low, high = 0, self.T - back - fwd
        else:
            low, high = fwd, cursor - back
        T_idxs = np.random.randint(low=low, high=high, size=(batch_B,))
        # Shift any draw landing in the margin just behind the cursor past
        # the invalid region (min guards a cursor smaller than the margin).
        T_idxs[T_idxs >= cursor - back] += min(cursor, back) + fwd
        B_idxs = np.random.randint(low=0, high=self.B, size=(batch_B,))
        return T_idxs, B_idxs
class UniformReplayBuffer(UniformReplay, NStepReturnBuffer):
    # Concrete buffer: n-step return storage (NStepReturnBuffer) combined
    # with uniform random sampling (UniformReplay mixin).
    pass
class AsyncUniformReplayBuffer(AsyncReplayBufferMixin, UniformReplayBuffer):
    # UniformReplayBuffer extended for asynchronous sampler/optimizer use;
    # the mixin's behavior is defined in AsyncReplayBufferMixin.
    pass
LandmarkSamplesFromReplay = namedarraytuple("LandmarkSamplesFromReplay",
["agent_inputs", "action", "return_", "done", "done_n", "target_inputs", "target_done"])
class LandmarkUniformReplayBuffer(UniformReplayBuffer):
    """Uniform replay buffer that skips samples flagged via ``samples.mode``
    when drawing time indices, and extracts landmark-style batches that pair
    each sample with its n-step-ahead target."""

    def __init__(self, example, size, B, **kwargs):
        # FIX: was super().__init__(example, size, B, kwargs) -- the kwargs
        # dict was passed *positionally* into the base constructor (binding
        # to its next positional parameter) instead of being expanded.
        super().__init__(example, size, B, **kwargs)
        # Precomputed index vector, masked per-draw in sample_idxs().
        self.valid_idxs = np.arange(self.T, dtype=int)

    def sample_idxs(self, batch_B):
        """Return (T_idxs, B_idxs) drawn uniformly from non-``mode`` samples."""
        B_idxs = np.random.randint(low=0, high=self.B, size=(batch_B,))
        t, b, f = self.t, self.off_backward, self.off_forward
        high = self.T - b - f if self._buffer_full else t - b
        low = 0 if self._buffer_full else f
        T_idxs = np.zeros((batch_B, ), dtype=int)
        if self._buffer_full:
            # Temporarily mark the wrap-around margin around the cursor as
            # "mode" so those invalid positions cannot be drawn; restored below.
            saved = self.samples.mode[t - b:t - b + min(t, b) + f].copy()
            self.samples.mode[t - b:t - b + min(t, b) + f] = True
        for i, B_idx in enumerate(B_idxs):
            mask = self.samples.mode[low:high, B_idx]
            T_idxs[i] = np.random.choice(self.valid_idxs[low:high][~mask])
        if self._buffer_full and 0 < min(t, b) + f:
            self.samples.mode[t - b:t - b + min(t, b) + f] = saved
        return T_idxs, B_idxs

    def extract_batch(self, T_idxs, B_idxs):
        """Assemble agent inputs at T_idxs plus target inputs n steps later."""
        s = self.samples
        target_T_idxs = (T_idxs + self.n_step_return) % self.T
        batch = LandmarkSamplesFromReplay(
            agent_inputs=AgentInputs(
                observation=self.extract_observation(T_idxs, B_idxs),
                prev_action=s.action[T_idxs - 1, B_idxs],
                prev_reward=s.reward[T_idxs - 1, B_idxs],
            ),
            action=s.action[T_idxs, B_idxs],
            return_=self.samples_return_[T_idxs, B_idxs],
            done=self.samples.done[T_idxs, B_idxs],
            done_n=self.samples_done_n[T_idxs, B_idxs],
            target_inputs=AgentInputs(
                observation=self.extract_observation(target_T_idxs, B_idxs),
                prev_action=s.action[target_T_idxs - 1, B_idxs],
                prev_reward=s.reward[target_T_idxs - 1, B_idxs],
            ),
            target_done=self.samples.done[target_T_idxs, B_idxs],
        )
        # Zero the "previous" fields for samples that start a new episode.
        t_news = np.where(s.done[T_idxs - 1, B_idxs])[0]
        batch.agent_inputs.prev_action[t_news] = 0
        batch.agent_inputs.prev_reward[t_news] = 0
        return torchify_buffer(batch)
class UniformTripletReplayBuffer(BaseReplayBuffer):
    """Circular buffer sampling (anchor, positive, negative) observation
    triplets for contrastive training.

    Positives come from within ``pos_threshold`` steps of the anchor inside
    the same episode; negatives from the same episode at a distance in
    [neg_close_threshold, neg_far_threshold], drawn from the left or right
    side in proportion to how many candidates each side offers.
    """

    def __init__(self, example, size, B, pos_threshold, neg_close_threshold, neg_far_threshold):
        self.T = T = math.ceil(size / B)
        self.B = B
        self.size = T * B
        self.pos_threshold = pos_threshold
        self.neg_close_threshold = neg_close_threshold
        self.neg_far_threshold = neg_far_threshold
        self.t = 0  # Cursor (in T dimension).
        # NOTE(review): self.async_ is read here but never assigned in this
        # class -- presumably a class attribute of BaseReplayBuffer; confirm.
        self.samples = buffer_from_example(example, (T, B),
            share_memory=self.async_)
        # Per-(t, b) [start, end) episode bounds; initialized to the full
        # (wrapped) range until real bounds are recorded.
        self.episode_bounds = np.zeros((T, B, 2), dtype=int)
        self.episode_bounds[:, :, 0] = -self.T
        self.episode_bounds[:, :, 1] = self.T
        self.episode_start = 0
        self._buffer_full = False

    def append_samples(self, samples):
        """Write a (T, B) slab of samples at the cursor, tracking episode bounds."""
        T, B = get_leading_dims(samples, n_dim=2)  # samples.env.reward.shape[:2]
        assert B == self.B
        t = self.t
        if t + T > self.T:  # Wrap.
            idxs = np.arange(t, t + T) % self.T
        else:
            idxs = np.arange(t, t + T)
        self.samples[idxs] = samples

        # Provisional bounds for the new rows: current episode start up to
        # the cursor (start shifted by -T when it lies past the wrap point).
        if self.episode_start >= t:
            bounds = [self.episode_start - self.T, t]
        else:
            bounds = [self.episode_start, t]
        self.episode_bounds[idxs] = bounds

        # Finalize bounds for every episode that terminated in this slab.
        done = samples.done.detach().numpy()
        any_done = np.any(done, axis=1)
        for done_idx, done_markers in zip(idxs[any_done], done[any_done]):
            if self.episode_start >= done_idx + 1:
                bounds = [self.episode_start - self.T, done_idx + 1]
            else:
                bounds = [self.episode_start, done_idx + 1]
            self.episode_bounds[self.episode_start: done_idx + 1, done_markers] = bounds
            self.episode_start = done_idx + 1

        if not self._buffer_full and t + T >= self.T:
            self._buffer_full = True  # Only changes on first around.
        self.t = (t + T) % self.T
        return T, idxs  # Pass these on to subclass.

    def sample_batch(self, batch_B):
        """Draw ``batch_B`` (anchor, pos, neg) observation triplets."""
        t = self.t
        # Anchors: anywhere once full; otherwise keep a far-threshold margin.
        high = self.T if self._buffer_full else t - self.neg_far_threshold
        low = 0 if self._buffer_full else self.neg_far_threshold
        anchor_idxs = np.random.randint(low=low, high=high, size=(batch_B,))
        # anchor_idxs[anchor_idxs >= t] += t  # min for invalid high t.
        anchor_idxs = anchor_idxs % self.T
        B_idxs = np.random.randint(low=0, high=self.B, size=(batch_B,))

        # Positive window: within pos_threshold of the anchor, clipped to the
        # anchor's episode bounds (upper bound unwrapped by +T when the
        # anchor index sits past it).
        pos_low = np.maximum(anchor_idxs - self.pos_threshold, self.episode_bounds[anchor_idxs, B_idxs, 0])
        upper_bounds = self.episode_bounds[anchor_idxs, B_idxs, 1]
        invalid_bounds = anchor_idxs >= upper_bounds
        upper_bounds[invalid_bounds] += self.T
        pos_high = np.minimum(anchor_idxs + self.pos_threshold + 1, upper_bounds)
        upper_bounds[invalid_bounds] -= self.T
        pos_idxs = np.random.randint(low=pos_low, high=pos_high, size=(batch_B,))
        # pos_idxs = pos_idxs % self.T
        # pos_idxs[pos_idxs >= t] += t
        pos_idxs = pos_idxs % self.T

        # Left-side negative candidates (behind the anchor); degenerate
        # windows are collapsed to a single index and given zero weight.
        left_neg_low = np.maximum(anchor_idxs - self.neg_far_threshold, self.episode_bounds[anchor_idxs, B_idxs, 0])
        left_neg_high = anchor_idxs - self.neg_close_threshold + 1
        invalid = left_neg_low >= left_neg_high
        left_neg_low[invalid] = left_neg_high[invalid] - 1
        left_neg_idxs = np.random.randint(low=left_neg_low, high=left_neg_high, size=(batch_B,))
        left_range = left_neg_high - left_neg_low
        left_range[invalid] = 0

        # Right-side negative candidates (ahead of the anchor), same scheme.
        right_neg_low = anchor_idxs + self.neg_close_threshold
        right_neg_high = np.minimum(anchor_idxs + self.neg_far_threshold + 1, self.episode_bounds[anchor_idxs, B_idxs, 1])
        invalid = right_neg_low >= right_neg_high
        right_neg_low[invalid] = right_neg_high[invalid] - 1
        right_neg_idxs = np.random.randint(low=right_neg_low, high=right_neg_high, size=(batch_B,))
        right_range = right_neg_high - right_neg_low
        right_range[invalid] = 0

        # Pick left vs right with probability proportional to window size
        # (clip keeps the denominator >= 1 when both windows are empty).
        prob = left_range / np.clip(left_range + right_range, a_min=1, a_max=None)
        uniform = np.random.rand(*prob.shape)
        neg_idxs = np.where(uniform < prob, left_neg_idxs, right_neg_idxs)
        # neg_idxs = neg_idxs % self.T
        # neg_idxs[neg_idxs >= t] += t
        neg_idxs = neg_idxs % self.T

        batch = TripletsFromReplay(
            anchor=self.extract_observation(anchor_idxs, B_idxs),
            pos=self.extract_observation(pos_idxs, B_idxs),
            neg=self.extract_observation(neg_idxs, B_idxs)
        )
        return torchify_buffer(batch)

    def extract_observation(self, T_idxs, B_idxs):
        # Fancy-index the stored observations at the chosen (time, env) pairs.
        return self.samples.observation[T_idxs, B_idxs]
| 42.126263 | 122 | 0.639851 |
138399bd5be7003c4d1c6d4cf2774a8946266b2c | 2,670 | py | Python | openGaussBase/testcase/KEYWORDS/synonym/Opengauss_Function_Keyword_Synonym_Case0020.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/KEYWORDS/synonym/Opengauss_Function_Keyword_Synonym_Case0020.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/KEYWORDS/synonym/Opengauss_Function_Keyword_Synonym_Case0020.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
'''
#-- @testpoint:opengauss关键字synonym(非保留),作为目录对象名
'''
import unittest
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
logger = Logger()
commonsh = CommonSH('dbuser')
constant = Constant()
class Hostname(unittest.TestCase):
    """Verify that the non-reserved keyword ``synonym`` works as a directory
    object name under each quoting style (unquoted and double-quoted succeed;
    single-quoted and backquoted are syntax errors)."""

    def setUp(self):
        logger.info("------------------------ Opengauss_Function_Keyword_Synonym_Case0020 开始执行--------------------------")

    # Keyword as directory object name without quotes -- succeeds.
    def test_synonym_1(self):
        result = commonsh.execut_db_sql('''create directory synonym as '/tmp/';
                                           drop directory synonym;''')
        logger.info(result)
        self.assertIn(constant.CREATE_DIRECTORY_SUCCESS_MSG, result)
        self.assertIn(constant.DROP_DIRECTORY_SUCCESS_MSG, result)

    # Keyword as directory object name in double quotes -- succeeds.
    def test_synonym_2(self):
        result = commonsh.execut_db_sql('''create directory "synonym" as '/tmp/';
                                           drop directory "synonym";''')
        logger.info(result)
        self.assertIn(constant.CREATE_DIRECTORY_SUCCESS_MSG, result)
        self.assertIn(constant.DROP_DIRECTORY_SUCCESS_MSG, result)

    # Keyword as directory object name in single quotes -- syntax error.
    def test_synonym_3(self):
        result = commonsh.execut_db_sql('''drop directory if exists 'synonym';''')
        logger.info(result)
        self.assertIn(constant.SYNTAX_ERROR_MSG, result)
        result = commonsh.execut_db_sql(''' create directory 'synonym' as '/tmp/';''')
        logger.info(result)
        self.assertIn(constant.SYNTAX_ERROR_MSG, result)

    # Keyword as directory object name in backquotes -- syntax error.
    def test_synonym_4(self):
        result = commonsh.execut_db_sql('''drop directory if exists \`synonym\`;''')
        logger.info(result)
        self.assertIn(constant.SYNTAX_ERROR_MSG, result)
        result = commonsh.execut_db_sql('''create directory \`synonym\` as '/tmp/';''')
        logger.info(result)
        self.assertIn(constant.SYNTAX_ERROR_MSG, result)

    def tearDown(self):
        logger.info('------------------------ Opengauss_Function_Keyword_Synonym_Case0020 执行结束--------------------------')
2c5562fc837b7386f516c6e70e546df338d7bcaf | 11,269 | py | Python | raspberrypy/control/myo.py | PiscesDream/raspberrypy | 7feabc66f76afa0daf9bb47e16e5317d7ff6145c | [
"MIT"
] | 1 | 2022-01-23T19:21:08.000Z | 2022-01-23T19:21:08.000Z | raspberrypy/control/myo.py | PiscesDream/RaspberryPy | 7feabc66f76afa0daf9bb47e16e5317d7ff6145c | [
"MIT"
] | 2 | 2020-07-22T17:20:33.000Z | 2020-07-23T00:30:55.000Z | raspberrypy/control/myo.py | PiscesDream/RaspberryPy | 7feabc66f76afa0daf9bb47e16e5317d7ff6145c | [
"MIT"
] | 2 | 2019-07-02T00:26:25.000Z | 2020-07-22T17:26:56.000Z | # This script is modified from
# https://github.com/dzhu/myo-raw
# http://www.fernandocosentino.net/pyoconnect/
#
# to enable Myo please execute
# // permission to ttyACM0 - must restart linux user after this
# sudo usermod -a -G dialout $USER
# // dependencies
# sudo pip install pySerial enum34
from __future__ import print_function
import enum
import re
import struct
import sys
import threading
import time
import serial
from serial.tools.list_ports import comports
# common.py
import struct
def pack(fmt, *args):
    """Pack *args* into little-endian binary data using struct format *fmt*."""
    little_endian_fmt = '<' + fmt
    return struct.pack(little_endian_fmt, *args)
def unpack(fmt, *args):
    """Unpack little-endian binary data according to struct format *fmt*."""
    little_endian_fmt = '<' + fmt
    return struct.unpack(little_endian_fmt, *args)
def text(scr, font, txt, pos, clr=(255,255,255)):
    """Render *txt* with *font* in color *clr* and blit it onto surface *scr* at *pos*."""
    rendered = font.render(txt, True, clr)
    scr.blit(rendered, pos)
# commmon.py end
def multichr(ords):
    """Convert an iterable of integer byte values into a byte string (py2/py3 safe)."""
    if sys.version_info[0] < 3:
        # Python 2: build a str from individual characters
        return ''.join(map(chr, ords))
    return bytes(ords)
def multiord(b):
    """Convert a byte string into a sequence of integer byte values (py2/py3 safe)."""
    if sys.version_info[0] < 3:
        # Python 2: str of chars -> ordinals
        return map(ord, b)
    return list(b)
class Arm(enum.Enum):
    # Which arm the band reports being worn on (from the BLE arm-sync event).
    UNKNOWN = 0
    RIGHT = 1
    LEFT = 2
class XDirection(enum.Enum):
    # Orientation of the device's x axis relative to the wearer's arm.
    UNKNOWN = 0
    X_TOWARD_WRIST = 1
    X_TOWARD_ELBOW = 2
class Pose(enum.Enum):
    # Hand poses reported by the onboard gesture classifier.
    REST = 0
    FIST = 1
    WAVE_IN = 2
    WAVE_OUT = 3
    FINGERS_SPREAD = 4
    THUMB_TO_PINKY = 5
    UNKNOWN = 255  # reported when the classifier has no confident match
class Packet(object):
    """A parsed BGAPI packet: message type, class, command and raw payload bytes."""

    def __init__(self, ords):
        # ords is the full packet as a list of integer byte values;
        # byte 1 (low length byte) is not stored, only derived fields are kept.
        self.typ = ords[0]
        self.cls = ords[2]
        self.cmd = ords[3]
        self.payload = multichr(ords[4:])

    def __repr__(self):
        payload_hex = ' '.join('%02X' % b for b in multiord(self.payload))
        return 'Packet(%02X, %02X, %02X, [%s])' % (
            self.typ, self.cls, self.cmd, payload_hex)
class BT(object):
    '''Implements the non-Myo-specific details of the Bluetooth protocol.'''
    def __init__(self, tty):
        # The BGAPI dongle speaks over a serial port; dsrdtr=1 is needed by the dongle.
        self.ser = serial.Serial(port=tty, baudrate=9600, dsrdtr=1)
        self.buf = []  # bytes of the packet currently being assembled
        self.lock = threading.Lock()
        self.handlers = []  # callbacks invoked for every asynchronous event packet

    ## internal data-handling methods
    def recv_packet(self, timeout=None):
        '''Read serial bytes until one full packet is assembled.

        Returns the Packet, or None if *timeout* seconds elapse first.
        Event packets (type 0x80) are also dispatched to registered handlers.
        '''
        t0 = time.time()
        self.ser.timeout = None
        while timeout is None or time.time() < t0 + timeout:
            # shrink the serial read timeout to the remaining budget
            if timeout is not None: self.ser.timeout = t0 + timeout - time.time()
            c = self.ser.read()
            if not c: return None
            ret = self.proc_byte(ord(c))
            if ret:
                # type 0x80 marks an asynchronous event: fan it out to handlers
                if ret.typ == 0x80:
                    self.handle_event(ret)
                return ret
    def recv_packets(self, timeout=.5):
        '''Collect as many packets as arrive within *timeout* seconds.'''
        res = []
        t0 = time.time()
        while time.time() < t0 + timeout:
            p = self.recv_packet(t0 + timeout - time.time())
            if not p: return res
            res.append(p)
        return res
    def proc_byte(self, c):
        '''Feed one byte into the packet assembler; return a Packet when complete.'''
        if not self.buf:
            # first byte must be one of the valid BGAPI message-type headers
            if c in [0x00, 0x80, 0x08, 0x88]:
                self.buf.append(c)
            return None
        elif len(self.buf) == 1:
            # second byte carries the low length bits; total length =
            # 4 header bytes + high length bits (low 3 bits of byte 0) + byte 1
            self.buf.append(c)
            self.packet_len = 4 + (self.buf[0] & 0x07) + self.buf[1]
            return None
        else:
            self.buf.append(c)
        if self.packet_len and len(self.buf) == self.packet_len:
            p = Packet(self.buf)
            self.buf = []
            return p
        return None
    def handle_event(self, p):
        # fan an event packet out to every registered handler
        for h in self.handlers:
            h(p)
    def add_handler(self, h):
        self.handlers.append(h)
    def remove_handler(self, h):
        # removing a handler that was never added is a no-op
        try: self.handlers.remove(h)
        except ValueError: pass
    def wait_event(self, cls, cmd):
        '''Block until an event with the given class/command arrives; return it.'''
        res = [None]
        def h(p):
            if p.cls == cls and p.cmd == cmd:
                res[0] = p
        self.add_handler(h)
        while res[0] is None:
            self.recv_packet()
        self.remove_handler(h)
        return res[0]

    ## specific BLE commands
    def connect(self, addr):
        # GAP connect_direct: 6-byte address, public addr type, connection params
        return self.send_command(6, 3, pack('6sBHHHH', multichr(addr), 0, 6, 6, 64, 0))
    def get_connections(self):
        return self.send_command(0, 6)
    def discover(self):
        # GAP discover, generic mode
        return self.send_command(6, 2, b'\x01')
    def end_scan(self):
        return self.send_command(6, 4)
    def disconnect(self, h):
        return self.send_command(3, 0, pack('B', h))
    def read_attr(self, con, attr):
        '''Read a GATT attribute and wait for the attribute-value event.'''
        self.send_command(4, 4, pack('BH', con, attr))
        return self.wait_event(4, 5)
    def write_attr(self, con, attr, val):
        '''Write a GATT attribute and wait for the procedure-completed event.'''
        self.send_command(4, 5, pack('BHB', con, attr, len(val)) + val)
        return self.wait_event(4, 1)
    def send_command(self, cls, cmd, payload=b'', wait_resp=True):
        # NOTE(review): wait_resp is accepted but never used -- the response is
        # always awaited; confirm before relying on it.
        s = pack('4B', 0, len(payload), cls, cmd) + payload
        self.ser.write(s)
        while True:
            p = self.recv_packet()
            ## no timeout, so p won't be None
            if p.typ == 0: return p
            ## not a response: must be an event
            self.handle_event(p)
class Myo(object):
    '''Implements the Myo-specific communication protocol on top of BT.'''

    def __init__(self, tty=None):
        # auto-detect the Bluegiga dongle when no serial device is given
        if tty is None:
            tty = self.detect_tty()
        if tty is None:
            raise ValueError('Myo dongle not found!')

        self.bt = BT(tty)
        self.conn = None  # BLE connection handle, set by connect()
        self.emg_handlers = []
        self.imu_handlers = []
        self.arm_handlers = []
        self.pose_handlers = []

    def detect_tty(self):
        '''Return the serial device of the first attached BLED112 dongle, or None.'''
        # the Bluegiga dongle enumerates with USB PID 2458:0001
        for p in comports():
            if re.search(r'PID=2458:0*1', p[2]):
                print('using device:', p[0])
                return p[0]
        return None

    def run(self, timeout=None):
        '''Process one incoming BLE packet, blocking up to *timeout* seconds.'''
        self.bt.recv_packet(timeout)

    def connect(self):
        '''Scan for a Myo armband, connect, and configure data streaming.'''
        ## stop everything from before
        self.bt.end_scan()
        self.bt.disconnect(0)
        self.bt.disconnect(1)
        self.bt.disconnect(2)

        ## start scanning; the magic suffix is the Myo advertising service UUID
        print('scanning...')
        self.bt.discover()
        while True:
            p = self.bt.recv_packet()
            print('scan response:', p)
            if p.payload.endswith(b'\x06\x42\x48\x12\x4A\x7F\x2C\x48\x47\xB9\xDE\x04\xA9\x01\x00\x06\xD5'):
                addr = list(multiord(p.payload[2:8]))
                break
        self.bt.end_scan()

        ## connect and wait for status event
        conn_pkt = self.bt.connect(addr)
        self.conn = multiord(conn_pkt.payload)[-1]
        self.bt.wait_event(3, 0)

        ## get firmware version
        fw = self.read_attr(0x17)
        _, _, _, _, v0, v1, v2, v3 = unpack('BHBBHHHH', fw.payload)
        print('firmware version: %d.%d.%d.%d' % (v0, v1, v2, v3))

        # pre-1.0 firmware uses a different configuration sequence
        self.old = (v0 == 0)

        if self.old:
            ## don't know what these do; Myo Connect sends them, though we get data
            ## fine without them
            self.write_attr(0x19, b'\x01\x02\x00\x00')
            self.write_attr(0x2f, b'\x01\x00')
            self.write_attr(0x2c, b'\x01\x00')
            self.write_attr(0x32, b'\x01\x00')
            self.write_attr(0x35, b'\x01\x00')

            ## enable EMG data
            self.write_attr(0x28, b'\x01\x00')
            ## enable IMU data
            self.write_attr(0x1d, b'\x01\x00')

            ## Sampling rate of the underlying EMG sensor, capped to 1000. If it's
            ## less than 1000, emg_hz is correct. If it is greater, the actual
            ## framerate starts dropping inversely. Also, if this is much less than
            ## 1000, EMG data becomes slower to respond to changes. In conclusion,
            ## 1000 is probably a good value.
            C = 1000
            emg_hz = 50
            ## strength of low-pass filtering of EMG data
            emg_smooth = 100

            imu_hz = 50

            ## send sensor parameters, or we don't get any data
            self.write_attr(0x19, pack('BBBBHBBBBB', 2, 9, 2, 1, C, emg_smooth, C // emg_hz, imu_hz, 0, 0))
        else:
            name = self.read_attr(0x03)
            print('device name: %s' % name.payload)

            ## enable IMU data
            self.write_attr(0x1d, b'\x01\x00')
            ## enable on/off arm notifications
            self.write_attr(0x24, b'\x02\x00')

            self.start_raw()

        ## add data handlers
        def handle_data(p):
            # only attribute-value events carry sensor data
            if (p.cls, p.cmd) != (4, 5):
                return

            c, attr, typ = unpack('BHB', p.payload[:4])
            pay = p.payload[5:]

            if attr == 0x27:
                # EMG characteristic: 8 channels + status byte
                vals = unpack('8HB', pay)
                ## not entirely sure what the last byte is, but it's a bitmask that
                ## seems to indicate which sensors think they're being moved around or
                ## something
                emg = vals[:8]
                moving = vals[8]
                self.on_emg(emg, moving)
            elif attr == 0x1c:
                # IMU characteristic: quaternion, accelerometer, gyroscope
                vals = unpack('10h', pay)
                quat = vals[:4]
                acc = vals[4:7]
                gyro = vals[7:10]
                self.on_imu(quat, acc, gyro)
            elif attr == 0x23:
                # classifier characteristic: arm sync / pose events
                typ, val, xdir, _, _, _ = unpack('6B', pay)

                if typ == 1:  # on arm
                    self.on_arm(Arm(val), XDirection(xdir))
                elif typ == 2:  # removed from arm
                    self.on_arm(Arm.UNKNOWN, XDirection.UNKNOWN)
                elif typ == 3:  # pose
                    self.on_pose(Pose(val))
            else:
                print('data with unknown attr: %02X %s' % (attr, p))

        self.bt.add_handler(handle_data)

    def write_attr(self, attr, val):
        '''Write GATT attribute *attr* on the current connection (no-op if not connected).'''
        if self.conn is not None:
            self.bt.write_attr(self.conn, attr, val)

    def read_attr(self, attr):
        '''Read GATT attribute *attr* on the current connection; None if not connected.'''
        if self.conn is not None:
            return self.bt.read_attr(self.conn, attr)
        return None

    def disconnect(self):
        if self.conn is not None:
            self.bt.disconnect(self.conn)

    def start_raw(self):
        '''Sending this sequence for v1.0 firmware seems to enable both raw data and
        pose notifications.
        '''
        self.write_attr(0x28, b'\x01\x00')
        self.write_attr(0x19, b'\x01\x03\x01\x01\x01')

    def mc_start_collection(self):
        '''Myo Connect sends this sequence (or a reordering) when starting data
        collection for v1.0 firmware; this enables raw data but disables arm and
        pose notifications.
        '''
        self.write_attr(0x28, b'\x01\x00')
        self.write_attr(0x1d, b'\x01\x00')
        self.write_attr(0x24, b'\x02\x00')
        self.write_attr(0x19, b'\x01\x03\x01\x01\x01')
        self.write_attr(0x28, b'\x01\x00')
        self.write_attr(0x1d, b'\x01\x00')
        self.write_attr(0x19, b'\x09\x01\x01\x00\x00')
        self.write_attr(0x1d, b'\x01\x00')
        self.write_attr(0x19, b'\x01\x03\x00\x01\x00')
        self.write_attr(0x28, b'\x01\x00')
        self.write_attr(0x1d, b'\x01\x00')
        self.write_attr(0x19, b'\x01\x03\x01\x01\x00')

    def mc_end_collection(self):
        '''Myo Connect sends this sequence (or a reordering) when ending data collection
        for v1.0 firmware; this reenables arm and pose notifications, but
        doesn't disable raw data.
        '''
        self.write_attr(0x28, b'\x01\x00')
        self.write_attr(0x1d, b'\x01\x00')
        self.write_attr(0x24, b'\x02\x00')
        self.write_attr(0x19, b'\x01\x03\x01\x01\x01')
        self.write_attr(0x19, b'\x09\x01\x00\x00\x00')
        self.write_attr(0x1d, b'\x01\x00')
        self.write_attr(0x24, b'\x02\x00')
        self.write_attr(0x19, b'\x01\x03\x00\x01\x01')
        self.write_attr(0x28, b'\x01\x00')
        self.write_attr(0x1d, b'\x01\x00')
        self.write_attr(0x24, b'\x02\x00')
        self.write_attr(0x19, b'\x01\x03\x01\x01\x01')

    def vibrate(self, length):
        '''Vibrate the band; *length* must be 1 (short), 2 (medium) or 3 (long).'''
        # BUGFIX: the original used Python-2-only xrange, which raises NameError
        # on Python 3; range works on both interpreters.
        if length in range(1, 4):
            ## first byte tells it to vibrate; purpose of second byte is unknown
            self.write_attr(0x19, pack('3B', 3, 1, length))

    def add_emg_handler(self, h):
        self.emg_handlers.append(h)

    def add_imu_handler(self, h):
        self.imu_handlers.append(h)

    def add_pose_handler(self, h):
        self.pose_handlers.append(h)

    def add_arm_handler(self, h):
        self.arm_handlers.append(h)

    def on_emg(self, emg, moving):
        for h in self.emg_handlers:
            h(emg, moving)

    def on_imu(self, quat, acc, gyro):
        for h in self.imu_handlers:
            h(quat, acc, gyro)

    def on_pose(self, p):
        for h in self.pose_handlers:
            h(p)

    def on_arm(self, arm, xdir):
        for h in self.arm_handlers:
            h(arm, xdir)
| 26.703791 | 101 | 0.621439 |
9da8207f39675b41a16b4e0a9729dc9055a237d5 | 13,245 | py | Python | gym/f110_gym/envs/f110_env.py | MTDzi/f1tenth_gym | 0202b154ff37ab94ecab1c21b2a770958461290e | [
"MIT"
] | null | null | null | gym/f110_gym/envs/f110_env.py | MTDzi/f1tenth_gym | 0202b154ff37ab94ecab1c21b2a770958461290e | [
"MIT"
] | 1 | 2022-01-06T14:28:32.000Z | 2022-01-06T14:28:32.000Z | gym/f110_gym/envs/f110_env.py | MTDzi/f1tenth_gym | 0202b154ff37ab94ecab1c21b2a770958461290e | [
"MIT"
] | null | null | null | # MIT License
# Copyright (c) 2020 Joseph Auckley, Matthew O'Kelly, Aman Sinha, Hongrui Zheng
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
Author: Hongrui Zheng
'''
# gym imports
import gym
from gym import error, spaces, utils
from gym.utils import seeding
# base classes
from f110_gym.envs.base_classes import Simulator
# others
import numpy as np
import os
import time
# gl
import pyglet
pyglet.options['debug_gl'] = False
from pyglet import gl
# constants
# rendering
VIDEO_W = 600
VIDEO_H = 400
WINDOW_W = 1000
WINDOW_H = 800
class F110Env(gym.Env, utils.EzPickle):
"""
OpenAI gym environment for F1TENTH
Env should be initialized by calling gym.make('f110_gym:f110-v0', **kwargs)
Args:
kwargs:
seed (int, default=12345): seed for random state and reproducibility
map (str, default='vegas'): name of the map used for the environment. Currently, available environments include: 'berlin', 'vegas', 'skirk'. You could use a string of the absolute path to the yaml file of your custom map.
map_ext (str, default='png'): image extension of the map image file. For example 'png', 'pgm'
params (dict, default={'mu': 1.0489, 'C_Sf':, 'C_Sr':, 'lf': 0.15875, 'lr': 0.17145, 'h': 0.074, 'm': 3.74, 'I': 0.04712, 's_min': -0.4189, 's_max': 0.4189, 'sv_min': -3.2, 'sv_max': 3.2, 'v_switch':7.319, 'a_max': 9.51, 'v_min':-5.0, 'v_max': 20.0, 'width': 0.31, 'length': 0.58}): dictionary of vehicle parameters.
mu: surface friction coefficient
C_Sf: Cornering stiffness coefficient, front
C_Sr: Cornering stiffness coefficient, rear
lf: Distance from center of gravity to front axle
lr: Distance from center of gravity to rear axle
h: Height of center of gravity
m: Total mass of the vehicle
I: Moment of inertial of the entire vehicle about the z axis
s_min: Minimum steering angle constraint
s_max: Maximum steering angle constraint
sv_min: Minimum steering velocity constraint
sv_max: Maximum steering velocity constraint
v_switch: Switching velocity (velocity at which the acceleration is no longer able to create wheel spin)
a_max: Maximum longitudinal acceleration
v_min: Minimum longitudinal velocity
v_max: Maximum longitudinal velocity
width: width of the vehicle in meters
length: length of the vehicle in meters
num_agents (int, default=2): number of agents in the environment
timestep (float, default=0.01): physics timestep
ego_idx (int, default=0): ego's index in list of agents
"""
metadata = {'render.modes': ['human', 'human_fast']}
def __init__(self, **kwargs):
# kwargs extraction
try:
self.seed = kwargs['seed']
except:
self.seed = 12345
try:
self.map_name = kwargs['map']
# different default maps
if self.map_name == 'berlin':
self.map_path = os.path.dirname(os.path.abspath(__file__)) + '/maps/berlin.yaml'
elif self.map_name == 'skirk':
self.map_path = os.path.dirname(os.path.abspath(__file__)) + '/maps/skirk.yaml'
elif self.map_name == 'levine':
self.map_path = os.path.dirname(os.path.abspath(__file__)) + '/maps/levine.yaml'
else:
self.map_path = self.map_name + '.yaml'
except:
self.map_path = os.path.dirname(os.path.abspath(__file__)) + '/maps/vegas.yaml'
try:
self.map_ext = kwargs['map_ext']
except:
self.map_ext = '.png'
try:
self.params = kwargs['params']
except:
self.params = {'mu': 1.0489, 'C_Sf': 4.718, 'C_Sr': 5.4562, 'lf': 0.15875, 'lr': 0.17145, 'h': 0.074, 'm': 3.74, 'I': 0.04712, 's_min': -0.4189, 's_max': 0.4189, 'sv_min': -3.2, 'sv_max': 3.2, 'v_switch': 7.319, 'a_max': 9.51, 'v_min':-5.0, 'v_max': 20.0, 'width': 0.31, 'length': 0.58}
# simulation parameters
try:
self.num_agents = kwargs['num_agents']
except:
self.num_agents = 2
try:
self.timestep = kwargs['timestep']
except:
self.timestep = 0.01
# default ego index
try:
self.ego_idx = kwargs['ego_idx']
except:
self.ego_idx = 0
# radius to consider done
self.start_thresh = 0.5 # 10cm
# env states
self.poses_x = []
self.poses_y = []
self.poses_theta = []
self.collisions = np.zeros((self.num_agents, ))
# TODO: collision_idx not used yet
# self.collision_idx = -1 * np.ones((self.num_agents, ))
# loop completion
self.near_start = True
self.num_toggles = 0
# race info
self.lap_times = np.zeros((self.num_agents, ))
self.lap_counts = np.zeros((self.num_agents, ))
self.current_time = 0.0
# finish line info
self.num_toggles = 0
self.near_start = True
self.near_starts = np.array([True]*self.num_agents)
self.toggle_list = np.zeros((self.num_agents,))
self.start_xs = np.zeros((self.num_agents, ))
self.start_ys = np.zeros((self.num_agents, ))
self.start_thetas = np.zeros((self.num_agents, ))
self.start_rot = np.eye(2)
# initiate stuff
self.ignore_collisions = kwargs['ignore_collisions']
self.sim = Simulator(self.params, self.num_agents, self.seed, ignore_collisions=self.ignore_collisions)
self.sim.set_map(self.map_path, self.map_ext)
# rendering
self.renderer = None
self.current_obs = None
def __del__(self):
"""
Finalizer, does cleanup
"""
pass
# if self.renderer is not None:
# self.renderer.close()
    def _check_done(self):
        """
        Check if the current rollout is done

        Args:
            None

        Returns:
            done (bool): whether the rollout is done
            toggle_list (list[int]): each agent's toggle list for crossing the finish zone
        """
        # this is assuming 2 agents
        # TODO: switch to maybe s-based
        # half-widths of the finish zone, in the start pose's frame
        left_t = 2
        right_t = 2

        # agent positions relative to their start points, rotated into the
        # ego start frame (start_rot is set in reset())
        poses_x = np.array(self.poses_x)-self.start_xs
        poses_y = np.array(self.poses_y)-self.start_ys
        delta_pt = np.dot(self.start_rot, np.stack((poses_x, poses_y), axis=0))
        temp_y = delta_pt[1,:]
        # clamp lateral offset: zero inside the [-right_t, left_t] band,
        # distance-to-band outside of it
        idx1 = temp_y > left_t
        idx2 = temp_y < -right_t
        temp_y[idx1] -= left_t
        temp_y[idx2] = -right_t - temp_y[idx2]
        temp_y[np.invert(np.logical_or(idx1, idx2))] = 0

        # squared distance to the finish zone; "close" within ~0.32 m
        dist2 = delta_pt[0,:]**2 + temp_y**2
        closes = dist2 <= 0.1
        # each enter/leave of the start zone increments the agent's toggle count;
        # 2 toggles = 1 lap, so 4 toggles = 2 completed laps
        for i in range(self.num_agents):
            if closes[i] and not self.near_starts[i]:
                self.near_starts[i] = True
                self.toggle_list[i] += 1
            elif not closes[i] and self.near_starts[i]:
                self.near_starts[i] = False
                self.toggle_list[i] += 1
            self.lap_counts[i] = self.toggle_list[i] // 2
            # freeze lap_times once an agent has finished its 2 laps
            if self.toggle_list[i] < 4:
                self.lap_times[i] = self.current_time

        # episode ends on ego collision or when every agent finished 2 laps
        done = (self.collisions[self.ego_idx]) or np.all(self.toggle_list >= 4)

        return done, self.toggle_list >= 4
def _update_state(self, obs_dict):
"""
Update the env's states according to observations
Args:
obs_dict (dict): dictionary of observation
Returns:
None
"""
self.poses_x = obs_dict['poses_x']
self.poses_y = obs_dict['poses_y']
self.poses_theta = obs_dict['poses_theta']
self.collisions = obs_dict['collisions']
    def step(self, action):
        """
        Step function for the gym env

        Args:
            action (np.ndarray(num_agents, 2))

        Returns:
            obs (dict): observation of the current step
            reward (float, default=self.timestep): step reward, currently is physics timestep
            done (bool): if the simulation is done
            info (dict): auxillary information dictionary
        """
        # call simulation step, then attach race bookkeeping to the observation
        obs = self.sim.step(action)
        obs['lap_times'] = self.lap_times
        obs['lap_counts'] = self.lap_counts
        self.current_obs = obs

        # times: reward is simply the elapsed physics timestep
        reward = self.timestep
        self.current_time = self.current_time + self.timestep

        # update data member
        self._update_state(obs)

        # check done
        done, toggle_list = self._check_done()
        info = {'checkpoint_done': toggle_list}

        return obs, reward, done, info
    def reset(self, poses):
        """
        Reset the gym environment by given poses

        Args:
            poses (np.ndarray (num_agents, 3)): poses to reset agents to, as (x, y, theta) rows

        Returns:
            obs (dict): observation of the current step
            reward (float, default=self.timestep): step reward, currently is physics timestep
            done (bool): if the simulation is done
            info (dict): auxillary information dictionary
        """
        # reset counters and data members
        self.current_time = 0.0
        self.collisions = np.zeros((self.num_agents, ))
        self.num_toggles = 0
        self.near_start = True
        self.near_starts = np.array([True]*self.num_agents)
        self.toggle_list = np.zeros((self.num_agents,))

        # states after reset; start_rot rotates world deltas into the ego
        # start-pose frame, used by _check_done for finish-zone tests
        self.start_xs = poses[:, 0]
        self.start_ys = poses[:, 1]
        self.start_thetas = poses[:, 2]
        self.start_rot = np.array([[np.cos(-self.start_thetas[self.ego_idx]), -np.sin(-self.start_thetas[self.ego_idx])], [np.sin(-self.start_thetas[self.ego_idx]), np.cos(-self.start_thetas[self.ego_idx])]])

        # call reset to simulator
        self.sim.reset(poses)

        # step once with zero input to obtain the initial observation
        action = np.zeros((self.num_agents, 2))
        obs, reward, done, info = self.step(action)
        return obs, reward, done, info
    def update_map(self, map_path, map_ext):
        """
        Updates the map used by simulation

        Args:
            map_path (str): absolute path to the map yaml file
            map_ext (str): extension of the map image file

        Returns:
            None
        """
        # delegate to the simulation backend
        self.sim.set_map(map_path, map_ext)
    def update_params(self, params, index=-1):
        """
        Updates the parameters used by simulation for vehicles

        Args:
            params (dict): dictionary of vehicle dynamics parameters
            index (int, default=-1): if >= 0 then only update a specific agent's params;
                otherwise all agents are updated

        Returns:
            None
        """
        # delegate to the simulation backend
        self.sim.update_params(params, agent_idx=index)
def render(self, mode='human'):
"""
Renders the environment with pyglet. Use mouse scroll in the window to zoom in/out, use mouse click drag to pan. Shows the agents, the map, current fps (bottom left corner), and the race information near as text.
Args:
mode (str, default='human'): rendering mode, currently supports:
'human': slowed down rendering such that the env is rendered in a way that sim time elapsed is close to real time elapsed
'human_fast': render as fast as possible
Returns:
None
"""
assert mode in ['human', 'human_fast']
if self.renderer is None:
# first call, initialize everything
from f110_gym.envs.rendering import EnvRenderer
self.renderer = EnvRenderer(WINDOW_W, WINDOW_H)
self.renderer.update_map(self.map_name, self.map_ext)
self.renderer.update_obs(self.current_obs)
self.renderer.dispatch_events()
self.renderer.on_draw()
self.renderer.flip()
if mode == 'human':
time.sleep(0.005)
elif mode == 'human_fast':
pass | 35.991848 | 328 | 0.600604 |
6f4579f20e591df8bf774aa0c383a05e8bfff949 | 9,592 | py | Python | tests/dml/test_line.py | danielkovarik/python-pptx | 248fe2c1881f3e32e3f0bef34bc568b5eb8a7685 | [
"MIT"
] | null | null | null | tests/dml/test_line.py | danielkovarik/python-pptx | 248fe2c1881f3e32e3f0bef34bc568b5eb8a7685 | [
"MIT"
] | null | null | null | tests/dml/test_line.py | danielkovarik/python-pptx | 248fe2c1881f3e32e3f0bef34bc568b5eb8a7685 | [
"MIT"
] | null | null | null | # encoding: utf-8
"""
Test suite for pptx.dml.line module
"""
from __future__ import absolute_import, print_function, unicode_literals
import pytest
from pptx.dml.color import ColorFormat
from pptx.dml.fill import FillFormat
from pptx.dml.line import LineFormat
from pptx.enum.dml import MSO_ARROWHEAD, MSO_FILL, MSO_LINE
from pptx.oxml.shapes.shared import CT_LineProperties
from pptx.shapes.autoshape import Shape
from ..oxml.unitdata.dml import an_ln
from ..unitutil.cxml import element, xml
from ..unitutil.mock import call, class_mock, instance_mock, property_mock
class DescribeLineFormat(object):
    # Each test receives a parametrized fixture tuple and checks one facet of
    # LineFormat's public API against the expected XML or value.
    def it_knows_its_dash_style(self, dash_style_get_fixture):
        line, expected_value = dash_style_get_fixture
        assert line.dash_style == expected_value

    def it_can_change_its_dash_style(self, dash_style_set_fixture):
        line, dash_style, spPr, expected_xml = dash_style_set_fixture
        line.dash_style = dash_style
        # assignment must rewrite the underlying spPr XML
        assert spPr.xml == expected_xml

    def it_knows_its_head_end(self, head_end_get_fixture):
        line, expected_value = head_end_get_fixture
        assert line.head_end == expected_value

    def it_can_change_its_head_end(self, head_end_set_fixture):
        line, head_end, spPr, expected_xml = head_end_set_fixture
        line.head_end = head_end
        assert spPr.xml == expected_xml

    def it_knows_its_tail_end(self, tail_end_get_fixture):
        line, expected_value = tail_end_get_fixture
        assert line.tail_end == expected_value

    def it_can_change_its_tail_end(self, tail_end_set_fixture):
        line, tail_end, spPr, expected_xml = tail_end_set_fixture
        line.tail_end = tail_end
        assert spPr.xml == expected_xml

    def it_knows_its_width(self, width_get_fixture):
        line, expected_line_width = width_get_fixture
        assert line.width == expected_line_width

    def it_can_change_its_width(self, width_set_fixture):
        line, width, expected_xml = width_set_fixture
        line.width = width
        assert line._ln.xml == expected_xml

    def it_has_a_fill(self, fill_fixture):
        line, FillFormat_, ln_, fill_ = fill_fixture
        fill = line.fill
        # the fill must be constructed from the line's ln element exactly once
        FillFormat_.from_fill_parent.assert_called_once_with(ln_)
        assert fill is fill_

    def it_has_a_color(self, color_fixture):
        line, fill_, expected_solid_calls, color_ = color_fixture
        color = line.color
        # .color should force a solid fill only when the fill is not already solid
        assert fill_.solid.mock_calls == expected_solid_calls
        assert color is color_
# fixtures -------------------------------------------------------
@pytest.fixture(
params=[(MSO_FILL.SOLID, False), (MSO_FILL.BACKGROUND, True), (None, True)]
)
def color_fixture(self, request, line, fill_prop_, fill_, color_):
pre_call_fill_type, solid_call_expected = request.param
fill_.type = pre_call_fill_type
expected_solid_calls = [call()] if solid_call_expected else []
return line, fill_, expected_solid_calls, color_
@pytest.fixture(
params=[
("p:spPr", None),
("p:spPr/a:ln", None),
("p:spPr/a:ln/a:prstDash", None),
("p:spPr/a:ln/a:prstDash{val=dash}", MSO_LINE.DASH),
("p:spPr/a:ln/a:prstDash{val=solid}", MSO_LINE.SOLID),
]
)
def dash_style_get_fixture(self, request):
spPr_cxml, expected_value = request.param
spPr = element(spPr_cxml)
line = LineFormat(spPr)
return line, expected_value
@pytest.fixture(
params=[
("p:spPr{a:b=c}", MSO_LINE.DASH, "p:spPr{a:b=c}/a:ln/a:prstDash{val=dash}"),
("p:spPr/a:ln", MSO_LINE.ROUND_DOT, "p:spPr/a:ln/a:prstDash{val=dot}"),
(
"p:spPr/a:ln/a:prstDash",
MSO_LINE.SOLID,
"p:spPr/a:ln/a:prstDash{val=solid}",
),
(
"p:spPr/a:ln/a:custDash",
MSO_LINE.DASH_DOT,
"p:spPr/a:ln/a:prstDash{val=dashDot}",
),
(
"p:spPr/a:ln/a:prstDash{val=dash}",
MSO_LINE.LONG_DASH,
"p:spPr/a:ln/a:prstDash{val=lgDash}",
),
("p:spPr/a:ln/a:prstDash{val=dash}", None, "p:spPr/a:ln"),
("p:spPr/a:ln/a:custDash", None, "p:spPr/a:ln"),
]
)
def dash_style_set_fixture(self, request):
spPr_cxml, dash_style, expected_cxml = request.param
spPr = element(spPr_cxml)
line = LineFormat(spPr)
expected_xml = xml(expected_cxml)
return line, dash_style, spPr, expected_xml
@pytest.fixture(params=[
('p:spPr', None),
('p:spPr/a:ln', None),
('p:spPr/a:ln/a:headEnd', None),
('p:spPr/a:ln/a:headEnd{type=diamond}', MSO_ARROWHEAD.DIAMOND),
('p:spPr/a:ln/a:headEnd{type=triangle}', MSO_ARROWHEAD.TRIANGLE),
])
def head_end_get_fixture(self, request):
spPr_cxml, expected_value = request.param
spPr = element(spPr_cxml)
line = LineFormat(spPr)
return line, expected_value
@pytest.fixture(params=[
('p:spPr{a:b=c}', MSO_ARROWHEAD.DIAMOND,
'p:spPr{a:b=c}/a:ln/a:headEnd{type=diamond}'),
('p:spPr/a:ln', MSO_ARROWHEAD.TRIANGLE,
'p:spPr/a:ln/a:headEnd{type=triangle}'),
('p:spPr/a:ln/a:headEnd', MSO_ARROWHEAD.OVAL,
'p:spPr/a:ln/a:headEnd{type=oval}'),
('p:spPr/a:ln/a:headEnd', MSO_ARROWHEAD.STEALTH,
'p:spPr/a:ln/a:headEnd{type=stealth}'),
('p:spPr/a:ln/a:headEnd{type=diamond}', MSO_ARROWHEAD.OPEN,
'p:spPr/a:ln/a:headEnd{type=open}'),
('p:spPr/a:ln/a:headEnd{type=diamond}', None,
'p:spPr/a:ln'),
('p:spPr/a:ln/a:headEnd', None,
'p:spPr/a:ln'),
])
def head_end_set_fixture(self, request):
spPr_cxml, head_end, expected_cxml = request.param
spPr = element(spPr_cxml)
line = LineFormat(spPr)
expected_xml = xml(expected_cxml)
return line, head_end, spPr, expected_xml
@pytest.fixture(params=[
('p:spPr', None),
('p:spPr/a:ln', None),
('p:spPr/a:ln/a:tailEnd', None),
('p:spPr/a:ln/a:tailEnd{type=diamond}', MSO_ARROWHEAD.DIAMOND),
('p:spPr/a:ln/a:tailEnd{type=triangle}', MSO_ARROWHEAD.TRIANGLE),
])
def tail_end_get_fixture(self, request):
spPr_cxml, expected_value = request.param
spPr = element(spPr_cxml)
line = LineFormat(spPr)
return line, expected_value
@pytest.fixture(params=[
('p:spPr{a:b=c}', MSO_ARROWHEAD.DIAMOND,
'p:spPr{a:b=c}/a:ln/a:tailEnd{type=diamond}'),
('p:spPr/a:ln', MSO_ARROWHEAD.TRIANGLE,
'p:spPr/a:ln/a:tailEnd{type=triangle}'),
('p:spPr/a:ln/a:tailEnd', MSO_ARROWHEAD.OVAL,
'p:spPr/a:ln/a:tailEnd{type=oval}'),
('p:spPr/a:ln/a:tailEnd', MSO_ARROWHEAD.STEALTH,
'p:spPr/a:ln/a:tailEnd{type=stealth}'),
('p:spPr/a:ln/a:tailEnd{type=diamond}', MSO_ARROWHEAD.OPEN,
'p:spPr/a:ln/a:tailEnd{type=open}'),
('p:spPr/a:ln/a:tailEnd{type=diamond}', None,
'p:spPr/a:ln'),
('p:spPr/a:ln/a:tailEnd', None,
'p:spPr/a:ln'),
])
def tail_end_set_fixture(self, request):
spPr_cxml, tail_end, expected_cxml = request.param
spPr = element(spPr_cxml)
line = LineFormat(spPr)
expected_xml = xml(expected_cxml)
return line, tail_end, spPr, expected_xml
@pytest.fixture
def fill_fixture(self, line, FillFormat_, ln_, fill_):
return line, FillFormat_, ln_, fill_
@pytest.fixture(params=[(None, 0), (12700, 12700)])
def width_get_fixture(self, request, shape_):
w, expected_line_width = request.param
shape_.ln = self.ln_bldr(w).element
line = LineFormat(shape_)
return line, expected_line_width
@pytest.fixture(
params=[
(None, None),
(None, 12700),
(12700, 12700),
(12700, 25400),
(25400, None),
]
)
def width_set_fixture(self, request, shape_):
initial_width, width = request.param
shape_.ln = shape_.get_or_add_ln.return_value = self.ln_bldr(
initial_width
).element
line = LineFormat(shape_)
expected_xml = self.ln_bldr(width).xml()
return line, width, expected_xml
# fixture components ---------------------------------------------
@pytest.fixture
def color_(self, request):
return instance_mock(request, ColorFormat)
@pytest.fixture
def fill_(self, request, color_):
return instance_mock(request, FillFormat, fore_color=color_)
@pytest.fixture
def fill_prop_(self, request, fill_):
return property_mock(request, LineFormat, "fill", return_value=fill_)
@pytest.fixture
def FillFormat_(self, request, fill_):
FillFormat_ = class_mock(request, "pptx.dml.line.FillFormat")
FillFormat_.from_fill_parent.return_value = fill_
return FillFormat_
@pytest.fixture
def line(self, shape_):
return LineFormat(shape_)
@pytest.fixture
def ln_(self, request):
return instance_mock(request, CT_LineProperties)
def ln_bldr(self, w):
ln_bldr = an_ln().with_nsdecls()
if w is not None:
ln_bldr.with_w(w)
return ln_bldr
@pytest.fixture
def shape_(self, request, ln_):
shape_ = instance_mock(request, Shape)
shape_.get_or_add_ln.return_value = ln_
return shape_
| 35.791045 | 88 | 0.621143 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.