# text
# stringlengths 3
# 1.05M
# |
# |---|
import random
import math
import copy
from datasets import miniImageNet_few_shot, tiered_ImageNet_few_shot, ImageNet_few_shot
from datasets import ISIC_few_shot, EuroSAT_few_shot, CropDisease_few_shot, Chest_few_shot
from collections import OrderedDict
import warnings
import models
import time
import data
import utils
import sys
import numpy as np
import os
import argparse
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.nn import functional as F
from torchvision import transforms, datasets
import torch.utils.data
from configs import miniImageNet_path, ISIC_path, ChestX_path, CropDisease_path, EuroSAT_path
torch.cuda.empty_cache()
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
# import wandb
class apply_twice:
'''
A wrapper for torchvision transform. The transform is applied twice for
SimCLR training
'''
def __init__(self, transform, transform2=None):
self.transform = transform
if transform2 is not None:
self.transform2 = transform2
else:
self.transform2 = transform
def __call__(self, img):
return self.transform(img), self.transform2(img)
def main(args):
if torch.cuda.is_available():
dev = "cuda:0"
else:
dev = "cpu"
device = torch.device(dev)
torch.cuda.empty_cache()
# Set the scenes
if not os.path.isdir(args.dir):
os.makedirs(args.dir)
logger = utils.create_logger(os.path.join(
args.dir, time.strftime("%Y%m%d-%H%M%S") + '_checkpoint.log'), __name__)
trainlog = utils.savelog(args.dir, 'train')
vallog = utils.savelog(args.dir, 'val')
# wandb.init(project='STARTUP',
# group=__file__,
# name=f'{__file__}_{args.dir}')
# wandb.config.update(args)
for arg in vars(args):
logger.info(f"{arg}: {getattr(args, arg)}")
# seed the random number generator
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
###########################
# Create Models
###########################
if args.model == 'resnet10':
backbone = models.ResNet10()
feature_dim = backbone.final_feat_dim
elif args.model == 'resnet12':
backbone = models.Resnet12(width=1, dropout=0.1)
feature_dim = backbone.output_size
elif args.model == 'resnet18':
backbone = models.resnet18(remove_last_relu=False,
input_high_res=True)
feature_dim = 512
else:
raise ValueError('Invalid backbone model')
backbone_sd_init = copy.deepcopy(backbone.state_dict())
# the student classifier head
clf = nn.Linear(feature_dim, 1000).to(device)
############################
###########################
# Create DataLoader
###########################
# create the base dataset
if args.base_dataset == 'miniImageNet':
base_transform = miniImageNet_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=True)
base_transform_test = miniImageNet_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=False)
base_dataset = datasets.ImageFolder(
root=args.base_path, transform=base_transform)
if args.base_split is not None:
base_dataset = miniImageNet_few_shot.construct_subset(
base_dataset, args.base_split)
elif args.base_dataset == 'tiered_ImageNet':
if args.image_size != 84:
warnings.warn("Tiered ImageNet: The image size for is not 84x84")
base_transform = tiered_ImageNet_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=False)
base_transform_test = tiered_ImageNet_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=False)
base_dataset = datasets.ImageFolder(
root=args.base_path, transform=base_transform)
if args.base_split is not None:
base_dataset = tiered_ImageNet_few_shot.construct_subset(
base_dataset, args.base_split)
elif args.base_dataset == 'ImageNet':
if args.base_no_color_jitter:
base_transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
else:
warnings.warn("Using ImageNet with Color Jitter")
base_transform = ImageNet_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=True)
base_transform_test = ImageNet_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=False)
base_dataset = datasets.ImageFolder(
root=args.base_path, transform=base_transform)
if args.base_split is not None:
base_dataset = ImageNet_few_shot.construct_subset(
base_dataset, args.base_split)
print("Size of Base dataset:", len(base_dataset))
else:
raise ValueError("Invalid base dataset!")
# create the target dataset
if args.target_dataset == 'ISIC':
transform = ISIC_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=True)
transform_test = ISIC_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=False)
dataset = ISIC_few_shot.SimpleDataset(
transform, split=args.target_subset_split)
elif args.target_dataset == 'EuroSAT':
transform = EuroSAT_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=True)
transform_test = EuroSAT_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=False)
dataset = EuroSAT_few_shot.SimpleDataset(
transform, split=args.target_subset_split)
elif args.target_dataset == 'CropDisease':
transform = CropDisease_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=True)
transform_test = CropDisease_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=False)
dataset = CropDisease_few_shot.SimpleDataset(
transform, split=args.target_subset_split)
elif args.target_dataset == 'ChestX':
transform = Chest_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=True)
transform_test = Chest_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=False)
dataset = Chest_few_shot.SimpleDataset(
transform, split=args.target_subset_split)
elif args.target_dataset == 'miniImageNet_test':
transform = miniImageNet_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=True)
transform_test = miniImageNet_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=False)
dataset = miniImageNet_few_shot.SimpleDataset(
transform, split=args.target_subset_split)
elif args.target_dataset == 'tiered_ImageNet_test':
if args.image_size != 84:
warnings.warn("Tiered ImageNet: The image size for is not 84x84")
transform = tiered_ImageNet_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=True)
transform_test = tiered_ImageNet_few_shot.TransformLoader(
args.image_size).get_composed_transform(aug=False)
dataset = tiered_ImageNet_few_shot.SimpleDataset(
transform, split=args.target_subset_split)
else:
raise ValueError('Invalid dataset!')
print("Size of target dataset", len(dataset))
dataset_test = copy.deepcopy(dataset)
transform_twice = apply_twice(transform)
transform_test_twice = apply_twice(transform_test, transform)
dataset.d.transform = transform_twice
dataset_test.d.transform = transform_test_twice
ind = torch.randperm(len(dataset))
# initialize the student's backbone with random weights
if args.backbone_random_init:
backbone.module.load_state_dict(backbone_sd_init)
# split the target dataset into train and val
# 10% of the unlabeled data is used for validation
train_ind = ind[:int(0.9*len(ind))]
val_ind = ind[int(0.9*len(ind)):]
trainset = torch.utils.data.Subset(dataset, train_ind)
valset = torch.utils.data.Subset(dataset_test, val_ind)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.bsize,
num_workers=args.num_workers,
shuffle=True, drop_last=True)
valloader = torch.utils.data.DataLoader(valset, batch_size=args.bsize,
num_workers=args.num_workers,
shuffle=False, drop_last=False)
# Generate trainset and valset for base dataset
base_ind = torch.randperm(len(base_dataset))
base_train_ind = base_ind[:int((1 - args.base_val_ratio)*len(base_ind))]
base_val_ind = base_ind[int((1 - args.base_val_ratio)*len(base_ind)):]
base_dataset_val = copy.deepcopy(base_dataset)
base_dataset_val.transform = base_transform_test
base_trainset = torch.utils.data.Subset(base_dataset, base_train_ind)
base_valset = torch.utils.data.Subset(base_dataset_val, base_val_ind)
print("Size of base validation set", len(base_valset))
base_trainloader = torch.utils.data.DataLoader(base_trainset, batch_size=args.bsize,
num_workers=args.num_workers,
shuffle=True, drop_last=True)
base_valloader = torch.utils.data.DataLoader(base_valset, batch_size=args.bsize * 2,
num_workers=args.num_workers,
shuffle=False, drop_last=False)
############################
###########################
# Create Optimizer
###########################
for layer in backbone.modules():
if isinstance(layer, nn.BatchNorm2d):
layer.bias.requires_grad = False
layer.weight.requires_grad = False
optimizer = torch.optim.SGD([
{'params': filter(lambda p: p.requires_grad, backbone.parameters())},
{'params': clf.parameters()}
],
lr=0.1, momentum=0.9,
weight_decay=args.wd,
nesterov=False)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode='min', factor=0.5,
patience=10, verbose=False,
cooldown=10,
threshold_mode='rel',
threshold=1e-4, min_lr=1e-5)
#######################################
starting_epoch = 0
# whether to resume from the latest checkpoint
if args.resume_latest:
import re
pattern = "checkpoint_(\d+).pkl"
candidate = []
for i in os.listdir(args.dir):
match = re.search(pattern, i)
if match:
candidate.append(int(match.group(1)))
# if nothing found, then start from scratch
if len(candidate) == 0:
print('No latest candidate found to resume!')
logger.info('No latest candidate found to resume!')
else:
latest = np.amax(candidate)
load_path = os.path.join(args.dir, f'checkpoint_{latest}.pkl')
if latest >= args.epochs:
print('The latest checkpoint found ({}) is after the number of epochs (={}) specified! Exiting!'.format(
load_path, args.epochs))
logger.info('The latest checkpoint found ({}) is after the number of epochs (={}) specified! Exiting!'.format(
load_path, args.epochs))
import sys
sys.exit(0)
else:
best_model_path = os.path.join(args.dir, 'checkpoint_best.pkl')
# first load the previous best model
best_epoch = load_checkpoint(backbone, clf,
optimizer, scheduler, best_model_path, device)
logger.info('Latest model epoch: {}'.format(latest))
logger.info(
'Validate the best model checkpointed at epoch: {}'.format(best_epoch))
# Validate to set the right loss
performance_val = validate(backbone, clf,
base_valloader, valloader,
best_epoch, args.epochs, logger, vallog, args, device, postfix='Validation')
loss_val = performance_val['Loss_test/avg']
error_val = 100 - performance_val['top1_test_per_class/avg']
best_error = error_val
best_loss = loss_val
sd_best = torch.load(os.path.join(
args.dir, 'checkpoint_best.pkl'))
if latest > best_epoch:
starting_epoch = load_checkpoint(
backbone, clf, optimizer, scheduler, load_path, device)
else:
starting_epoch = best_epoch
logger.info(
'Continue Training at epoch: {}'.format(starting_epoch))
###########################################
####### Learning rate test ################
###########################################
if starting_epoch == 0:
# Start by doing a learning rate test
lr_candidates = [1e-1]
step = 50
# number of training epochs to get at least 50 updates
warm_up_epoch = math.ceil(step / len(base_trainloader))
# keep track of the student model initialization
# Need to keep reloading when testing different learning rates
sd_current = copy.deepcopy(backbone.state_dict())
sd_head = copy.deepcopy(clf.state_dict())
vals = []
# Test the learning rate by training for one epoch
for current_lr in lr_candidates:
lr_log = utils.savelog(args.dir, f'lr_{current_lr}')
# reload the student model
backbone.load_state_dict(sd_current)
clf.load_state_dict(sd_head)
# create the optimizer
for layer in backbone.modules():
if isinstance(layer, nn.BatchNorm2d):
layer.bias.requires_grad = False
layer.weight.requires_grad = False
optimizer = torch.optim.SGD([
{'params': filter(lambda p: p.requires_grad,
backbone.parameters())},
{'params': clf.parameters()}
],
lr=current_lr, momentum=0.9,
weight_decay=args.wd,
nesterov=False)
logger.info(f'*** Testing Learning Rate: {current_lr}')
# training for a bit
for i in range(warm_up_epoch):
perf = train(backbone, clf, optimizer,
trainloader, base_trainloader,
i, warm_up_epoch, logger, lr_log, args, device, turn_off_sync=True)
# compute the validation loss for picking learning rates
perf_val = validate(backbone, clf,
base_valloader, valloader,
1, 1, logger, vallog, args, device, postfix='Validation',
turn_off_sync=True)
vals.append(perf_val['Loss_test/avg'])
# pick the best learning rates
current_lr = lr_candidates[int(np.argmin(vals))]
# reload the models
backbone.load_state_dict(sd_current)
clf.load_state_dict(sd_head)
logger.info(f"** Learning with lr: {current_lr}")
for layer in backbone.modules():
if isinstance(layer, nn.BatchNorm2d):
layer.bias.requires_grad = False
layer.weight.requires_grad = False
optimizer = torch.optim.SGD([
{'params': filter(lambda p: p.requires_grad,
backbone.parameters())},
{'params': clf.parameters()}
],
lr=current_lr, momentum=0.9,
weight_decay=args.wd,
nesterov=False)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode='min', factor=0.5,
patience=10, verbose=False,
cooldown=10,
threshold_mode='rel',
threshold=1e-4, min_lr=1e-5)
scheduler.step(math.inf)
best_loss = math.inf
best_epoch = 0
checkpoint(backbone, clf,
optimizer, scheduler, os.path.join(
args.dir, f'checkpoint_best.pkl'), 0)
############################
# save the initialization
checkpoint(backbone, clf,
optimizer, scheduler,
os.path.join(
args.dir, f'checkpoint_{starting_epoch}.pkl'), starting_epoch)
try:
for epoch in tqdm(range(starting_epoch, args.epochs)):
perf = train(backbone, clf, optimizer, trainloader,
base_trainloader,
epoch, args.epochs, logger, trainlog, args, device)
scheduler.step(perf['Loss/avg'])
# Always checkpoint after first epoch of training
if (epoch == starting_epoch) or ((epoch + 1) % args.save_freq == 0):
checkpoint(backbone, clf,
optimizer, scheduler,
os.path.join(
args.dir, f'checkpoint_{epoch + 1}.pkl'), epoch + 1)
if (epoch == starting_epoch) or ((epoch + 1) % args.eval_freq == 0):
performance_val = validate(backbone, clf,
base_valloader, valloader,
epoch+1, args.epochs, logger, vallog, args, device, postfix='Validation')
loss_val = performance_val['Loss_test/avg']
if best_loss > loss_val:
best_epoch = epoch + 1
checkpoint(backbone, clf,
optimizer, scheduler, os.path.join(
args.dir, f'checkpoint_best.pkl'), best_epoch)
logger.info(
f"*** Best model checkpointed at Epoch {best_epoch}")
best_loss = loss_val
if (epoch + 1) % args.save_freq != 0:
checkpoint(backbone, clf,
optimizer, scheduler, os.path.join(
args.dir, f'checkpoint_{epoch + 1}.pkl'), epoch + 1)
finally:
trainlog.save()
vallog.save()
return
def checkpoint(model, clf, optimizer, scheduler, save_path, epoch):
'''
epoch: the number of epochs of training that has been done
Should resume from epoch
'''
sd = {
'model': copy.deepcopy(model.state_dict()),
'clf': copy.deepcopy(clf.state_dict()),
'opt': copy.deepcopy(optimizer.state_dict()),
'scheduler': copy.deepcopy(scheduler.state_dict()),
'epoch': epoch
}
torch.save(sd, save_path)
return sd
def load_checkpoint(model, clf, optimizer, scheduler, load_path, device):
'''
Load model and optimizer from load path
Return the epoch to continue the checkpoint
'''
sd = torch.load(load_path, map_location=torch.device(device))
model.load_state_dict(sd['model'])
clf.load_state_dict(sd['clf'])
optimizer.load_state_dict(sd['opt'])
scheduler.load_state_dict(sd['scheduler'])
return sd['epoch']
def train(model, clf,
optimizer, trainloader, base_trainloader, epoch,
num_epochs, logger, trainlog, args, device, turn_off_sync=False):
meters = utils.AverageMeterSet()
model.to(device)
model.train()
clf.train()
mse_criterion = nn.MSELoss()
loss_ce = nn.CrossEntropyLoss()
loader_iter = iter(trainloader)
end = time.time()
for i, (X_base, y_base) in enumerate(base_trainloader):
meters.update('Data_time', time.time() - end)
current_lr = optimizer.param_groups[0]['lr']
meters.update('lr', current_lr, 1)
# Get the data from the target dataset
try:
(X1, X2), y = loader_iter.next()
except StopIteration:
loader_iter = iter(trainloader)
(X1, X2), y = loader_iter.next()
X1 = X1.to(device)
X2 = X2.to(device)
y = y.to(device)
X_base = X_base.to(device)
y_base = y_base.to(device)
optimizer.zero_grad()
features_base = model(X_base)
logits_base = clf(features_base)
source_stat = clone_BN_stat(model)
shift_model = copy.deepcopy(model)
# shift the affine
shift_model(X1)
shift_model(X2)
shift_bias(shift_model, source_stat, device)
shifted_features_base = shift_model(X_base)
shifted_logits_base = clf(shifted_features_base)
# return values to the source
# regret_affine(model, source_affine)
loss_base = loss_ce(logits_base, y_base)
loss_xtask = mse_criterion(logits_base, shifted_logits_base)
loss = loss_base + loss_xtask
loss.backward()
optimizer.step()
# print(clone_BN_affine(model))
meters.update('Loss', loss.item(), 1)
meters.update('MSE_Loss_target', loss_xtask.item(), 1)
meters.update('CE_Loss_source', loss_base.item(), 1)
perf_base = utils.accuracy(logits_base.data,
y_base.data, topk=(1, ))
meters.update('top1_base', perf_base['average'][0].item(), len(X_base))
meters.update('top1_base_per_class',
perf_base['per_class_average'][0].item(), 1)
meters.update('Batch_time', time.time() - end)
end = time.time()
if (i + 1) % args.print_freq == 0:
values = meters.values()
averages = meters.averages()
sums = meters.sums()
logger_string = ('Training Epoch: [{epoch}/{epochs}] Step: [{step} / {steps}] '
'Batch Time: {meters[Batch_time]:.4f} '
'Data Time: {meters[Data_time]:.4f} Average Loss: {meters[Loss]:.4f} '
'Average MSE Loss (Target): {meters[MSE_Loss_target]:.4f} '
'Average CE Loss (Source): {meters[CE_Loss_source]: .4f} '
'Learning Rate: {meters[lr]:.4f} '
'Top1_base: {meters[top1_base]:.4f} '
'Top1_base_per_class: {meters[top1_base_per_class]:.4f} '
).format(
epoch=epoch, epochs=num_epochs, step=i+1, steps=len(base_trainloader), meters=meters)
logger.info(logger_string)
print(logger_string)
if (args.iteration_bp is not None) and (i+1) == args.iteration_bp:
break
logger_string = ('Training Epoch: [{epoch}/{epochs}] Step: [{step}] Batch Time: {meters[Batch_time]:.4f} '
'Data Time: {meters[Data_time]:.4f} Average Loss: {meters[Loss]:.4f} '
'Average MSE Loss (Target): {meters[MSE_Loss_target]:.4f} '
'Average CE Loss (Source): {meters[CE_Loss_source]: .4f} '
'Learning Rate: {meters[lr]:.4f} '
'Top1_base: {meters[top1_base]:.4f} '
'Top1_base_per_class: {meters[top1_base_per_class]:.4f} '
).format(
epoch=epoch+1, epochs=num_epochs, step=0, meters=meters)
logger.info(logger_string)
print(logger_string)
values = meters.values()
averages = meters.averages()
sums = meters.sums()
trainlog.record(epoch+1, {
**values,
**averages,
**sums
})
return averages
def validate(model, clf,
base_loader, testloader, epoch, num_epochs, logger,
testlog, args, device, postfix='Validation', turn_off_sync=False):
meters = utils.AverageMeterSet()
model.to(device)
model.eval()
clf.eval()
loss_ce = nn.CrossEntropyLoss()
mse_criterion = nn.MSELoss()
end = time.time()
logits_base_all = []
shifted_logits_base_all = []
ys_base_all = []
with torch.no_grad():
# Compute the loss on the source base dataset
for X_base, y_base in base_loader:
loader_iter = iter(testloader)
try:
(X1, X2), y = loader_iter.next()
except StopIteration:
loader_iter = iter(testloader)
(X1, X2), y = loader_iter.next()
X1 = X1.to(device)
X2 = X2.to(device)
y = y.to(device)
X_base = X_base.to(device)
y_base = y_base.to(device)
features = model(X_base)
logits_base = clf(features)
source_stat = clone_BN_stat(model)
shift_model = copy.deepcopy(model)
# shift the affine
f1 = shift_model(X1)
f2 = shift_model(X2)
shift_bias(shift_model, source_stat, device)
shifted_features_base = shift_model(X_base)
shifted_logits_base = clf(shifted_features_base)
# return values to the source
# regret_affine(model, source_affine)
logits_base_all.append(logits_base)
shifted_logits_base_all.append(shifted_logits_base)
ys_base_all.append(y_base)
ys_base_all = torch.cat(ys_base_all, dim=0)
logits_base_all = torch.cat(logits_base_all, dim=0)
shifted_logits_base_all = torch.cat(shifted_logits_base_all, dim=0)
loss_base = loss_ce(logits_base_all, ys_base_all)
loss_xtask = mse_criterion(shifted_logits_base_all, logits_base_all)
loss = loss_base + loss_xtask
meters.update('CE_Loss_source_test', loss_base.item(), 1)
meters.update('Loss_test', loss.item(), 1)
meters.update('MSE_Loss_target', loss_xtask.item(), 1)
perf_base = utils.accuracy(logits_base_all.data,
ys_base_all.data, topk=(1, ))
meters.update('top1_base_test', perf_base['average'][0].item(), 1)
meters.update('top1_base_test_per_class',
perf_base['per_class_average'][0].item(), 1)
meters.update('Batch_time', time.time() - end)
logger_string = ('{postfix} Epoch: [{epoch}/{epochs}] Batch Time: {meters[Batch_time]:.4f} '
'Average Test Loss: {meters[Loss_test]:.4f} '
'Average CE Loss (Source): {meters[CE_Loss_source_test]: .4f} '
'Average MSE Loss (Target): {meters[MSE_Loss_target]:.4f} '
'Top1_base_test: {meters[top1_base_test]:.4f} '
'Top1_base_test_per_class: {meters[top1_base_test_per_class]:.4f} ').format(
postfix=postfix, epoch=epoch, epochs=num_epochs, meters=meters)
logger.info(logger_string)
values = meters.values()
averages = meters.averages()
sums = meters.sums()
testlog.record(epoch, {
**values,
**averages,
**sums
})
if postfix != '':
postfix = '_' + postfix
return averages
def shift_bias(model, source_stat, device):
total_shift = 0
i = 0
for layer in model.modules():
if isinstance(layer, nn.BatchNorm2d):
target_mean = layer.running_mean.clone() # source state
source_mean = source_stat[i]['means']
source_var = source_stat[i]['vars']
shift_value = (source_mean - target_mean)
total_shift += torch.sum(shift_value)
# shift bias
layer.bias = nn.Parameter(layer.bias + ((torch.rand(len(source_mean)).to(
device) * shift_value.to(device)).to(
device) * layer.weight / source_var)).to(device)
i += 1
return total_shift
def clone_BN_affine(model):
BN_statistics_list = []
for layer in model.modules():
if isinstance(layer, nn.BatchNorm2d):
BN_statistics_list.append(
{'weight': layer.weight.clone(),
'bias': layer.bias.clone()})
return BN_statistics_list
def clone_BN_stat(model):
BN_statistics_list = []
for layer in model.modules():
if isinstance(layer, nn.BatchNorm2d):
BN_statistics_list.append(
{'means': layer.running_mean.clone(),
'vars': layer.running_var.clone()})
return BN_statistics_list
# def regret_affine(model, source_affine):
# i = 0
# for layer in model.modules():
# if isinstance(layer, nn.BatchNorm2d):
# layer.bias = nn.Parameter(source_affine[i]['bias'])
# layer.weight = nn.Parameter(source_affine[i]['weight'])
# i += 1
# def regret_stat(model, source_stat):
# i = 0
# for layer in model.modules():
# if isinstance(layer, nn.BatchNorm2d):
# layer.running_mean = nn.Parameter(source_stat[i]['means'])
# layer.running_var = nn.Parameter(source_stat[i]['vars'])
# i += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='STARTUP')
parser.add_argument('--dir', type=str, default='./logs/BMS_in_na/EuroSAT',
help='directory to save the checkpoints')
parser.add_argument('--bsize', type=int, default=32,
help='batch_size for STARTUP')
parser.add_argument('--epochs', type=int, default=1,
help='Number of training epochs')
parser.add_argument('--save_freq', type=int, default=50,
help='Frequency (in epoch) to save')
parser.add_argument('--eval_freq', type=int, default=2,
help='Frequency (in epoch) to evaluate on the val set')
parser.add_argument('--print_freq', type=int, default=10,
help='Frequency (in step per epoch) to print training stats')
parser.add_argument('--load_path', type=str, default=None,
help='Path to the checkpoint to be loaded')
parser.add_argument('--seed', type=int, default=1,
help='Seed for randomness')
parser.add_argument('--wd', type=float, default=1e-4,
help='Weight decay for the model')
parser.add_argument('--resume_latest', action='store_true',
help='resume from the latest model in args.dir')
parser.add_argument('--num_workers', type=int, default=4,
help='Number of workers for dataloader')
parser.add_argument('--iteration_bp', type=int,
help='which step to break in the training loop')
parser.add_argument('--model', type=str, default='resnet10',
help='Backbone model')
parser.add_argument('--backbone_random_init', action='store_true',
help="Use random initialized backbone ")
parser.add_argument('--base_dataset', type=str,
default='miniImageNet', help='base_dataset to use')
parser.add_argument('--base_path', type=str,
default=miniImageNet_path, help='path to base dataset')
parser.add_argument('--base_split', type=str,
help='split for the base dataset')
parser.add_argument('--base_no_color_jitter', action='store_true',
help='remove color jitter for ImageNet')
parser.add_argument('--base_val_ratio', type=float, default=0.05,
help='amount of base dataset set aside for validation')
parser.add_argument('--batch_validate', action='store_true',
help='to do batch validate rather than validate on the full dataset (Ideally, for SimCLR,' +
' the validation should be on the full dataset but might not be feasible due to hardware constraints')
parser.add_argument('--target_dataset', type=str, default='EuroSAT',
help='the target domain dataset')
parser.add_argument('--target_subset_split', type=str, default='datasets/split_seed_1/EuroSAT_unlabeled_20.csv',
help='path to the csv files that specifies the unlabeled split for the target dataset')
parser.add_argument('--image_size', type=int, default=224,
help='Resolution of the input image')
args = parser.parse_args()
main(args)
# |
# -*- coding: utf-8 -*-
"""
Install software from the FreeBSD ``ports(7)`` system
.. versionadded:: 2014.1.0
This module allows you to install ports using ``BATCH=yes`` to bypass
configuration prompts. It is recommended to use the :mod:`ports state
<salt.states.freebsdports>` to install ports, but it is also possible to use
this module exclusively from the command line.
.. code-block:: bash
salt minion-id ports.config security/nmap IPV6=off
salt minion-id ports.install security/nmap
"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import fnmatch
import logging
import os
import re
# Import salt libs
import salt.utils.data
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError, SaltInvocationError
from salt.ext import six
from salt.ext.six import string_types
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = "ports"
def __virtual__():
"""
Only runs on FreeBSD systems
"""
if __grains__["os"] == "FreeBSD":
return __virtualname__
return (
False,
"The freebsdports execution module cannot be loaded: "
"only available on FreeBSD systems.",
)
def _portsnap():
"""
Return 'portsnap --interactive' for FreeBSD 10, otherwise 'portsnap'
"""
ret = ["portsnap"]
if float(__grains__["osrelease"]) >= 10:
ret.append("--interactive")
return ret
def _check_portname(name):
"""
Check if portname is valid and whether or not the directory exists in the
ports tree.
"""
if not isinstance(name, string_types) or "/" not in name:
raise SaltInvocationError(
"Invalid port name '{0}' (category required)".format(name)
)
path = os.path.join("/usr/ports", name)
if not os.path.isdir(path):
raise SaltInvocationError("Path '{0}' does not exist".format(path))
return path
def _options_dir(name):
"""
Retrieve the path to the dir containing OPTIONS file for a given port
"""
_check_portname(name)
_root = "/var/db/ports"
# New path: /var/db/ports/category_portname
new_dir = os.path.join(_root, name.replace("/", "_"))
# Old path: /var/db/ports/portname
old_dir = os.path.join(_root, name.split("/")[-1])
if os.path.isdir(old_dir):
return old_dir
return new_dir
def _options_file_exists(name):
"""
Returns True/False based on whether or not the options file for the
specified port exists.
"""
return os.path.isfile(os.path.join(_options_dir(name), "options"))
def _write_options(name, configuration):
"""
Writes a new OPTIONS file
"""
_check_portname(name)
pkg = next(iter(configuration))
conf_ptr = configuration[pkg]
dirname = _options_dir(name)
if not os.path.isdir(dirname):
try:
os.makedirs(dirname)
except OSError as exc:
raise CommandExecutionError("Unable to make {0}: {1}".format(dirname, exc))
with salt.utils.files.fopen(os.path.join(dirname, "options"), "w") as fp_:
sorted_options = list(conf_ptr)
sorted_options.sort()
fp_.write(
salt.utils.stringutils.to_str(
"# This file was auto-generated by Salt (http://saltstack.com)\n"
"# Options for {0}\n"
"_OPTIONS_READ={0}\n"
"_FILE_COMPLETE_OPTIONS_LIST={1}\n".format(
pkg, " ".join(sorted_options)
)
)
)
opt_tmpl = "OPTIONS_FILE_{0}SET+={1}\n"
for opt in sorted_options:
fp_.write(
salt.utils.stringutils.to_str(
opt_tmpl.format("" if conf_ptr[opt] == "on" else "UN", opt)
)
)
def _normalize(val):
"""
Fix Salt's yaml-ification of on/off, and otherwise normalize the on/off
values to be used in writing the options file
"""
if isinstance(val, bool):
return "on" if val else "off"
return six.text_type(val).lower()
def install(name, clean=True):
"""
Install a port from the ports tree. Installs using ``BATCH=yes`` for
non-interactive building. To set config options for a given port, use
:mod:`ports.config <salt.modules.freebsdports.config>`.
clean : True
If ``True``, cleans after installation. Equivalent to running ``make
install clean BATCH=yes``.
.. note::
It may be helpful to run this function using the ``-t`` option to set a
higher timeout, since compiling a port may cause the Salt command to
exceed the default timeout.
CLI Example:
.. code-block:: bash
salt -t 1200 '*' ports.install security/nmap
"""
portpath = _check_portname(name)
old = __salt__["pkg.list_pkgs"]()
if old.get(name.rsplit("/")[-1]):
deinstall(name)
cmd = ["make", "install"]
if clean:
cmd.append("clean")
cmd.append("BATCH=yes")
result = __salt__["cmd.run_all"](
cmd, cwd=portpath, reset_system_locale=False, python_shell=False
)
if result["retcode"] != 0:
__context__["ports.install_error"] = result["stderr"]
__context__.pop("pkg.list_pkgs", None)
new = __salt__["pkg.list_pkgs"]()
ret = salt.utils.data.compare_dicts(old, new)
if not ret and result["retcode"] == 0:
# No change in package list, but the make install was successful.
# Assume that the installation was a recompile with new options, and
# set return dict so that changes are detected by the ports.installed
# state.
ret = {name: {"old": old.get(name, ""), "new": new.get(name, "")}}
return ret
def deinstall(name):
"""
De-install a port.
CLI Example:
.. code-block:: bash
salt '*' ports.deinstall security/nmap
"""
portpath = _check_portname(name)
old = __salt__["pkg.list_pkgs"]()
result = __salt__["cmd.run_all"](
["make", "deinstall", "BATCH=yes"], cwd=portpath, python_shell=False
)
__context__.pop("pkg.list_pkgs", None)
new = __salt__["pkg.list_pkgs"]()
return salt.utils.data.compare_dicts(old, new)
def rmconfig(name):
"""
Clear the cached options for the specified port; run a ``make rmconfig``
name
The name of the port to clear
CLI Example:
.. code-block:: bash
salt '*' ports.rmconfig security/nmap
"""
portpath = _check_portname(name)
return __salt__["cmd.run"](["make", "rmconfig"], cwd=portpath, python_shell=False)
def showconfig(name, default=False, dict_return=False):
    """
    Show the configuration options for a given port.

    default : False
        Show the default options for a port (not necessarily the same as the
        current configuration)

    dict_return : False
        Instead of returning the output of ``make showconfig``, return the data
        in a dictionary

    CLI Example:

    .. code-block:: bash

        salt '*' ports.showconfig security/nmap
        salt '*' ports.showconfig security/nmap default=True
    """
    portpath = _check_portname(name)
    if default and _options_file_exists(name):
        # To see the defaults we must temporarily get the saved options file
        # out of the way: stash the current config, remove the file, re-invoke
        # ourselves to read the defaults, then restore the stashed config so
        # the user's saved choices are left untouched.
        saved_config = showconfig(name, default=False, dict_return=True)
        rmconfig(name)
        if _options_file_exists(name):
            raise CommandExecutionError("Unable to get default configuration")
        default_config = showconfig(name, default=False, dict_return=dict_return)
        _write_options(name, saved_config)
        return default_config
    try:
        result = __salt__["cmd.run_all"](
            ["make", "showconfig"], cwd=portpath, python_shell=False
        )
        output = result["stdout"].splitlines()
        if result["retcode"] != 0:
            error = result["stderr"]
        else:
            error = ""
    except TypeError:
        # cmd.run_all returned something non-subscriptable (e.g. an error
        # string rather than the usual dict); surface that value as the error.
        error = result
    if error:
        msg = "Error running 'make showconfig' for {0}: {1}".format(name, error)
        log.error(msg)
        raise SaltInvocationError(msg)
    if not dict_return:
        return "\n".join(output)
    # In dict mode, the first output line is expected to mention
    # "configuration options" and end with the pkg-version string.
    if (not output) or ("configuration options" not in output[0]):
        return {}
    try:
        pkg = output[0].split()[-1].rstrip(":")
    except (IndexError, AttributeError, TypeError) as exc:
        log.error("Unable to get pkg-version string: %s", exc)
        return {}
    ret = {pkg: {}}
    output = output[1:]
    for line in output:
        try:
            # Option lines look like "     OPT=on: description"; anything that
            # does not match (e.g. blank lines) is skipped.
            opt, val, desc = re.match(r"\s+([^=]+)=(off|on): (.+)", line).groups()
        except AttributeError:
            continue
        ret[pkg][opt] = val
    if not ret[pkg]:
        return {}
    return ret
def config(name, reset=False, **kwargs):
    """
    Modify configuration options for a given port. Multiple options can be
    specified. To see the available options for a port, use
    :mod:`ports.showconfig <salt.modules.freebsdports.showconfig>`.

    name
        The port name, in ``category/name`` format

    reset : False
        If ``True``, runs a ``make rmconfig`` for the port, clearing its
        configuration before setting the desired options

    CLI Examples:

    .. code-block:: bash

        salt '*' ports.config security/nmap IPV6=off
    """
    # Validates the port name (raises on a bad one); the resolved path is not
    # otherwise needed in this function.
    portpath = _check_portname(name)
    if reset:
        rmconfig(name)
    configuration = showconfig(name, dict_return=True)
    if not configuration:
        raise CommandExecutionError(
            "Unable to get port configuration for '{0}'".format(name)
        )
    # The config dict is keyed by the pkg-version string; grab that key so we
    # can work on the flat {option: value} mapping underneath it.
    pkg = next(iter(configuration))
    conf_ptr = configuration[pkg]
    # Drop private kwargs (e.g. __pub_*) that Salt injects into the call.
    opts = {
        six.text_type(key): _normalize(value)
        for key, value in six.iteritems(kwargs)
        if not key.startswith("_")
    }
    bad_opts = [opt for opt in opts if opt not in conf_ptr]
    if bad_opts:
        raise SaltInvocationError(
            "The following opts are not valid for port {0}: {1}".format(
                name, ", ".join(bad_opts)
            )
        )
    bad_vals = [
        "{0}={1}".format(opt, value)
        for opt, value in six.iteritems(opts)
        if value not in ("on", "off")
    ]
    if bad_vals:
        raise SaltInvocationError(
            "The following key/value pairs are invalid: {0}".format(", ".join(bad_vals))
        )
    conf_ptr.update(opts)
    _write_options(name, configuration)
    # Re-read the on-disk options and confirm every requested option took.
    new_config = showconfig(name, dict_return=True)
    try:
        new_config = new_config[next(iter(new_config))]
    except (StopIteration, TypeError):
        return False
    return all(conf_ptr[opt] == new_config.get(opt) for opt in conf_ptr)
def update(extract=False):
    """
    Update the ports tree

    extract : False
        If ``True``, runs a ``portsnap extract`` after fetching, should be used
        for first-time installation of the ports tree.

    CLI Example:

    .. code-block:: bash

        salt '*' ports.update
    """
    def _count(pattern, text):
        # Pull a count out of portsnap's output; 0 when the line is absent.
        try:
            return re.search(pattern, text).group(1)
        except AttributeError:
            return 0

    fetch_result = __salt__["cmd.run_all"](_portsnap() + ["fetch"], python_shell=False)
    if fetch_result["retcode"] != 0:
        raise CommandExecutionError(
            "Unable to fetch ports snapshot: {0}".format(fetch_result["stderr"])
        )
    patch_count = _count(r"Fetching (\d+) patches", fetch_result["stdout"])
    new_port_count = _count(r"Fetching (\d+) new ports or files", fetch_result["stdout"])
    ret = [
        "Applied {0} new patches".format(patch_count),
        "Fetched {0} new ports or files".format(new_port_count),
    ]
    if extract:
        extract_result = __salt__["cmd.run_all"](
            _portsnap() + ["extract"], python_shell=False
        )
        if extract_result["retcode"] != 0:
            raise CommandExecutionError(
                "Unable to extract ports snapshot {0}".format(extract_result["stderr"])
            )
    update_result = __salt__["cmd.run_all"](_portsnap() + ["update"], python_shell=False)
    if update_result["retcode"] != 0:
        raise CommandExecutionError(
            "Unable to apply ports snapshot: {0}".format(update_result["stderr"])
        )
    # The cached port listing kept by list_all() is now stale.
    __context__.pop("ports.list_all", None)
    return "\n".join(ret)
def list_all():
    """
    Lists all ports available.

    CLI Example:

    .. code-block:: bash

        salt '*' ports.list_all

    .. warning::

        Takes a while to run, and returns a **LOT** of output
    """
    if "ports.list_all" not in __context__:
        # Populate the per-invocation cache once; later calls reuse it.
        cache = __context__["ports.list_all"] = []
        for dirpath, _subdirs, _files in salt.utils.path.os_walk("/usr/ports"):
            relative = dirpath[len("/usr/ports"):]
            # A valid port path is /usr/ports/<category>/<portname>: exactly
            # two slashes once the leading one is counted. CVS dirs excluded.
            if relative.count("/") != 2 or relative.endswith("/CVS"):
                continue
            cache.append(relative[1:])
    return __context__["ports.list_all"]
def search(name):
    """
    Search for matches in the ports tree. Globs are supported, and the category
    is optional

    CLI Examples:

    .. code-block:: bash

        salt '*' ports.search 'security/*'
        salt '*' ports.search 'security/n*'
        salt '*' ports.search nmap

    .. warning::

        Takes a while to run
    """
    name = six.text_type(name)
    all_ports = list_all()
    if "/" in name:
        if name.count("/") > 1:
            # Bug fix: ``.format(name)`` was previously missing, so the error
            # message contained a literal '{0}' instead of the search string.
            raise SaltInvocationError(
                "Invalid search string '{0}'. Port names cannot have more "
                "than one slash".format(name)
            )
        else:
            # Category given: glob-match against the full category/name path.
            return fnmatch.filter(all_ports, name)
    else:
        # No category given: glob-match against the bare port name only.
        return [
            port for port in all_ports
            if fnmatch.fnmatch(port.rsplit("/")[-1], name)
        ]
|
import { bitNotBigNumber } from '../../utils/bignumber/bitwise';
import { deepMap } from '../../utils/collection';
import { factory } from '../../utils/factory';
import { bitNotNumber } from '../../plain/number';
// Factory name and injected dependencies for mathjs' dependency-injection
// factory system.
var name = 'bitNot';
var dependencies = ['typed'];
export var createBitNot = /* #__PURE__ */factory(name, dependencies, function (_ref) {
  var typed = _ref.typed;
  /**
   * Bitwise NOT value, `~x`.
   * For matrices, the function is evaluated element wise.
   * For units, the function is evaluated on the best prefix base.
   *
   * Syntax:
   *
   *    math.bitNot(x)
   *
   * Examples:
   *
   *    math.bitNot(1)               // returns number -2
   *
   *    math.bitNot([2, -3, 4])      // returns Array [-3, 2, 5]
   *
   * See also:
   *
   *    bitAnd, bitOr, bitXor, leftShift, rightArithShift, rightLogShift
   *
   * @param  {number | BigNumber | Array | Matrix} x Value to not
   * @return {number | BigNumber | Array | Matrix} NOT of `x`
   */
  return typed(name, {
    number: bitNotNumber,
    BigNumber: bitNotBigNumber,
    'Array | Matrix': function ArrayMatrix(x) {
      // Inside a typed-function, `this` is the typed function itself, so
      // deepMap recurses with the full bitNot dispatcher on each element.
      return deepMap(x, this);
    }
  });
});
|
// UIImage+Alpha.h
// Created by Trevor Harmon on 9/20/09.
// Free for personal or commercial use, with or without modification.
// No warranty is expressed or implied.
// NOTE: Louisdor_Rolf_Project_1 modified to convert from Category to
// new Class name since iPhone seems to have some issues with Categories
// of built in Classes
// Helper methods for adding an alpha layer to an image
@interface UIImageAlpha : NSObject
{
}
// Presumably reports whether the image's bitmap carries an alpha channel --
// implementation not visible here; confirm against UIImage+Alpha.m.
+ (BOOL)hasAlpha:(UIImage*)image;
// Presumably returns a copy of the image guaranteed to have an alpha channel.
+ (UIImage *)imageWithAlpha:(UIImage*)image;
// Presumably returns a copy of the image padded with a transparent border of
// borderSize points on each side.
+ (UIImage *)transparentBorderImage:(NSUInteger)borderSize image:(UIImage*)image;
@end
|
(window["webpackJsonp"] = window["webpackJsonp"] || []).push([[37],{
/***/ "./node_modules/babel-loader/lib/index.js?!./node_modules/vue-loader/lib/index.js?!./resources/js/src/views/service/report/IdCardReport.vue?vue&type=script&lang=js&":
/*!*************************************************************************************************************************************************************************************!*\
!*** ./node_modules/babel-loader/lib??ref--4-0!./node_modules/vue-loader/lib??vue-loader-options!./resources/js/src/views/service/report/IdCardReport.vue?vue&type=script&lang=js& ***!
\*************************************************************************************************************************************************************************************/
/*! exports provided: default */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
__webpack_require__.r(__webpack_exports__);
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
/* harmony default export */ __webpack_exports__["default"] = ({
  // Vue options object for the IdCardReport view (Babel-transpiled SFC script).
  components: {},
  data: function data() {
    return {
      IDCard: [],       // rows shown in the report table
      employees: [],    // options for the employee filter dropdown
      employee: null,   // currently selected employee user_id (null = all)
      isAdmin: false    // server-reported flag; toggles the filter card
    };
  },
  created: function created() {
    // Load the unfiltered report and the employee list on component creation.
    this.fetchReport('all');
    this.fetchUserList();
  },
  methods: {
    fetchUserList: function fetchUserList() {
      var _this = this;

      axios.get("/api/work-group/list/users").then(function (response) {
        _this.employees = response.data.data;
      })["catch"](function (err) {
        _this.notificationAlert(err.response);
      });
    },
    resetColFilters: function resetColFilters() {
      //Reset filters
      // NOTE(review): statusFilter is assigned here but not declared in
      // data() above -- verify it is still used by the template.
      this.statusFilter = {
        label: 'Active',
        value: 1
      };
      this.fetchReport('all');
      this.employee = null;
      this.$refs.filterCard.removeRefreshAnimation();
    },
    fetchReport: function fetchReport(target) {
      var _this2 = this;

      // target is either the string 'all' or a user_id from the dropdown.
      axios.get("/api/report/idcard-report", {
        params: {
          target: target
        }
      }).then(function (response) {
        _this2.IDCard = response.data.idcard_data;
        _this2.isAdmin = response.data.isAdmin;
      })["catch"](function (err) {
        _this2.notificationAlert(err.response);
      });
    },
    exportExcel: function exportExcel() {
      // Open the server-side Excel export for the selected employee.
      var params = {
        id: this.employee
      };
      var paramString = new URLSearchParams(params);
      window.open("/export-id-card?".concat(paramString.toString()));
    }
  }
});
/***/ }),
/***/ "./node_modules/css-loader/index.js!./node_modules/vue-loader/lib/loaders/stylePostLoader.js!./node_modules/postcss-loader/src/index.js?!./node_modules/sass-loader/dist/cjs.js?!./node_modules/vue-loader/lib/index.js?!./resources/js/src/views/service/report/IdCardReport.vue?vue&type=style&index=0&lang=scss&":
/*!************************************************************************************************************************************************************************************************************************************************************************************************************************************!*\
!*** ./node_modules/css-loader!./node_modules/vue-loader/lib/loaders/stylePostLoader.js!./node_modules/postcss-loader/src??ref--8-2!./node_modules/sass-loader/dist/cjs.js??ref--8-3!./node_modules/vue-loader/lib??vue-loader-options!./resources/js/src/views/service/report/IdCardReport.vue?vue&type=style&index=0&lang=scss& ***!
\************************************************************************************************************************************************************************************************************************************************************************************************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
exports = module.exports = __webpack_require__(/*! ../../../../../../node_modules/css-loader/lib/css-base.js */ "./node_modules/css-loader/lib/css-base.js")(false);
// imports
// module
exports.push([module.i, ".con-vs-dropdown--menu {\n width: 15%;\n}", ""]);
// exports
/***/ }),
/***/ "./node_modules/style-loader/index.js!./node_modules/css-loader/index.js!./node_modules/vue-loader/lib/loaders/stylePostLoader.js!./node_modules/postcss-loader/src/index.js?!./node_modules/sass-loader/dist/cjs.js?!./node_modules/vue-loader/lib/index.js?!./resources/js/src/views/service/report/IdCardReport.vue?vue&type=style&index=0&lang=scss&":
/*!****************************************************************************************************************************************************************************************************************************************************************************************************************************************************************!*\
!*** ./node_modules/style-loader!./node_modules/css-loader!./node_modules/vue-loader/lib/loaders/stylePostLoader.js!./node_modules/postcss-loader/src??ref--8-2!./node_modules/sass-loader/dist/cjs.js??ref--8-3!./node_modules/vue-loader/lib??vue-loader-options!./resources/js/src/views/service/report/IdCardReport.vue?vue&type=style&index=0&lang=scss& ***!
\****************************************************************************************************************************************************************************************************************************************************************************************************************************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
var content = __webpack_require__(/*! !../../../../../../node_modules/css-loader!../../../../../../node_modules/vue-loader/lib/loaders/stylePostLoader.js!../../../../../../node_modules/postcss-loader/src??ref--8-2!../../../../../../node_modules/sass-loader/dist/cjs.js??ref--8-3!../../../../../../node_modules/vue-loader/lib??vue-loader-options!./IdCardReport.vue?vue&type=style&index=0&lang=scss& */ "./node_modules/css-loader/index.js!./node_modules/vue-loader/lib/loaders/stylePostLoader.js!./node_modules/postcss-loader/src/index.js?!./node_modules/sass-loader/dist/cjs.js?!./node_modules/vue-loader/lib/index.js?!./resources/js/src/views/service/report/IdCardReport.vue?vue&type=style&index=0&lang=scss&");
if(typeof content === 'string') content = [[module.i, content, '']];
var transform;
var insertInto;
var options = {"hmr":true}
options.transform = transform
options.insertInto = undefined;
var update = __webpack_require__(/*! ../../../../../../node_modules/style-loader/lib/addStyles.js */ "./node_modules/style-loader/lib/addStyles.js")(content, options);
if(content.locals) module.exports = content.locals;
if(false) {}
/***/ }),
/***/ "./node_modules/vue-loader/lib/loaders/templateLoader.js?!./node_modules/vue-loader/lib/index.js?!./resources/js/src/views/service/report/IdCardReport.vue?vue&type=template&id=b071a8f4&":
/*!*****************************************************************************************************************************************************************************************************************************!*\
!*** ./node_modules/vue-loader/lib/loaders/templateLoader.js??vue-loader-options!./node_modules/vue-loader/lib??vue-loader-options!./resources/js/src/views/service/report/IdCardReport.vue?vue&type=template&id=b071a8f4& ***!
\*****************************************************************************************************************************************************************************************************************************/
/*! exports provided: render, staticRenderFns */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
__webpack_require__.r(__webpack_exports__);
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "render", function() { return render; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "staticRenderFns", function() { return staticRenderFns; });
var render = function() {
var _vm = this
var _h = _vm.$createElement
var _c = _vm._self._c || _h
return _c(
"div",
[
_vm.isAdmin
? _c(
"vx-card",
{
ref: "filterCard",
staticClass: "user-list-filters mb-8",
attrs: { actionButtons: "" },
on: { refresh: _vm.resetColFilters, remove: _vm.resetColFilters }
},
[
_c("div", { staticClass: "vx-row" }, [
_c(
"div",
{ staticClass: "vx-col md:w-3/12 sm:w-1/2 w-full" },
[
_c(
"vs-select",
{
staticClass: "w-full",
attrs: {
autocomplete: "",
label: "Employee",
placeholder: "Select an employee"
},
on: {
input: function($event) {
return _vm.fetchReport(_vm.employee)
}
},
model: {
value: _vm.employee,
callback: function($$v) {
_vm.employee = $$v
},
expression: "employee"
}
},
_vm._l(_vm.employees, function(item, index) {
return _c("vs-select-item", {
key: index,
attrs: {
text: item.full_name + " (" + item.emp_id + ")",
value: item.user_id
}
})
}),
1
)
],
1
)
])
]
)
: _vm._e(),
_vm._v(" "),
_c(
"vx-card",
[
_c(
"vs-table",
{
attrs: {
"max-items": "10",
pagination: "",
search: "",
stripe: "",
data: _vm.IDCard
},
scopedSlots: _vm._u([
{
key: "default",
fn: function(ref) {
var data = ref.data
return _vm._l(data, function(tr, indextr) {
return _c(
"vs-tr",
{ key: indextr },
[
_c("vs-td", { attrs: { data: tr.full_name } }, [
_vm._v(" " + _vm._s(tr.full_name) + " ")
]),
_vm._v(" "),
_c("vs-td", { attrs: { data: tr.reason } }, [
_vm._v(" " + _vm._s(tr.reason) + " ")
]),
_vm._v(" "),
_c("vs-td", { attrs: { data: tr.remarks } }, [
_vm._v(" " + _vm._s(tr.remarks) + " ")
]),
_vm._v(" "),
_c("vs-td", { attrs: { data: tr.status } }, [
_vm._v(_vm._s(tr.status))
]),
_vm._v(" "),
_c(
"vs-td",
{ attrs: { id: "action-buttons" } },
[
_c(
"vs-dropdown",
{ attrs: { "vs-trigger-click": "" } },
[
_c("vs-button", {
attrs: {
color: "dark",
icon: "more_vert",
type: "flat"
}
}),
_vm._v(" "),
_c(
"vs-dropdown-menu",
[
_c(
"vs-dropdown-item",
{
attrs: {
to: {
path: "/profile/" + tr.user_id
}
}
},
[
_c(
"span",
{
staticClass: "flex items-center"
},
[
_c("vs-icon", {
staticClass: "mr-2",
attrs: { icon: "preview" }
}),
_vm._v(" "),
_c("span", [
_vm._v("Request Details")
])
],
1
)
]
)
],
1
)
],
1
)
],
1
)
],
1
)
})
}
}
])
},
[
_c(
"template",
{ slot: "thead" },
[
_c("vs-th", { attrs: { "sort-key": "" } }, [
_vm._v("Employee")
]),
_vm._v(" "),
_c("vs-th", { attrs: { "sort-key": "" } }, [
_vm._v("Reason")
]),
_vm._v(" "),
_c("vs-th", { attrs: { "sort-key": "" } }, [
_vm._v("Remarks")
]),
_vm._v(" "),
_c("vs-th", { attrs: { "sort-key": "" } }, [
_vm._v("Status")
]),
_vm._v(" "),
_c("vs-th", [_vm._v("Action")])
],
1
)
],
2
),
_vm._v(" "),
_c(
"vs-button",
{
staticStyle: { "margin-top": "17px" },
attrs: {
size: "small",
"icon-pack": "feather",
icon: "icon-file-plus"
},
on: { click: _vm.exportExcel }
},
[_vm._v("Excel Export")]
)
],
1
)
],
1
)
}
var staticRenderFns = []
render._withStripped = true
/***/ }),
/***/ "./resources/js/src/views/service/report/IdCardReport.vue":
/*!****************************************************************!*\
!*** ./resources/js/src/views/service/report/IdCardReport.vue ***!
\****************************************************************/
/*! exports provided: default */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
__webpack_require__.r(__webpack_exports__);
/* harmony import */ var _IdCardReport_vue_vue_type_template_id_b071a8f4___WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ./IdCardReport.vue?vue&type=template&id=b071a8f4& */ "./resources/js/src/views/service/report/IdCardReport.vue?vue&type=template&id=b071a8f4&");
/* harmony import */ var _IdCardReport_vue_vue_type_script_lang_js___WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ./IdCardReport.vue?vue&type=script&lang=js& */ "./resources/js/src/views/service/report/IdCardReport.vue?vue&type=script&lang=js&");
/* empty/unused harmony star reexport *//* harmony import */ var _IdCardReport_vue_vue_type_style_index_0_lang_scss___WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! ./IdCardReport.vue?vue&type=style&index=0&lang=scss& */ "./resources/js/src/views/service/report/IdCardReport.vue?vue&type=style&index=0&lang=scss&");
/* harmony import */ var _node_modules_vue_loader_lib_runtime_componentNormalizer_js__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(/*! ../../../../../../node_modules/vue-loader/lib/runtime/componentNormalizer.js */ "./node_modules/vue-loader/lib/runtime/componentNormalizer.js");
/* normalize component */
var component = Object(_node_modules_vue_loader_lib_runtime_componentNormalizer_js__WEBPACK_IMPORTED_MODULE_3__["default"])(
_IdCardReport_vue_vue_type_script_lang_js___WEBPACK_IMPORTED_MODULE_1__["default"],
_IdCardReport_vue_vue_type_template_id_b071a8f4___WEBPACK_IMPORTED_MODULE_0__["render"],
_IdCardReport_vue_vue_type_template_id_b071a8f4___WEBPACK_IMPORTED_MODULE_0__["staticRenderFns"],
false,
null,
null,
null
)
/* hot reload */
if (false) { var api; }
component.options.__file = "resources/js/src/views/service/report/IdCardReport.vue"
/* harmony default export */ __webpack_exports__["default"] = (component.exports);
/***/ }),
/***/ "./resources/js/src/views/service/report/IdCardReport.vue?vue&type=script&lang=js&":
/*!*****************************************************************************************!*\
!*** ./resources/js/src/views/service/report/IdCardReport.vue?vue&type=script&lang=js& ***!
\*****************************************************************************************/
/*! exports provided: default */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
__webpack_require__.r(__webpack_exports__);
/* harmony import */ var _node_modules_babel_loader_lib_index_js_ref_4_0_node_modules_vue_loader_lib_index_js_vue_loader_options_IdCardReport_vue_vue_type_script_lang_js___WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! -!../../../../../../node_modules/babel-loader/lib??ref--4-0!../../../../../../node_modules/vue-loader/lib??vue-loader-options!./IdCardReport.vue?vue&type=script&lang=js& */ "./node_modules/babel-loader/lib/index.js?!./node_modules/vue-loader/lib/index.js?!./resources/js/src/views/service/report/IdCardReport.vue?vue&type=script&lang=js&");
/* empty/unused harmony star reexport */ /* harmony default export */ __webpack_exports__["default"] = (_node_modules_babel_loader_lib_index_js_ref_4_0_node_modules_vue_loader_lib_index_js_vue_loader_options_IdCardReport_vue_vue_type_script_lang_js___WEBPACK_IMPORTED_MODULE_0__["default"]);
/***/ }),
/***/ "./resources/js/src/views/service/report/IdCardReport.vue?vue&type=style&index=0&lang=scss&":
/*!**************************************************************************************************!*\
!*** ./resources/js/src/views/service/report/IdCardReport.vue?vue&type=style&index=0&lang=scss& ***!
\**************************************************************************************************/
/*! no static exports found */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
__webpack_require__.r(__webpack_exports__);
/* harmony import */ var _node_modules_style_loader_index_js_node_modules_css_loader_index_js_node_modules_vue_loader_lib_loaders_stylePostLoader_js_node_modules_postcss_loader_src_index_js_ref_8_2_node_modules_sass_loader_dist_cjs_js_ref_8_3_node_modules_vue_loader_lib_index_js_vue_loader_options_IdCardReport_vue_vue_type_style_index_0_lang_scss___WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! -!../../../../../../node_modules/style-loader!../../../../../../node_modules/css-loader!../../../../../../node_modules/vue-loader/lib/loaders/stylePostLoader.js!../../../../../../node_modules/postcss-loader/src??ref--8-2!../../../../../../node_modules/sass-loader/dist/cjs.js??ref--8-3!../../../../../../node_modules/vue-loader/lib??vue-loader-options!./IdCardReport.vue?vue&type=style&index=0&lang=scss& */ "./node_modules/style-loader/index.js!./node_modules/css-loader/index.js!./node_modules/vue-loader/lib/loaders/stylePostLoader.js!./node_modules/postcss-loader/src/index.js?!./node_modules/sass-loader/dist/cjs.js?!./node_modules/vue-loader/lib/index.js?!./resources/js/src/views/service/report/IdCardReport.vue?vue&type=style&index=0&lang=scss&");
/* harmony import */ var _node_modules_style_loader_index_js_node_modules_css_loader_index_js_node_modules_vue_loader_lib_loaders_stylePostLoader_js_node_modules_postcss_loader_src_index_js_ref_8_2_node_modules_sass_loader_dist_cjs_js_ref_8_3_node_modules_vue_loader_lib_index_js_vue_loader_options_IdCardReport_vue_vue_type_style_index_0_lang_scss___WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(_node_modules_style_loader_index_js_node_modules_css_loader_index_js_node_modules_vue_loader_lib_loaders_stylePostLoader_js_node_modules_postcss_loader_src_index_js_ref_8_2_node_modules_sass_loader_dist_cjs_js_ref_8_3_node_modules_vue_loader_lib_index_js_vue_loader_options_IdCardReport_vue_vue_type_style_index_0_lang_scss___WEBPACK_IMPORTED_MODULE_0__);
/* harmony reexport (unknown) */ for(var __WEBPACK_IMPORT_KEY__ in _node_modules_style_loader_index_js_node_modules_css_loader_index_js_node_modules_vue_loader_lib_loaders_stylePostLoader_js_node_modules_postcss_loader_src_index_js_ref_8_2_node_modules_sass_loader_dist_cjs_js_ref_8_3_node_modules_vue_loader_lib_index_js_vue_loader_options_IdCardReport_vue_vue_type_style_index_0_lang_scss___WEBPACK_IMPORTED_MODULE_0__) if(["default"].indexOf(__WEBPACK_IMPORT_KEY__) < 0) (function(key) { __webpack_require__.d(__webpack_exports__, key, function() { return _node_modules_style_loader_index_js_node_modules_css_loader_index_js_node_modules_vue_loader_lib_loaders_stylePostLoader_js_node_modules_postcss_loader_src_index_js_ref_8_2_node_modules_sass_loader_dist_cjs_js_ref_8_3_node_modules_vue_loader_lib_index_js_vue_loader_options_IdCardReport_vue_vue_type_style_index_0_lang_scss___WEBPACK_IMPORTED_MODULE_0__[key]; }) }(__WEBPACK_IMPORT_KEY__));
/***/ }),
/***/ "./resources/js/src/views/service/report/IdCardReport.vue?vue&type=template&id=b071a8f4&":
/*!***********************************************************************************************!*\
!*** ./resources/js/src/views/service/report/IdCardReport.vue?vue&type=template&id=b071a8f4& ***!
\***********************************************************************************************/
/*! exports provided: render, staticRenderFns */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
__webpack_require__.r(__webpack_exports__);
/* harmony import */ var _node_modules_vue_loader_lib_loaders_templateLoader_js_vue_loader_options_node_modules_vue_loader_lib_index_js_vue_loader_options_IdCardReport_vue_vue_type_template_id_b071a8f4___WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! -!../../../../../../node_modules/vue-loader/lib/loaders/templateLoader.js??vue-loader-options!../../../../../../node_modules/vue-loader/lib??vue-loader-options!./IdCardReport.vue?vue&type=template&id=b071a8f4& */ "./node_modules/vue-loader/lib/loaders/templateLoader.js?!./node_modules/vue-loader/lib/index.js?!./resources/js/src/views/service/report/IdCardReport.vue?vue&type=template&id=b071a8f4&");
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "render", function() { return _node_modules_vue_loader_lib_loaders_templateLoader_js_vue_loader_options_node_modules_vue_loader_lib_index_js_vue_loader_options_IdCardReport_vue_vue_type_template_id_b071a8f4___WEBPACK_IMPORTED_MODULE_0__["render"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "staticRenderFns", function() { return _node_modules_vue_loader_lib_loaders_templateLoader_js_vue_loader_options_node_modules_vue_loader_lib_index_js_vue_loader_options_IdCardReport_vue_vue_type_template_id_b071a8f4___WEBPACK_IMPORTED_MODULE_0__["staticRenderFns"]; });
/***/ })
}]);
|
/*
* This header is generated by classdump-dyld 1.0
* on Sunday, June 7, 2020 at 11:43:55 AM Mountain Standard Time
* Operating System: Version 13.4.5 (Build 17L562)
* Image Source: /System/Library/PrivateFrameworks/PhotosGraph.framework/PhotosGraph
* classdump-dyld is licensed under GPLv3, Copyright © 2013-2016 by Elias Limneos.
*/
@class NSArray, CLCircularRegion;
// Immutable value object pairing a set of photo assets with the circular
// geographic region they were clustered into (class-dump generated header).
@interface PGAssetCluster : NSObject {
	NSArray* _assets;
	CLCircularRegion* _region;
}
// The assets belonging to this cluster.
@property (nonatomic,retain,readonly) NSArray * assets; //@synthesize assets=_assets - In the implementation block
// The geographic region covering the cluster.
@property (nonatomic,retain,readonly) CLCircularRegion * region; //@synthesize region=_region - In the implementation block
-(CLCircularRegion *)region;
-(NSArray *)assets;
// Designated initializer; arg types presumably NSArray / CLCircularRegion
// (signatures erased by class-dump -- confirm before use).
-(id)initWithAssets:(id)arg1 region:(id)arg2 ;
@end
|
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import os
from ..... import tensor as mt
from ..... import dataframe as md
from .....utils import lazy_import
from .. import MarsDataset, RandomSampler, SequentialSampler, \
SubsetRandomSampler, DistributedSampler, run_pytorch_script
torch_installed = lazy_import('torch', globals=globals()) is not None
@pytest.mark.skipif(not torch_installed, reason='pytorch not installed')
def test_mars_dataset(setup):
    """MarsDataset should wrap Mars tensors, Mars dataframes/series, numpy
    arrays and pandas objects uniformly, and raise TypeError for anything
    else."""
    from torch.utils.data import Dataset
    import numpy as np
    import pandas as pd

    # Mars tensor
    data = mt.random.rand(1000, 32, dtype='f4')
    labels = mt.random.randint(0, 2, (1000, 10), dtype='f4')
    data_verify = data[1].execute().fetch()
    labels_verify = labels[1].execute().fetch()
    train_dataset = MarsDataset(data, labels)
    assert isinstance(train_dataset, Dataset)
    np.testing.assert_array_equal(train_dataset[1][0], data_verify)
    np.testing.assert_array_equal(train_dataset[1][1], labels_verify)
    assert len(train_dataset) == 1000

    # np ndarray
    data = np.random.rand(1000, 32)
    labels = np.random.randint(0, 2, (1000, 10))
    data_verify = data[1]
    # Bug fix: the original assigned ``labels.dtype = "float32"``, which
    # reinterprets the int64 buffer in place -- the array's shape silently
    # becomes (1000, 20) and its contents become garbage floats. ``astype``
    # performs the intended value conversion.
    labels = labels.astype("float32")
    labels_verify = labels[1]
    train_dataset = MarsDataset(data, labels)
    np.testing.assert_array_equal(train_dataset[1][0], data_verify)
    np.testing.assert_array_equal(train_dataset[1][1], labels_verify)
    assert len(train_dataset) == 1000

    # Mars dataframe
    data = md.DataFrame(data)
    labels = md.DataFrame(labels)
    data_verify = data.iloc[1].execute().fetch().values
    labels_verify = labels.iloc[1].execute().fetch().values
    train_dataset = MarsDataset(data, labels, fetch_kwargs={
        'extra_config': {'check_series_name': False}})
    np.testing.assert_array_equal(train_dataset[1][0], data_verify)
    np.testing.assert_array_equal(train_dataset[1][1], labels_verify)
    assert len(train_dataset) == 1000

    # Mars Series
    label = labels[1]
    label_verify = label[1].execute().fetch()
    train_dataset = MarsDataset(data, label, fetch_kwargs={
        'extra_config': {'check_series_name': False}})
    np.testing.assert_array_equal(train_dataset[1][0], data_verify)
    assert train_dataset[1][1] == label_verify
    assert len(train_dataset) == 1000

    # pandas dataframe
    data = pd.DataFrame(np.random.rand(1000, 32))
    labels = pd.DataFrame(np.random.randint(0, 2, (1000, 10)), dtype="float32")
    data_verify = data.iloc[1].values
    labels_verify = labels.iloc[1].values
    train_dataset = MarsDataset(data, labels)
    np.testing.assert_array_equal(train_dataset[1][0], data_verify)
    np.testing.assert_array_equal(train_dataset[1][1], labels_verify)
    assert len(train_dataset) == 1000

    # pandas series
    label = labels[1]
    label_verify = label[1]
    train_dataset = MarsDataset(data, label)
    np.testing.assert_array_equal(train_dataset[1][0], data_verify)
    assert train_dataset[1][1] == label_verify
    assert len(train_dataset) == 1000

    # test TypeError: unsupported label container
    label = tuple(range(1000))
    with pytest.raises(TypeError) as e:
        train_dataset = MarsDataset(data, label)
    exec_msg = e.value.args[0]
    assert exec_msg == "Unexpected dataset type: <class 'tuple'>"
@pytest.mark.skipif(not torch_installed, reason='pytorch not installed')
def test_sequential_sampler(setup_cluster):
    """A SequentialSampler over a MarsDataset should drive a short PyTorch
    training loop end to end."""
    import torch

    features = mt.random.rand(1000, 32, dtype='f4')
    targets = mt.random.randint(0, 2, (1000, 10), dtype='f4')
    dataset = MarsDataset(features, targets)
    assert len(dataset) == 1000
    sampler = SequentialSampler(dataset)
    assert len(sampler) == 1000

    loader = torch.utils.data.DataLoader(dataset=dataset,
                                         batch_size=32,
                                         sampler=sampler)
    model = torch.nn.Sequential(
        torch.nn.Linear(32, 64),
        torch.nn.ReLU(),
        torch.nn.Linear(64, 64),
        torch.nn.ReLU(),
        torch.nn.Linear(64, 10),
        torch.nn.Softmax(dim=1),
    )
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
    criterion = torch.nn.BCELoss()
    # Two epochs are enough to prove the sampler/loader integration works.
    for _epoch in range(2):
        for batch_data, batch_labels in loader:
            predictions = model(batch_data)
            loss = criterion(predictions.squeeze(), batch_labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
@pytest.mark.skipif(not torch_installed, reason='pytorch not installed')
def test_random_sampler(setup_cluster):
    """RandomSampler argument validation, sizing, iteration and training."""
    import torch

    features = mt.random.rand(1000, 32, dtype='f4')
    targets = mt.random.randint(0, 2, (1000, 10), dtype='f4')
    dataset = MarsDataset(features, targets)

    # Constructor validation.
    with pytest.raises(ValueError) as exc_info:
        RandomSampler(dataset, replacement=1)
    assert exc_info.value.args[0] == \
        "replacement should be a boolean value, but got replacement=1"

    with pytest.raises(ValueError) as exc_info:
        RandomSampler(dataset, num_samples=900)
    assert exc_info.value.args[0] == (
        "With replacement=False, num_samples should not "
        "be specified, since a random permute will be performed.")

    with pytest.raises(ValueError) as exc_info:
        RandomSampler(dataset, replacement=True, num_samples=-1)
    assert exc_info.value.args[0] == \
        "num_samples should be a positive integer value, but got num_samples=-1"

    # __len__ / num_samples.
    sampler = RandomSampler(dataset)
    assert len(sampler) == 1000
    assert sampler.num_samples == 1000

    # Iteration with an explicit torch generator.
    gen = torch.Generator()
    gen.manual_seed(2147483647)
    sampler = RandomSampler(dataset, generator=gen)
    assert len(sampler) == 1000
    loader = torch.utils.data.DataLoader(
        dataset=dataset, batch_size=32, sampler=sampler)
    for batch_data, batch_labels in loader:
        assert len(batch_data[0]) == 32
        assert len(batch_labels[0]) == 10

    # Sampling with replacement honours num_samples.
    sampler = RandomSampler(dataset, replacement=True, num_samples=900)
    assert len(sampler) == 900
    loader = torch.utils.data.DataLoader(
        dataset=dataset, batch_size=32, sampler=sampler)
    for batch_data, batch_labels in loader:
        assert len(batch_data[0]) == 32
        assert len(batch_labels[0]) == 10

    # The sampled batches can drive a torch training loop.
    net = torch.nn.Sequential(
        torch.nn.Linear(32, 64),
        torch.nn.ReLU(),
        torch.nn.Linear(64, 64),
        torch.nn.ReLU(),
        torch.nn.Linear(64, 10),
        torch.nn.Softmax(dim=1),
    )
    opt = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.5)
    loss_fn = torch.nn.BCELoss()
    for _epoch in range(2):
        for batch_data, batch_labels in loader:
            loss = loss_fn(net(batch_data).squeeze(), batch_labels)
            opt.zero_grad()
            loss.backward()
            opt.step()
@pytest.mark.skipif(not torch_installed, reason='pytorch not installed')
def test_subset_random_sampler(setup_cluster):
    """SubsetRandomSampler over randomly chosen indices yields full batches."""
    import numpy as np
    import torch

    features = mt.random.rand(1000, 32, dtype='f4')
    targets = mt.random.randint(0, 2, (1000, 10), dtype='f4')
    features.execute()
    targets.execute()
    dataset = MarsDataset(features, targets)

    indices = np.random.choice(range(len(dataset)), len(dataset))
    sampler = SubsetRandomSampler(indices)
    assert len(sampler) == 1000

    loader = torch.utils.data.DataLoader(
        dataset=dataset, batch_size=32, sampler=sampler)
    for batch_data, batch_labels in loader:
        assert len(batch_data[0]) == 32
        assert len(batch_labels[0]) == 10
@pytest.mark.skipif(not torch_installed, reason='pytorch not installed')
def test_distributed_sampler(setup_cluster):
    """DistributedSampler rank validation plus drop_last on/off partitioning."""
    import torch

    features = mt.random.rand(1001, 32, dtype='f4')
    targets = mt.random.randint(0, 2, (1001, 10), dtype='f4')
    dataset = MarsDataset(features, targets)

    # Rank must fall inside [0, num_replicas - 1].
    with pytest.raises(ValueError) as exc_info:
        DistributedSampler(dataset, num_replicas=2, rank=-1)
    assert exc_info.value.args[0] == \
        "Invalid rank -1, rank should be in the interval [0, 1]"

    # With drop_last the odd element is discarded: 1001 // 2 == 500.
    sampler = DistributedSampler(dataset, num_replicas=2, rank=0,
                                 drop_last=True, shuffle=True)
    assert len(sampler) == 500
    loader = torch.utils.data.DataLoader(
        dataset=dataset, batch_size=32, sampler=sampler)
    for batch_data, batch_labels in loader:
        assert len(batch_data[0]) == 32
        assert len(batch_labels[0]) == 10

    # Without drop_last the replica is padded up: ceil(1001 / 2) == 501.
    sampler = DistributedSampler(dataset, num_replicas=2, rank=0,
                                 drop_last=False, shuffle=False)
    sampler.set_epoch(10)
    assert len(sampler) == 501
    loader = torch.utils.data.DataLoader(
        dataset=dataset, batch_size=32, sampler=sampler)
    for batch_data, batch_labels in loader:
        assert len(batch_data[0]) == 32
        assert len(batch_labels[0]) == 10
@pytest.mark.skipif(not torch_installed, reason='pytorch not installed')
def test_mars_dataset_script(setup_cluster):
    """Run the companion pytorch_dataset.py script against the test cluster."""
    sess = setup_cluster
    script = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                          'pytorch_dataset.py')
    features = mt.random.rand(1000, 32, dtype='f4')
    targets = mt.random.randint(0, 2, (1000, 10), dtype='f4')
    result = run_pytorch_script(
        script, n_workers=2,
        data={'feature_data': features, 'labels': targets},
        command_argv=['multiple'], port=9945, session=sess)
    assert result.fetch()['status'] == 'ok'
|
# Symbolic names for HTTP status codes, grouped by response class
# (RFC 7231 core codes plus WebDAV RFC 4918, RFC 6585 and RFC 7725).
# 1xx: informational
HTTP_100_CONTINUE = 100
HTTP_101_SWITCHING_PROTOCOLS = 101
# 2xx: success
HTTP_200_OK = 200
HTTP_201_CREATED = 201
HTTP_202_ACCEPTED = 202
HTTP_203_NON_AUTHORITATIVE_INFORMATION = 203
HTTP_204_NO_CONTENT = 204
HTTP_205_RESET_CONTENT = 205
HTTP_206_PARTIAL_CONTENT = 206
HTTP_207_MULTI_STATUS = 207
# 3xx: redirection
HTTP_300_MULTIPLE_CHOICES = 300
HTTP_301_MOVED_PERMANENTLY = 301
HTTP_302_FOUND = 302
HTTP_303_SEE_OTHER = 303
HTTP_304_NOT_MODIFIED = 304
HTTP_305_USE_PROXY = 305
HTTP_306_RESERVED = 306
HTTP_307_TEMPORARY_REDIRECT = 307
# 4xx: client error
HTTP_400_BAD_REQUEST = 400
HTTP_401_UNAUTHORIZED = 401
HTTP_402_PAYMENT_REQUIRED = 402
HTTP_403_FORBIDDEN = 403
HTTP_404_NOT_FOUND = 404
HTTP_405_METHOD_NOT_ALLOWED = 405
HTTP_406_NOT_ACCEPTABLE = 406
HTTP_407_PROXY_AUTHENTICATION_REQUIRED = 407
HTTP_408_REQUEST_TIMEOUT = 408
HTTP_409_CONFLICT = 409
HTTP_410_GONE = 410
HTTP_411_LENGTH_REQUIRED = 411
HTTP_412_PRECONDITION_FAILED = 412
HTTP_413_REQUEST_ENTITY_TOO_LARGE = 413
HTTP_414_REQUEST_URI_TOO_LONG = 414
HTTP_415_UNSUPPORTED_MEDIA_TYPE = 415
HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE = 416
HTTP_417_EXPECTATION_FAILED = 417
HTTP_422_UNPROCESSABLE_ENTITY = 422
HTTP_423_LOCKED = 423
HTTP_424_FAILED_DEPENDENCY = 424
HTTP_428_PRECONDITION_REQUIRED = 428
HTTP_429_TOO_MANY_REQUESTS = 429
HTTP_431_REQUEST_HEADER_FIELDS_TOO_LARGE = 431
HTTP_451_UNAVAILABLE_FOR_LEGAL_REASONS = 451
# 5xx: server error
HTTP_500_INTERNAL_SERVER_ERROR = 500
HTTP_501_NOT_IMPLEMENTED = 501
HTTP_502_BAD_GATEWAY = 502
HTTP_503_SERVICE_UNAVAILABLE = 503
HTTP_504_GATEWAY_TIMEOUT = 504
HTTP_505_HTTP_VERSION_NOT_SUPPORTED = 505
HTTP_507_INSUFFICIENT_STORAGE = 507
HTTP_511_NETWORK_AUTHENTICATION_REQUIRED = 511
|
// Doxygen-generated search index fragment for symbols beginning with "l"
// (format: [display name + id, [label, [url-with-anchor, flag, tooltip], ...]]).
// Machine-generated — do not edit by hand.
var searchData=
[
  ['last_1217',['last',['../classoperations__research_1_1_sorted_disjoint_interval_list.html#a2b032ed2ce05e48aa222df46d5701be6',1,'operations_research::SortedDisjointIntervalList']]],
  ['lastintervallessorequal_1218',['LastIntervalLessOrEqual',['../classoperations__research_1_1_sorted_disjoint_interval_list.html#a4ccfd91a756b041e4d5a8feb2d621ada',1,'operations_research::SortedDisjointIntervalList']]],
  ['lbd_5fmoving_5faverage_5frestart_1219',['LBD_MOVING_AVERAGE_RESTART',['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a13ba09bbfc5bba6baa4b160257aae28f',1,'operations_research::sat::PROTOBUF_FINAL']]],
  ['limitreached_1220',['LimitReached',['../classoperations__research_1_1_time_limit.html#a810d5f7aaf80cc09cf5a094e20c1aaca',1,'operations_research::TimeLimit::LimitReached()'],['../classoperations__research_1_1_shared_time_limit.html#ad837e6231722ead53bbaf1c6e8f66032',1,'operations_research::SharedTimeLimit::LimitReached()']]],
  ['lin_5fmax_1221',['lin_max',['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a389788c4ab111e9ea219b15febecbe06',1,'operations_research::sat::PROTOBUF_FINAL']]],
  ['lin_5fmin_1222',['lin_min',['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a911bbc19fa15a5eac4d6210c0bb7fedf',1,'operations_research::sat::PROTOBUF_FINAL']]],
  ['linear_1223',['linear',['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#afc419c7de5e5fd449141ef0314983e7b',1,'operations_research::sat::PROTOBUF_FINAL']]],
  ['linearargumentproto_1224',['LinearArgumentProto',['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a817d6f70cce6b3d11d1d49c01df1fec8',1,'operations_research::sat::PROTOBUF_FINAL::LinearArgumentProto()'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#acfddc4e8aa0292de8f00551f6a31714e',1,'operations_research::sat::PROTOBUF_FINAL::LinearArgumentProto(const LinearArgumentProto &from)'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a942d839005c1a49096af445cda01f222',1,'operations_research::sat::PROTOBUF_FINAL::LinearArgumentProto(LinearArgumentProto &&from) noexcept'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a7e4600542fbdbb5a8aeaf73ef91233ed',1,'operations_research::sat::PROTOBUF_FINAL::LinearArgumentProto(::PROTOBUF_NAMESPACE_ID::Arena *arena)']]],
  ['linearbooleanconstraint_1225',['LinearBooleanConstraint',['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#af0d871fdab73d5a900ae8152fce1714d',1,'operations_research::sat::PROTOBUF_FINAL::LinearBooleanConstraint()'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#aeff982f99518ad0375b916497151a45f',1,'operations_research::sat::PROTOBUF_FINAL::LinearBooleanConstraint(const LinearBooleanConstraint &from)'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a595106a23582dc31341713516f323f52',1,'operations_research::sat::PROTOBUF_FINAL::LinearBooleanConstraint(LinearBooleanConstraint &&from) noexcept'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#aa75860a4984747cdd68373053122744b',1,'operations_research::sat::PROTOBUF_FINAL::LinearBooleanConstraint(::PROTOBUF_NAMESPACE_ID::Arena *arena)']]],
  ['linearbooleanproblem_1226',['LinearBooleanProblem',['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a205067c8a0b7e779bb38ba42b8d2c043',1,'operations_research::sat::PROTOBUF_FINAL::LinearBooleanProblem()'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a229fc15d288dbbc254a90a2b394d826a',1,'operations_research::sat::PROTOBUF_FINAL::LinearBooleanProblem(const LinearBooleanProblem &from)'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a011ed6f66a5f59bb11ba4650e322c74a',1,'operations_research::sat::PROTOBUF_FINAL::LinearBooleanProblem(LinearBooleanProblem &&from) noexcept'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a60cd592401eb272fede45cd3deda51ab',1,'operations_research::sat::PROTOBUF_FINAL::LinearBooleanProblem(::PROTOBUF_NAMESPACE_ID::Arena *arena)']]],
  ['linearconstraintproto_1227',['LinearConstraintProto',['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a74eaaa37670a2d429a0df860ebf1f9a2',1,'operations_research::sat::PROTOBUF_FINAL::LinearConstraintProto()'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#aa0f1e1ce9d6a134bda69a401ddb076ae',1,'operations_research::sat::PROTOBUF_FINAL::LinearConstraintProto(const LinearConstraintProto &from)'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a18e78db02629fbb61acc36abe7ffd5f1',1,'operations_research::sat::PROTOBUF_FINAL::LinearConstraintProto(LinearConstraintProto &&from) noexcept'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a28394548b55e399130f1e55aca187d82',1,'operations_research::sat::PROTOBUF_FINAL::LinearConstraintProto(::PROTOBUF_NAMESPACE_ID::Arena *arena)']]],
  ['linearexpr_1228',['LinearExpr',['../classoperations__research_1_1sat_1_1_linear_expr.html',1,'LinearExpr'],['../classoperations__research_1_1sat_1_1_bool_var.html#a7678a938bf60a5c17fb47cf58995db0c',1,'operations_research::sat::BoolVar::LinearExpr()'],['../classoperations__research_1_1sat_1_1_int_var.html#a7678a938bf60a5c17fb47cf58995db0c',1,'operations_research::sat::IntVar::LinearExpr()'],['../classoperations__research_1_1sat_1_1_linear_expr.html#a708e7b52aae1fa3c440ef7ced3f06cd9',1,'operations_research::sat::LinearExpr::LinearExpr()'],['../classoperations__research_1_1sat_1_1_linear_expr.html#a9556910050b0975ea15c090d9f0ae801',1,'operations_research::sat::LinearExpr::LinearExpr(BoolVar var)'],['../classoperations__research_1_1sat_1_1_linear_expr.html#a6acce576bf92bd4158eed2a55840e4a3',1,'operations_research::sat::LinearExpr::LinearExpr(IntVar var)'],['../classoperations__research_1_1sat_1_1_linear_expr.html#a9c095e393028e08704f04f269ae2e0cc',1,'operations_research::sat::LinearExpr::LinearExpr(int64 constant)']]],
  ['linearexpressionproto_1229',['LinearExpressionProto',['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a0a32a373ea150f59e149b2e1c5395ced',1,'operations_research::sat::PROTOBUF_FINAL::LinearExpressionProto()'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#aa7f9d079868fb4ef34c6cc1fba5e80a5',1,'operations_research::sat::PROTOBUF_FINAL::LinearExpressionProto(const LinearExpressionProto &from)'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a0662c8e3658dec6a373cfc0cf0d68458',1,'operations_research::sat::PROTOBUF_FINAL::LinearExpressionProto(LinearExpressionProto &&from) noexcept'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a25e8fb62d5432940eba46c102e4e777f',1,'operations_research::sat::PROTOBUF_FINAL::LinearExpressionProto(::PROTOBUF_NAMESPACE_ID::Arena *arena)']]],
  ['linearization_5flevel_1230',['linearization_level',['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a5ebedbd614898664961cc9e392717753',1,'operations_research::sat::PROTOBUF_FINAL']]],
  ['linearobjective_1231',['LinearObjective',['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a11ff64811767762dfe4c754e745dad86',1,'operations_research::sat::PROTOBUF_FINAL::LinearObjective()'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#aa2856c6d65014939d6d2b78a2c852c0a',1,'operations_research::sat::PROTOBUF_FINAL::LinearObjective(const LinearObjective &from)'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#adb39ff0b7199986bf989ea6973b313d7',1,'operations_research::sat::PROTOBUF_FINAL::LinearObjective(LinearObjective &&from) noexcept'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#aa1aaaabbcb6acb332f4c7aa2f69b6b4a',1,'operations_research::sat::PROTOBUF_FINAL::LinearObjective(::PROTOBUF_NAMESPACE_ID::Arena *arena)']]],
  ['literals_1232',['literals',['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a9bdd9b9cd1ab7d9ded95db88d9f1482b',1,'operations_research::sat::PROTOBUF_FINAL::literals(int index) const'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#ad36b5d1a40af0e486a0ff999e7f1c4ae',1,'operations_research::sat::PROTOBUF_FINAL::literals() const'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a9bdd9b9cd1ab7d9ded95db88d9f1482b',1,'operations_research::sat::PROTOBUF_FINAL::literals(int index) const'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#ad36b5d1a40af0e486a0ff999e7f1c4ae',1,'operations_research::sat::PROTOBUF_FINAL::literals() const'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a9bdd9b9cd1ab7d9ded95db88d9f1482b',1,'operations_research::sat::PROTOBUF_FINAL::literals(int index) const'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#ad36b5d1a40af0e486a0ff999e7f1c4ae',1,'operations_research::sat::PROTOBUF_FINAL::literals() const'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a9bdd9b9cd1ab7d9ded95db88d9f1482b',1,'operations_research::sat::PROTOBUF_FINAL::literals(int index) const'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#ad36b5d1a40af0e486a0ff999e7f1c4ae',1,'operations_research::sat::PROTOBUF_FINAL::literals() const'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a9bdd9b9cd1ab7d9ded95db88d9f1482b',1,'operations_research::sat::PROTOBUF_FINAL::literals(int index) const'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#ad36b5d1a40af0e486a0ff999e7f1c4ae',1,'operations_research::sat::PROTOBUF_FINAL::literals() const'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a9bdd9b9cd1ab7d9ded95db88d9f1482b',1,'operations_research::sat::PROTOBUF_FINAL::literals(int index) 
const'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#ad36b5d1a40af0e486a0ff999e7f1c4ae',1,'operations_research::sat::PROTOBUF_FINAL::literals() const']]],
  ['literals_5fsize_1233',['literals_size',['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a72d41feda9a93c11089d3d99d6270999',1,'operations_research::sat::PROTOBUF_FINAL::literals_size() const'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a72d41feda9a93c11089d3d99d6270999',1,'operations_research::sat::PROTOBUF_FINAL::literals_size() const'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a72d41feda9a93c11089d3d99d6270999',1,'operations_research::sat::PROTOBUF_FINAL::literals_size() const'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a72d41feda9a93c11089d3d99d6270999',1,'operations_research::sat::PROTOBUF_FINAL::literals_size() const'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a72d41feda9a93c11089d3d99d6270999',1,'operations_research::sat::PROTOBUF_FINAL::literals_size() const'],['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a72d41feda9a93c11089d3d99d6270999',1,'operations_research::sat::PROTOBUF_FINAL::literals_size() const']]],
  ['lns_5ffocus_5fon_5fdecision_5fvariables_1234',['lns_focus_on_decision_variables',['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a1d7359b01bafc8348537c5501fd29e8d',1,'operations_research::sat::PROTOBUF_FINAL']]],
  ['lock_5fbased_1235',['LOCK_BASED',['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#afe79eadd7a7aac1d25e518d99790978c',1,'operations_research::sat::PROTOBUF_FINAL']]],
  ['log_5fsearch_5fprogress_1236',['log_search_progress',['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a15aff33b9baefb846c984351291ae92d',1,'operations_research::sat::PROTOBUF_FINAL']]],
  ['lower_5fbound_1237',['lower_bound',['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#a19f3335a255f7cd2fc24e58ead19c914',1,'operations_research::sat::PROTOBUF_FINAL']]],
  ['lp_5fsearch_1238',['LP_SEARCH',['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#ace8b3f752ceddc8a6a7ef6140622c6a4',1,'operations_research::sat::PROTOBUF_FINAL']]],
  ['luby_5frestart_1239',['LUBY_RESTART',['../classoperations__research_1_1sat_1_1_p_r_o_t_o_b_u_f___f_i_n_a_l.html#ade0b76c65f672e4a7c4aa8787fc89b86',1,'operations_research::sat::PROTOBUF_FINAL']]]
];
|
// THIS FILE IS AUTO GENERATED
// react-icons wrapper: exports the "milk carton" game icon as a React
// component built from its SVG path data (512x512 viewBox).
var GenIcon = require('../lib').GenIcon
module.exports.GiMilkCarton = function GiMilkCarton (props) {
  // GenIcon(iconTree) returns a component factory; calling it with props
  // renders the <svg> element.
  return GenIcon({"tag":"svg","attr":{"viewBox":"0 0 512 512"},"child":[{"tag":"path","attr":{"d":"M302.958 20.019l-93.916 46.564v35.404c31.305-15.522 62.61-31.047 93.916-46.568zm6.53 52.252l-95.4 47.3 63.036 78.137 95.397-47.303zm-111.915 55.492l-33.732 16.724h47.224zm-22.119 34.722l71.615 26.633-21.484-26.633zm-41.021 3.948v276.752l131.22 48.796v-276.75zm243.134 1.56c-31.306 15.521-62.61 31.044-93.916 46.567v275.863l93.916-46.567zM176.501 272.466s-15.3 15.085-9.889 24.203c4.167 7.02 21.889 5.418 21.889 5.418l15.549 3.848s17.72 10.374 21.887 5.416c5.41-6.44-9.887-29.098-9.887-29.098s35.91 33.492 29.662 47.318c-3.143 6.955-24.719-2.117-24.719-2.117s5.495 7.21 4.944 10.549c-1.001 6.062-13.774 9.916-13.774 9.916s1.111 21.24-6 25.168c-5.147 2.843-14.628.497-19.775-4.895-7.111-7.448-6-28.136-6-28.136s-12.775-10.177-13.776-16.735c-.55-3.611 4.944-8.103 4.944-8.103s-21.575-1.607-24.717-10.117c-6.247-16.919 29.662-32.635 29.662-32.635z"}}]})(props);
};
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations


# Migration: add a unique, optional ``slug`` field to the Index model so
# other apps can reference an index symbolically instead of by pk.
class Migration(migrations.Migration):
    dependencies = [
        ('document_indexing', '0004_auto_20150708_0113'),
    ]
    operations = [
        migrations.AddField(
            model_name='index',
            name='slug',
            # null=True/blank=True keep existing rows valid; unique=True
            # enforces one slug per index once set.
            field=models.SlugField(
                # NOTE(review): help_text reads "This values will be used" —
                # the runtime string is left untouched; fixing the grammar
                # belongs in a new migration, not this historical one.
                null=True, max_length=128, blank=True, help_text='This values '
                'will be used by other apps to reference this index.',
                unique=True, verbose_name='Slug'
            ),
            preserve_default=True,
        ),
    ]
|
varsName = []  # names of declared variables
varsVal = []   # values of declared variables (index-aligned with varsName)
funcName = []  # names of declared functions
funcComm = []  # per-function lists of command strings (index-aligned with funcName)


def main(command):
    """Parse and execute one command of the toy scripting language.

    Dispatch is substring based: each branch looks for its own marker in
    *command*, and the repeated ``while``/``def``/``if`` guards keep
    compound constructs out of the simple branches.  Interpreter state
    lives in the module-level parallel lists above.
    """
    # "$in < name": read a value from stdin into an existing variable.
    if (command.find("$in") >= 0 and command.find("<") > 0 and command.find("while") == -1
            and command.find("def") == -1 and command.find("if") == -1):
        command = command.replace("$in", "")  # drop the $in marker
        command = command.replace("<", "")    # drop the <
        # input() always returns str, so the variable becomes a string.
        varsVal[varsName.index(command)] = input("[" + command + "] <- ")

    # "$out < ...": print either a variable (via /name/) or literal text.
    if (command.find("$out") >= 0 and command.find("<") >= 0 and command.find("while") == -1
            and command.find("def") == -1 and command.find("if") == -1):
        command = command.replace("$out", "")
        # "/" marks variable output.  (The original also tested
        # command.find("") >= 0, which is always true, so it was dropped.)
        if (command.find("/") >= 0):
            command = command.replace("<", "")
            command = command.replace(" ", "")
            command = command.split("/")
            # part 1 (between the slashes) is the variable name
            print(f"[{command[1]}] ->", varsVal[varsName.index(command[1])])
            command = ""  # consume the command
        else:
            command = command.split("<")
            print("[I/O] " + command[0])  # literal text output
            command = ""  # consume the command

    # "=": variable declaration/initialisation (int/float/string/bool/copy).
    if (command.find("=") >= 0 and command.find("while") == -1
            and command.find("def") == -1 and command.find("if") == -1):
        var = command.replace(" ", "")  # strip spaces
        var = var.split("=")            # [typed name, value]
        # "int" prefix -> integer variable
        if (var[0].find("int") >= 0):
            varsName.append(str(var[0]).replace("int", ""))
            varsVal.append(int(var[1].replace(" ", "")))
        # "float" prefix -> float variable
        if (var[0].find("float") >= 0):
            varsName.append(var[0].replace("float", ""))
            varsVal.append(float(var[1].replace("float", "")))
        # "string" prefix with a quoted value -> string variable
        if (var[1].find('"') >= 0 and var[0].find("string") >= 0):
            varsName.append(var[0].replace("string", ""))
            varsVal.append(var[1].replace('"', ""))
        # "bool" prefix -> boolean variable
        if (var[0].find("bool") >= 0):
            # FIX: str.replace() requires a replacement argument; the original
            # var[0].replace("bool") raised TypeError on every bool declaration.
            varsName.append(var[0].replace("bool", ""))
            if (var[1].replace(" ", "") == "True"):
                varsVal.append(True)
            else:
                varsVal.append(False)
        # "*" prefix -> copy the value of another variable
        if (var[0].find("*") == 0):
            varsName.append(var[0].replace("*", ""))
            varsVal.append(varsVal[varsName.index(var[1])])

    # "name.toInt()": coerce an existing variable to int.
    if (command.find(".toInt()") >= 0 and command.find("while") == -1
            and command.find("def") == -1 and command.find("if") == -1):
        var = command.replace(" ", "")
        var = var.replace(".toInt()", "")
        varsVal[varsName.index(var)] = int(varsVal[varsName.index(var)])

    # "name.toFloat()": coerce an existing variable to float.
    if (command.find(".toFloat()") >= 0 and command.find("while") == -1
            and command.find("def") == -1 and command.find("if") == -1):
        var = command.replace(" ", "")
        var = var.replace(".toFloat()", "")
        varsVal[varsName.index(var)] = float(varsVal[varsName.index(var)])

    # "%exit": stop the interpreter.
    if (command.find("%exit") >= 0 and command.find("while") == -1
            and command.find("def") == -1 and command.find("if") == -1):
        exit()

    # "%import name.ext": load function definitions from a file.
    if (command.find("%import") >= 0 and command.find("while") == -1
            and command.find("def") == -1 and command.find("if") == -1):
        var = command.replace(" ", "")
        var = var.replace("%import", "")  # var is now the file name
        # FIX: use a context manager so the file handle gets closed.
        # NOTE(review): the loop variable shadows `command` (as in the
        # original), so branches below see the file's last line afterwards.
        with open(f"{var}") as file:
            for command in file:
                # "def:name:cmd1;cmd2": register a function defined in the file.
                if (command.find("def") >= 0 and command.find(":") >= 0):
                    fn = command.split(":")
                    fnc = fn[2].split(";")
                    funcName.append(fn[1].replace(" ", ""))
                    funcComm.append(fnc)
        # NOTE(review): the blocks below index into `var` (the FILE NAME),
        # so var[0]/var[1] are single characters and the type prefixes can
        # never match; preserved verbatim (plus the bool fix) from the
        # original in case something depends on them.
        if (var[0].find("int") >= 0):
            varsName.append(str(var[0]).replace("int", ""))
            varsVal.append(int(var[1].replace(" ", "")))
        if (var[0].find("float") >= 0):
            varsName.append(var[0].replace("float", ""))
            varsVal.append(float(var[1].replace("float", "")))
        if (var[1].find('"') >= 0 and var[0].find("string") >= 0):
            varsName.append(var[0].replace("string", ""))
            varsVal.append(var[1].replace('"', ""))
        if (var[0].find("bool") >= 0):
            # FIX: same missing-argument bug as in the main "=" branch.
            varsName.append(var[0].replace("bool", ""))
            if (var[1].replace(" ", "") == "True"):
                varsVal.append(True)
            else:
                varsVal.append(False)
        if (var[0].find("*") == 0):
            varsName.append(var[0].replace("*", ""))
            varsVal.append(varsVal[varsName.index(var[1])])

    # "name ++": increment a variable by 1.
    if (command.find("++") >= 0 and command.find("while") == -1
            and command.find("def") == -1 and command.find("if") == -1):
        var = command.replace(" ", "")
        var = var.replace("++", "")
        varsVal[varsName.index(var)] = varsVal[varsName.index(var)] + 1

    # "name --": decrement a variable by 1.
    if (command.find("--") >= 0 and command.find("while") == -1
            and command.find("def") == -1 and command.find("if") == -1):
        var = command.replace(" ", "")
        var = var.replace("--", "")
        varsVal[varsName.index(var)] = varsVal[varsName.index(var)] - 1

    # "while.cmd1,cmd2,...": repeat the command list until %break is hit.
    if (command.find("while") >= 0 and command.find(".") >= 0 and command.find("def") == -1):
        whi = command.replace(" ", "")
        wh = whi.split(".")
        wh = wh[1].split(",")  # the command list
        while True:
            stopped = False
            for i in range(len(wh)):
                if (wh[i] != "%break"):
                    main(wh[i])  # recursively execute each command
                else:
                    # FIX: the original `break` only left the inner for-loop,
                    # so `while True` could never terminate on %break.
                    stopped = True
                    break
            if stopped:
                break

    # "def(name,cmd1,cmd2,...)": register an inline function definition.
    if (command.find("def(") >= 0 and command.find(")") >= 0
            and command.find("while") == -1 and command.find("if") == -1):
        fn = command.replace("def(", "")
        fn = fn.replace(")", "")
        fna = fn.split(",")
        fnn = fna[0]   # first part is the function name
        fnc = fna
        del fnc[0]     # the rest are its commands
        funcName.append(fnn)
        funcComm.append(fnc)

    # "lof(name)": call a previously registered function.
    if (command.find("lof(") >= 0 and command.find(")") >= 0
            and command.find("while") >= -1 and command.find("def") >= -1
            and command.find("if") >= -1):
        # NOTE(review): the ">= -1" guards are always true; kept verbatim.
        fnl = command.split("(")
        fnn = fnl[1].replace(")", "")
        print(fnn)
        print(funcComm[funcName.index(fnn)])
        # FIX: the original iterated over a freshly created empty list, so a
        # stored function body never actually executed.
        comm = funcComm[funcName.index(fnn)]
        for i in range(len(comm)):
            main(comm[i])

    # "if 'a OP b' cmd1`cmd2`...": conditional execution (==, >=, <=).
    if (command.find("if") >= 0 and command.find("'") >= 0
            and command.find("while") == -1 and command.find("def") == -1):
        ifa = command.split("'")  # [prefix, condition, command part]
        ifb = ifa[2].split("`")   # commands to run when the condition holds
        if (ifa[1].find("==") >= 0):
            ifc = ifa[1].split("==")
            ifv1 = ifc[0].replace(" ", "")  # first variable name
            ifv2 = ifc[1].replace(" ", "")  # second variable name
            if (varsVal[varsName.index(ifv1)] == varsVal[varsName.index(ifv2)]):
                for i in range(len(ifb)):
                    main(ifb[i])
        if (ifa[1].find(">=") >= 0):
            ifc = ifa[1].split(">=")
            ifv1 = ifc[0].replace(" ", "")
            ifv2 = ifc[1].replace(" ", "")
            if (varsVal[varsName.index(ifv1)] >= varsVal[varsName.index(ifv2)]):
                for i in range(len(ifb)):
                    main(ifb[i])
        if (ifa[1].find("<=") >= 0):
            ifc = ifa[1].split("<=")
            ifv1 = ifc[0].replace(" ", "")
            ifv2 = ifc[1].replace(" ", "")
            if (varsVal[varsName.index(ifv1)] <= varsVal[varsName.index(ifv2)]):
                for i in range(len(ifb)):
                    main(ifb[i])

    # "%clear": crude console clear by printing 200 blank lines.
    if (command.find("%clear") >= 0 and command.find("while") == -1
            and command.find("def") == -1):
        for i in range(200):
            print("\n")
        command = ""  # consume the command
|
from datetime import datetime, time, timedelta
from itertools import chain
from random import Random
import pytz
from ...core.util import find_overlap
def fetch_free_busy(date, tz, uid):
    """Return busy intervals for *date* clipped to timezone *tz*.

    Each interval is returned as ((hour, minute), (hour, minute)) local to
    *tz*.  Data comes from the deterministic generator keyed on date + uid.
    """
    tzinfo = pytz.timezone(tz)

    # Neighbouring days are generated too, since a timezone offset can pull
    # their events into the requested date.
    intervals = []
    for offset in (-1, 0, 1):
        intervals.extend(_generate_free_busy(date + timedelta(days=offset), uid))

    result = []
    for begin, finish in intervals:
        clipped = find_overlap(date, begin, finish, tzinfo)
        if clipped:
            first, second = clipped
            result.append(
                ((first.hour, first.minute), (second.hour, second.minute))
            )
    return result
def _generate_free_busy(date, uid):
    """
    Deterministically fabricate one or two busy intervals (UTC) for a day.

    A private Random seeded with the date and uid makes the fake data
    stable across calls for the same inputs.
    """
    rnd = Random(date.isoformat() + uid)
    # NOTE: the order of rnd.randint() calls below must stay fixed so the
    # generated schedule is reproducible for a given (date, uid).
    if rnd.randint(0, 1):
        # Variant 1: a single long interval on whole hours.
        first_hour = rnd.randint(4, 19)
        last_hour = rnd.randint(first_hour + 1, 21)
        return [
            (
                pytz.utc.localize(datetime.combine(date, time(first_hour))),
                pytz.utc.localize(datetime.combine(date, time(last_hour))),
            )
        ]
    else:
        # Variant 2: a short morning interval plus an early-afternoon one.
        am_start = rnd.randint(5, 8)
        am_end = rnd.randint(am_start + 1, am_start + 3)
        pm_start = rnd.randint(12, 14)
        pm_end = rnd.randint(pm_start + 1, pm_start + 5)
        morning = (
            pytz.utc.localize(
                datetime.combine(date, time(am_start, 30 * rnd.randint(0, 1)))
            ),
            pytz.utc.localize(
                datetime.combine(date, time(am_end, 30 * rnd.randint(0, 1)))
            ),
        )
        afternoon = (
            pytz.utc.localize(
                datetime.combine(date, time(pm_start, 15 * rnd.randint(0, 1)))
            ),
            pytz.utc.localize(datetime.combine(date, time(pm_end))),
        )
        return [morning, afternoon]
|
// Math science
const utils = {
  // Sum an array
  sum: arr => arr.reduce((acc, curr) => acc + curr, 0),
  // create an array of numbers between min and max (edges included)
  range: (min, max) => Array.from({ length: max - min + 1 }, (_, i) => min + i),
  // pick a random integer between min and max (edges included)
  // BUGFIX: the old `min + Math.floor(max * Math.random())` drew from
  // [min, min + max - 1], which is wrong whenever min !== 0 (e.g.
  // random(5, 10) could return up to 14).
  random: (min, max) => min + Math.floor((max - min + 1) * Math.random()),
  // Given an array of numbers and a max...
  // Pick a random sum (<= max) from the set of all available sums in arr
  randomSumIn: (arr, max) => {
    const sets = [[]];
    const sums = [];
    for (let i = 0; i < arr.length; i++) {
      // snapshot the length: `sets` grows while we iterate over it
      for (let j = 0, len = sets.length; j < len; j++) {
        const candidateSet = sets[j].concat(arr[i]);
        const candidateSum = utils.sum(candidateSet);
        if (candidateSum <= max) {
          sets.push(candidateSet);
          sums.push(candidateSum);
        }
      }
    }
    // last valid index is length - 1 now that random() includes both edges
    return sums[utils.random(0, sums.length - 1)];
  },
};
export default utils;
|
import { createStore, applyMiddleware, compose, combineReducers } from 'redux';
import { composeWithDevTools } from 'redux-devtools-extension/logOnlyInProduction';
import { connectRoutes } from 'redux-first-router';
import { reducer as reduxFormReducer } from 'redux-form';
import routesMap from './routesMap'
import * as reducers from './reducers'
// Build the redux store wired to redux-first-router and redux-form.
export default (history, preloadedState) => {
  const { reducer, middleware, enhancer, thunk } = connectRoutes(
    history,
    routesMap
  );
  const rootReducer = combineReducers({ ...reducers, form: reduxFormReducer, location: reducer });
  const middlewares = applyMiddleware(middleware);
  const enhancers = composeEnhancers(enhancer, middlewares);
  const store = createStore(rootReducer, preloadedState, enhancers);
  if (module.hot && process.env.NODE_ENV === 'development') {
    module.hot.accept('./reducers/index', () => {
      const nextReducers = require('./reducers/index');
      // BUGFIX: the hot-swapped root reducer previously omitted
      // `form: reduxFormReducer`, silently wiping all redux-form state
      // on every hot reload.
      const nextRootReducer = combineReducers({
        ...nextReducers,
        form: reduxFormReducer,
        location: reducer,
      });
      store.replaceReducer(nextRootReducer);
    });
  }
  return { store, thunk };
}
// In the browser use the devtools-aware composer; on the server (no
// `window`) fall back to redux's plain compose.
const composeEnhancers = (...args) => {
  if (typeof window !== 'undefined') {
    return composeWithDevTools({})(...args);
  }
  return compose(...args);
};
|
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// **GENERATED FILE DO NOT MODIFY**
//
// This file is generated using:
// `tool/update-boringssl.py`
#include "../../../../../third_party/boringssl/src/crypto/cpu-intel.c"
|
const path = require('path');
const { appendFile, mkdirSync, existsSync } = require('fs');
/**
 * Logs to the console a string like > [ 18/12/2019 - 03:53:33 ] - src\index.js:7 -> Teste de Log
 * @param {string|object} message Message to log on console
 * @param {boolean} [fileSave] File to save log
 * @returns {void}
 */
// Public API: one logger function per severity level.
module.exports = {
    info,
    warn,
    error
};
/**
 * Logs to the console a string like > [ 18/12/2019 - 03:53:33 - warn ] - src\index.js:7 -> Teste de Log
 * @param {string|object} message Message to log on console
 * @returns {void}
 */
function warn(message) {
    if (typeof message === "object") {
        message = JSON.stringify(message);
    }
    // Swap in a structured stack formatter just long enough to read the
    // caller's frame, then restore it. BUGFIX: the previous code left the
    // global Error.prepareStackTrace overridden forever, breaking stack
    // formatting for every other Error in the process.
    const previous = Error.prepareStackTrace;
    Error.prepareStackTrace = (_, stack) => stack;
    const err = new Error();
    // BUGFIX: `arguments.callee` throws a TypeError in strict mode;
    // reference the function by name instead.
    Error.captureStackTrace(err, warn);
    const callee = err.stack[0]; // must be read before restoring the hook
    Error.prepareStackTrace = previous;
    let relativePath = path.relative(process.cwd(), callee.getFileName());
    let log = formatDate(message, `${relativePath}:${callee.getLineNumber()}`, 'warn');
    console.log(log);
    let localPath = path.resolve('log/loggers.log').replace(/\\/g, '/');
    writeLogFile(log, localPath);
}
/**
 * Logs to the console a string like > [ 18/12/2019 - 03:53:33 - info ] - src\index.js:7 -> Teste de Log
 * @param {string|object} message Message to log on console
 * @returns {void}
 */
function info(message) {
    if (typeof message === "object") {
        message = JSON.stringify(message);
    }
    // Swap in a structured stack formatter just long enough to read the
    // caller's frame, then restore it. BUGFIX: the previous code left the
    // global Error.prepareStackTrace overridden forever.
    const previous = Error.prepareStackTrace;
    Error.prepareStackTrace = (_, stack) => stack;
    const err = new Error();
    // BUGFIX: `arguments.callee` throws a TypeError in strict mode;
    // reference the function by name instead.
    Error.captureStackTrace(err, info);
    const callee = err.stack[0]; // must be read before restoring the hook
    Error.prepareStackTrace = previous;
    let relativePath = path.relative(process.cwd(), callee.getFileName());
    let log = formatDate(message, `${relativePath}:${callee.getLineNumber()}`, 'info');
    console.log(log);
    let localPath = path.resolve('log/loggers.log').replace(/\\/g, '/');
    writeLogFile(log, localPath);
}
/**
 * Logs to the console a string like > [ 18/12/2019 - 03:53:33 - error ] - src\index.js:7 -> Teste de Log
 * @param {string|object} message Message to log on console
 * @returns {void}
 */
function error(message) {
    if (typeof message === "object") {
        message = JSON.stringify(message);
    }
    // Swap in a structured stack formatter just long enough to read the
    // caller's frame, then restore it. BUGFIX: the previous code left the
    // global Error.prepareStackTrace overridden forever.
    const previous = Error.prepareStackTrace;
    Error.prepareStackTrace = (_, stack) => stack;
    const err = new Error();
    // BUGFIX: `arguments.callee` throws a TypeError in strict mode;
    // reference the function by name instead.
    Error.captureStackTrace(err, error);
    const callee = err.stack[0]; // must be read before restoring the hook
    Error.prepareStackTrace = previous;
    let relativePath = path.relative(process.cwd(), callee.getFileName());
    let log = formatDate(message, `${relativePath}:${callee.getLineNumber()}`, 'error');
    console.log(log);
    let localPath = path.resolve('log/loggers.log').replace(/\\/g, '/');
    writeLogFile(log, localPath);
}
/**
 * Builds the log line: > [ DD/MM/YYYY - HH:MM:SS - level ] - location -> message
 * Uses the current UTC time (via toISOString).
 */
function formatDate(message, local, logType) {
    // ISO format is "YYYY-MM-DDTHH:MM:SS.mmmZ" — split it into its parts.
    const iso = new Date().toISOString();
    const [datePart, timePart] = iso.split('T');
    const [year, month, day] = datePart.split('-');
    const [hour, minute, secondWithMillis] = timePart.split(':');
    const second = secondWithMillis.split('.')[0];
    return `> [ ${day}/${month}/${year} - ${hour}:${minute}:${second} - ${logType} ] - ${local} -> ${message}`;
}
/**
 * Appends one log line to `filePath`, creating the directory if needed.
 * @param {string} messageLog Formatted log line.
 * @param {string} filePath Forward-slash-normalized target path.
 */
async function writeLogFile(messageLog, filePath) {
    // Derive the directory portion (callers normalize to forward slashes).
    const segments = filePath.split('/');
    const dir = segments.slice(0, segments.length - 1).join('/');
    if (!existsSync(dir)) {
        // BUGFIX: mkdirSync is synchronous and takes (path, options) only;
        // the callback previously passed as a third argument was silently
        // ignored and its "File created" log could never fire.
        mkdirSync(dir, { recursive: true });
    }
    appendFile(filePath, messageLog + '\n', (err) => {
        if (err) {
            throw err;
        }
    });
}
|
/*
* catberry
*
* Copyright (c) 2014 Denis Rechkunov and project contributors.
*
* catberry's license follows:
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* This license applies to all parts of catberry that are not externally
* maintained libraries.
*/
'use strict';
module.exports = ModuleApiProvider;
var util = require('util'),
propertyHelper = require('../helpers/propertyHelper'),
ModuleApiProviderBase = require('../ModuleApiProviderBase');
util.inherits(ModuleApiProvider, ModuleApiProviderBase);
/**
 * Creates a new instance of the server-side module API provider.
 * @param {ServiceLocator} $serviceLocator Service locator
 * to resolve dependencies.
 * @constructor
 * @extends ModuleApiProviderBase
 */
function ModuleApiProvider($serviceLocator) {
	ModuleApiProviderBase.call(this, $serviceLocator);
	// Server flavour of the API: the environment flags are constants.
	propertyHelper.defineReadOnly(this, 'isServer', true);
	propertyHelper.defineReadOnly(this, 'isBrowser', false);
}
/**
 * URL of the last redirect requested in this context, or null if none.
 * @type {string}
 */
ModuleApiProvider.prototype.redirectedTo = null;
/**
* Determines if clearHash method was called in this context.
* @type {Boolean}
*/
ModuleApiProvider.prototype.isHashCleared = false;
/**
 * Redirects current page to specified URL.
 * On the server the URL is only recorded in `redirectedTo`.
 * @param {string} locationUrl URL to direct.
 */
ModuleApiProvider.prototype.redirect = function (locationUrl) {
	this.redirectedTo = locationUrl;
};
/**
 * Clears current location's hash.
 * On the server this only raises the `isHashCleared` flag.
 */
ModuleApiProvider.prototype.clearHash = function () {
	this.isHashCleared = true;
};
/**
 * Does nothing because on server it is impossible.
 * @param {string} moduleName Name of module to render.
 * @param {string} placeholderName Name of placeholder to refresh.
 * @param {Function} callback Callback on finish.
 */
ModuleApiProvider.prototype.requestRefresh =
	function (moduleName, placeholderName, callback) {
		// Just acknowledge the caller when a callback was supplied.
		if (callback instanceof Function) {
			callback();
		}
	};
/**
 * Does nothing because on server it is impossible.
 * @param {string} moduleName Name of module to render.
 * @param {string} placeholderName Name of placeholder to refresh.
 * @param {Function} callback Callback on finish.
 */
ModuleApiProvider.prototype.requestRender =
	function (moduleName, placeholderName, callback) {
		// Just acknowledge the caller when a callback was supplied.
		if (callback instanceof Function) {
			callback();
		}
	};
|
"""
numpy.ma : a package to handle missing or invalid values.
This package was initially written for numarray by Paul F. Dubois
at Lawrence Livermore National Laboratory.
In 2006, the package was completely rewritten by Pierre Gerard-Marchant
(University of Georgia) to make the MaskedArray class a subclass of ndarray,
and to improve support of structured arrays.
Copyright 1999, 2000, 2001 Regents of the University of California.
Released for unlimited redistribution.
* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois.
* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant
(pgmdevlist_AT_gmail_DOT_com)
* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com)
.. moduleauthor:: Pierre Gerard-Marchant
"""
# pylint: disable-msg=E1002
from __future__ import division, absolute_import, print_function
import sys
import warnings
from functools import reduce
import numpy as np
import numpy.core.umath as umath
import numpy.core.numerictypes as ntypes
from numpy import ndarray, amax, amin, iscomplexobj, bool_
from numpy import array as narray
from numpy.lib.function_base import angle
from numpy.compat import getargspec, formatargspec, long, basestring
from numpy import expand_dims as n_expand_dims
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
__author__ = "Pierre GF Gerard-Marchant"
__docformat__ = "restructuredtext en"
__all__ = ['MAError', 'MaskError', 'MaskType', 'MaskedArray',
'bool_',
'abs', 'absolute', 'add', 'all', 'allclose', 'allequal', 'alltrue',
'amax', 'amin', 'angle', 'anom', 'anomalies', 'any', 'append', 'arange',
'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2',
'arctanh', 'argmax', 'argmin', 'argsort', 'around',
'array', 'asarray', 'asanyarray',
'bitwise_and', 'bitwise_or', 'bitwise_xor',
'ceil', 'choose', 'clip', 'common_fill_value', 'compress',
'compressed', 'concatenate', 'conjugate', 'copy', 'cos', 'cosh',
'count', 'cumprod', 'cumsum',
'default_fill_value', 'diag', 'diagonal', 'diff', 'divide', 'dump',
'dumps',
'empty', 'empty_like', 'equal', 'exp', 'expand_dims',
'fabs', 'flatten_mask', 'fmod', 'filled', 'floor', 'floor_divide',
'fix_invalid', 'flatten_structured_array', 'frombuffer', 'fromflex',
'fromfunction',
'getdata', 'getmask', 'getmaskarray', 'greater', 'greater_equal',
'harden_mask', 'hypot',
'identity', 'ids', 'indices', 'inner', 'innerproduct',
'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray',
'left_shift', 'less', 'less_equal', 'load', 'loads', 'log', 'log2',
'log10', 'logical_and', 'logical_not', 'logical_or', 'logical_xor',
'make_mask', 'make_mask_descr', 'make_mask_none', 'mask_or',
'masked', 'masked_array', 'masked_equal', 'masked_greater',
'masked_greater_equal', 'masked_inside', 'masked_invalid',
'masked_less', 'masked_less_equal', 'masked_not_equal',
'masked_object', 'masked_outside', 'masked_print_option',
'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum',
'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value',
'mod', 'multiply', 'mvoid',
'negative', 'nomask', 'nonzero', 'not_equal',
'ones', 'outer', 'outerproduct',
'power', 'prod', 'product', 'ptp', 'put', 'putmask',
'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize',
'right_shift', 'round_', 'round',
'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue',
'sort', 'soften_mask', 'sqrt', 'squeeze', 'std', 'subtract', 'sum',
'swapaxes',
'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide',
'var', 'where',
'zeros']
# Scalar type used for mask elements.
MaskType = np.bool_
# Sentinel meaning "no values are masked": a boolean False scalar.
nomask = MaskType(0)
def doc_note(initialdoc, note):
    """
    Return `initialdoc` with `note` appended as a Notes section.

    Returns None when there is no docstring to extend, and the original
    docstring unchanged when there is no note to add.
    """
    if initialdoc is None:
        return None
    if note is None:
        return initialdoc
    template = """
    %s
    Notes
    -----
    %s
    """
    return template % (initialdoc, note)
def get_object_signature(obj):
    """
    Return the call signature of `obj` as a string, or '' when it cannot
    be introspected (e.g. builtins implemented in C raise TypeError).
    """
    try:
        return formatargspec(*getargspec(obj))
    except TypeError:
        # No argspec available; fall back to an empty signature.
        return ''
#####--------------------------------------------------------------------------
#---- --- Exceptions ---
#####--------------------------------------------------------------------------
class MAError(Exception):
    """Base class for all masked-array related errors."""
    pass
class MaskError(MAError):
    """Error raised for problems related to the mask itself."""
    pass
#####--------------------------------------------------------------------------
#---- --- Filling options ---
#####--------------------------------------------------------------------------
# b: boolean - c: complex - f: floats - i: integer - O: object - S: string
default_filler = {'b': True,
'c' : 1.e20 + 0.0j,
'f' : 1.e20,
'i' : 999999,
'O' : '?',
'S' : 'N/A',
'u' : 999999,
'V' : '???',
'U' : 'N/A',
'M8[D]' : np.datetime64('NaT', 'D'),
'M8[us]' : np.datetime64('NaT', 'us')
}
max_filler = ntypes._minvals
max_filler.update([(k, -np.inf) for k in [np.float32, np.float64]])
min_filler = ntypes._maxvals
min_filler.update([(k, +np.inf) for k in [np.float32, np.float64]])
if 'float128' in ntypes.typeDict:
max_filler.update([(np.float128, -np.inf)])
min_filler.update([(np.float128, +np.inf)])
def default_fill_value(obj):
    """
    Return the default fill value for the argument object.

    The default filling value depends on the datatype of the input
    array or the type of the input scalar:

       ========  ========
       datatype  default
       ========  ========
       bool      True
       int       999999
       float     1.e20
       complex   1.e20+0j
       object    '?'
       string    'N/A'
       ========  ========

    Parameters
    ----------
    obj : ndarray, dtype or scalar
        The array data-type or scalar for which the default fill value
        is returned.

    Returns
    -------
    fill_value : scalar
        The default fill value.

    Examples
    --------
    >>> np.ma.default_fill_value(1)
    999999
    >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi]))
    1e+20
    >>> np.ma.default_fill_value(np.dtype(complex))
    (1e+20+0j)

    """
    if hasattr(obj, 'dtype'):
        defval = _check_fill_value(None, obj.dtype)
    elif isinstance(obj, np.dtype):
        if obj.subdtype:
            defval = default_filler.get(obj.subdtype[0].kind, '?')
        elif obj.kind == 'M':
            # Datetime dtypes key the filler table by their type string.
            defval = default_filler.get(obj.str[1:], '?')
        else:
            defval = default_filler.get(obj.kind, '?')
    elif isinstance(obj, float):
        defval = default_filler['f']
    elif isinstance(obj, int) or isinstance(obj, long):
        defval = default_filler['i']
    elif isinstance(obj, basestring) or isinstance(obj, bytes):
        # BUGFIX: the previous code referenced the bare name `unicode`,
        # which does not exist on Python 3 and raised NameError for bytes
        # input there.  The 'S' and 'U' fillers are both 'N/A', so a
        # single branch covers narrow and wide strings on both versions.
        defval = default_filler['S']
    elif isinstance(obj, complex):
        defval = default_filler['c']
    else:
        defval = default_filler['O']
    return defval
def _recursive_extremum_fill_value(ndtype, extremum):
names = ndtype.names
if names:
deflist = []
for name in names:
fval = _recursive_extremum_fill_value(ndtype[name], extremum)
deflist.append(fval)
return tuple(deflist)
return extremum[ndtype]
def minimum_fill_value(obj):
    """
    Return the maximum value that can be represented by the dtype of an object.

    Such a value is a suitable fill value when computing the minimum of
    an array: it can never win the comparison, so masked entries are
    effectively ignored.

    Parameters
    ----------
    obj : ndarray or dtype
        An object that can be queried for its numeric type.

    Returns
    -------
    val : scalar
        The maximum representable value.

    Raises
    ------
    TypeError
        If `obj` isn't a suitable numeric type.

    See Also
    --------
    maximum_fill_value : The inverse function.
    set_fill_value : Set the filling value of a masked array.
    MaskedArray.fill_value : Return current fill value.

    """
    # Guard-clause chain: first match wins (order matters — on Python 3
    # `long` aliases `int`, so the int branch shadows it harmlessly).
    if hasattr(obj, 'dtype'):
        return _recursive_extremum_fill_value(obj.dtype, min_filler)
    if isinstance(obj, float):
        return min_filler[ntypes.typeDict['float_']]
    if isinstance(obj, int):
        return min_filler[ntypes.typeDict['int_']]
    if isinstance(obj, long):
        return min_filler[ntypes.typeDict['uint']]
    if isinstance(obj, np.dtype):
        return min_filler[obj]
    raise TypeError("Unsuitable type for calculating minimum.")
def maximum_fill_value(obj):
    """
    Return the minimum value that can be represented by the dtype of an object.

    Such a value is a suitable fill value when computing the maximum of
    an array: it can never win the comparison, so masked entries are
    effectively ignored.

    Parameters
    ----------
    obj : {ndarray, dtype}
        An object that can be queried for its numeric type.

    Returns
    -------
    val : scalar
        The minimum representable value.

    Raises
    ------
    TypeError
        If `obj` isn't a suitable numeric type.

    See Also
    --------
    minimum_fill_value : The inverse function.
    set_fill_value : Set the filling value of a masked array.
    MaskedArray.fill_value : Return current fill value.

    """
    # Guard-clause chain: first match wins (order matters — on Python 3
    # `long` aliases `int`, so the int branch shadows it harmlessly).
    if hasattr(obj, 'dtype'):
        return _recursive_extremum_fill_value(obj.dtype, max_filler)
    if isinstance(obj, float):
        return max_filler[ntypes.typeDict['float_']]
    if isinstance(obj, int):
        return max_filler[ntypes.typeDict['int_']]
    if isinstance(obj, long):
        return max_filler[ntypes.typeDict['uint']]
    if isinstance(obj, np.dtype):
        return max_filler[obj]
    raise TypeError("Unsuitable type for calculating maximum.")
def _recursive_set_default_fill_value(dtypedescr):
    """
    Build a tuple of default fill values, one per field of `dtypedescr`.

    A nested structured field (whose type part is itself a descr list)
    recurses into its own tuple of defaults.
    """
    defaults = []
    for descr_item in dtypedescr:
        fieldtype = descr_item[1]
        if isinstance(fieldtype, list):
            defaults.append(
                tuple(_recursive_set_default_fill_value(fieldtype)))
        else:
            defaults.append(default_fill_value(np.dtype(fieldtype)))
    return tuple(defaults)
def _recursive_set_fill_value(fillvalue, dtypedescr):
fillvalue = np.resize(fillvalue, len(dtypedescr))
output_value = []
for (fval, descr) in zip(fillvalue, dtypedescr):
cdtype = descr[1]
if isinstance(cdtype, list):
output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))
else:
output_value.append(np.array(fval, dtype=cdtype).item())
return tuple(output_value)
def _check_fill_value(fill_value, ndtype):
    """
    Private function validating the given `fill_value` for the given dtype.

    If fill_value is None, it is set to the default corresponding to the dtype
    if this latter is standard (no fields). If the datatype is flexible (named
    fields), fill_value is set to a tuple whose elements are the default fill
    values corresponding to each field.

    If fill_value is not None, its value is forced to the given dtype.

    Returns the validated fill value wrapped in a 0-d ndarray of `ndtype`.
    """
    ndtype = np.dtype(ndtype)
    fields = ndtype.fields
    if fill_value is None:
        # No value supplied: use the per-dtype (or per-field) defaults.
        if fields:
            descr = ndtype.descr
            fill_value = np.array(_recursive_set_default_fill_value(descr),
                                  dtype=ndtype,)
        else:
            fill_value = default_fill_value(ndtype)
    elif fields:
        # Structured dtype: coerce the supplied value field by field.
        fdtype = [(_[0], _[1]) for _ in ndtype.descr]
        if isinstance(fill_value, (ndarray, np.void)):
            try:
                fill_value = np.array(fill_value, copy=False, dtype=fdtype)
            except ValueError:
                err_msg = "Unable to transform %s to dtype %s"
                raise ValueError(err_msg % (fill_value, fdtype))
        else:
            # Scalars/sequences go through the recursive per-field cast.
            descr = ndtype.descr
            fill_value = np.asarray(fill_value, dtype=object)
            fill_value = np.array(_recursive_set_fill_value(fill_value, descr),
                                  dtype=ndtype)
    else:
        # Plain dtype: a string fill only makes sense for flexible kinds.
        if isinstance(fill_value, basestring) and (ndtype.char not in 'OSVU'):
            err_msg = "Cannot set fill value of string with array of dtype %s"
            raise TypeError(err_msg % ndtype)
        else:
            # In case we want to convert 1e20 to int...
            try:
                fill_value = np.array(fill_value, copy=False, dtype=ndtype)
            except OverflowError:
                # Raise TypeError instead of OverflowError. OverflowError
                # is seldom used, and the real problem here is that the
                # passed fill_value is not compatible with the ndtype.
                err_msg = "Fill value %s overflows dtype %s"
                raise TypeError(err_msg % (fill_value, ndtype))
    return np.array(fill_value)
def set_fill_value(a, fill_value):
    """
    Set the filling value of a, if a is a masked array.

    This function changes the fill value of the masked array `a` in place.
    If `a` is not a masked array, the function returns silently, without
    doing anything.

    Parameters
    ----------
    a : array_like
        Input array.
    fill_value : dtype
        Filling value. A consistency test is performed to make sure
        the value is compatible with the dtype of `a`.

    Returns
    -------
    None
        Nothing returned by this function.

    See Also
    --------
    maximum_fill_value : Return the default fill value for a dtype.
    MaskedArray.fill_value : Return current fill value.
    MaskedArray.set_fill_value : Equivalent method.

    """
    if not isinstance(a, MaskedArray):
        # Plain arrays and sequences have no fill value: silently ignore.
        return
    a.set_fill_value(fill_value)
def get_fill_value(a):
    """
    Return the filling value of a, if any. Otherwise, returns the
    default filling value for that type.
    """
    if isinstance(a, MaskedArray):
        return a.fill_value
    return default_fill_value(a)
def common_fill_value(a, b):
    """
    Return the common filling value of two masked arrays, if any.

    If ``a.fill_value == b.fill_value``, return that fill value,
    otherwise return None.

    Parameters
    ----------
    a, b : MaskedArray
        The masked arrays for which to compare fill values.

    Returns
    -------
    fill_value : scalar or None
        The common fill value, or None.

    Examples
    --------
    >>> x = np.ma.array([0, 1.], fill_value=3)
    >>> y = np.ma.array([0, 1.], fill_value=3)
    >>> np.ma.common_fill_value(x, y)
    3.0

    """
    fill_a = get_fill_value(a)
    fill_b = get_fill_value(b)
    return fill_a if fill_a == fill_b else None
#####--------------------------------------------------------------------------
def filled(a, fill_value=None):
    """
    Return input as an array with masked data replaced by a fill value.

    If `a` is not a `MaskedArray`, `a` itself is returned (converted to
    an ndarray when necessary). If `a` is a `MaskedArray` and
    `fill_value` is None, ``a.fill_value`` is used.

    Parameters
    ----------
    a : MaskedArray or array_like
        An input object.
    fill_value : scalar, optional
        Filling value. Default is None.

    Returns
    -------
    a : ndarray
        The filled array.

    See Also
    --------
    compressed

    """
    # Duck typing: anything exposing .filled() handles the job itself.
    if hasattr(a, 'filled'):
        return a.filled(fill_value)
    if isinstance(a, ndarray):
        return a
    if isinstance(a, dict):
        # Wrap mappings as a single object-dtype element.
        return np.array(a, 'O')
    return np.array(a)
#####--------------------------------------------------------------------------
def get_masked_subclass(*arrays):
    """
    Return the youngest subclass of MaskedArray from a list of (masked) arrays.

    In case of siblings, the first listed takes over.
    """
    if len(arrays) == 1:
        rcls = type(arrays[0])
        if not issubclass(rcls, MaskedArray):
            rcls = MaskedArray
    else:
        candidates = [type(a) for a in arrays]
        rcls = candidates[0]
        if not issubclass(rcls, MaskedArray):
            rcls = MaskedArray
        for cls in candidates[1:]:
            # A strict subclass is "younger": it wins over the current pick.
            if issubclass(cls, rcls):
                rcls = cls
    # Never hand back the MaskedConstant singleton class.
    if rcls.__name__ == 'MaskedConstant':
        return MaskedArray
    return rcls
#####--------------------------------------------------------------------------
def getdata(a, subok=True):
    """
    Return the data of a masked array as an ndarray.

    Return the data of `a` (if any) as an ndarray if `a` is a
    ``MaskedArray``, else return `a` as a ndarray or subclass (depending
    on `subok`) if not.

    Parameters
    ----------
    a : array_like
        Input ``MaskedArray``, alternatively a ndarray or a subclass thereof.
    subok : bool
        Whether to force the output to be a `pure` ndarray (False) or to
        return a subclass of ndarray if appropriate (True, default).

    See Also
    --------
    getmask : Return the mask of a masked array, or nomask.
    getmaskarray : Return the mask of a masked array, or full array of False.

    """
    # EAFP: masked arrays carry their payload in ._data; everything else
    # goes through np.array without copying.
    try:
        data = a._data
    except AttributeError:
        data = np.array(a, copy=False, subok=subok)
    return data if subok else data.view(ndarray)


get_data = getdata
def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
    """
    Return input with invalid data masked and replaced by a fill value.

    Invalid data means values of `nan`, `inf`, etc.

    Parameters
    ----------
    a : array_like
        Input array, a (subclass of) ndarray.
    mask : sequence, optional
        Mask applied in addition to the invalid entries.
    copy : bool, optional
        Whether to use a copy of `a` (True) or to fix `a` in place (False).
        Default is True.
    fill_value : scalar, optional
        Value used for fixing invalid data. Default is None, in which case
        the ``a.fill_value`` is used.

    Returns
    -------
    b : MaskedArray
        The input array with invalid entries fixed.

    Notes
    -----
    A copy is performed by default.

    """
    a = masked_array(a, copy=copy, mask=mask, subok=True)
    invalid = ~np.isfinite(a._data)
    if not invalid.any():
        # Nothing to fix: hand the (possibly copied) array straight back.
        return a
    a._mask |= invalid
    if fill_value is None:
        fill_value = a.fill_value
    # Overwrite the invalid payload so later unmasked use stays finite.
    a._data[invalid] = fill_value
    return a
#####--------------------------------------------------------------------------
#---- --- Ufuncs ---
#####--------------------------------------------------------------------------
# Registry: ufunc -> domain object restricting its valid inputs (or None).
ufunc_domain = {}
# Registry: ufunc -> fill value substituted for masked entries.
ufunc_fills = {}
class _DomainCheckInterval:
"""
Define a valid interval, so that :
``domain_check_interval(a,b)(x) == True`` where
``x < a`` or ``x > b``.
"""
def __init__(self, a, b):
"domain_check_interval(a,b)(x) = true where x < a or y > b"
if (a > b):
(a, b) = (b, a)
self.a = a
self.b = b
def __call__ (self, x):
"Execute the call behavior."
return umath.logical_or(umath.greater (x, self.b),
umath.less(x, self.a))
class _DomainTan:
"""Define a valid interval for the `tan` function, so that:
``domain_tan(eps) = True`` where ``abs(cos(x)) < eps``
"""
def __init__(self, eps):
"domain_tan(eps) = true where abs(cos(x)) < eps)"
self.eps = eps
def __call__ (self, x):
"Executes the call behavior."
return umath.less(umath.absolute(umath.cos(x)), self.eps)
class _DomainSafeDivide:
"""Define a domain for safe division."""
def __init__ (self, tolerance=None):
self.tolerance = tolerance
def __call__ (self, a, b):
# Delay the selection of the tolerance to here in order to reduce numpy
# import times. The calculation of these parameters is a substantial
# component of numpy's import time.
if self.tolerance is None:
self.tolerance = np.finfo(float).tiny
return umath.absolute(a) * self.tolerance >= umath.absolute(b)
class _DomainGreater:
"""DomainGreater(v)(x) is True where x <= v."""
def __init__(self, critical_value):
"DomainGreater(v)(x) = true where x <= v"
self.critical_value = critical_value
def __call__ (self, x):
"Executes the call behavior."
return umath.less_equal(x, self.critical_value)
class _DomainGreaterEqual:
"""DomainGreaterEqual(v)(x) is True where x < v."""
def __init__(self, critical_value):
"DomainGreaterEqual(v)(x) = true where x < v"
self.critical_value = critical_value
def __call__ (self, x):
"Executes the call behavior."
return umath.less(x, self.critical_value)
#..............................................................................
class _MaskedUnaryOperation:
    """
    Defines masked version of unary operations, where invalid values are
    pre-masked.

    Parameters
    ----------
    mufunc : callable
        The function for which to define a masked version. Made available
        as ``_MaskedUnaryOperation.f``.
    fill : scalar, optional
        Filling value, default is 0.
    domain : class instance
        Domain for the function. Should be one of the ``_Domain*``
        classes. Default is None.

    """
    def __init__(self, mufunc, fill=0, domain=None):
        self.f = mufunc
        self.fill = fill
        self.domain = domain
        self.__doc__ = getattr(mufunc, "__doc__", str(mufunc))
        self.__name__ = getattr(mufunc, "__name__", str(mufunc))
        # Register the domain and fill so other wrappers can look them up.
        ufunc_domain[mufunc] = domain
        ufunc_fills[mufunc] = fill

    def __call__(self, a, *args, **kwargs):
        "Execute the call behavior."
        d = getdata(a)
        # Case 1.1. : Domained function
        if self.domain is not None:
            # IDIOM FIX: pass the error settings directly to errstate
            # instead of the legacy `with np.errstate(): np.seterr(...)`
            # pattern — behavior is identical (errstate restores on exit).
            with np.errstate(divide='ignore', invalid='ignore'):
                result = self.f(d, *args, **kwargs)
            # Mask everything non-finite, outside the domain, or already
            # masked in the input.
            m = ~umath.isfinite(result)
            m |= self.domain(d)
            m |= getmask(a)
        # Case 1.2. : Function without a domain
        else:
            result = self.f(d, *args, **kwargs)
            m = getmask(a)
        # Case 2.1. : The result is a scalar — collapse to the masked
        # singleton when its (single) value is masked.
        if not result.ndim:
            if m:
                return masked
            return result
        # Case 2.2. The result is an array.
        # We need to fill the invalid data back w/ the input: the ufunc may
        # have produced junk under the mask (in C we would simply skip the
        # element and keep the original).
        if m is not nomask:
            # In case result has a lower dtype than the inputs (as in equal)
            try:
                np.copyto(result, d, where=m)
            except TypeError:
                pass
        # View the result as the appropriate masked subclass and carry the
        # mask and attributes of the input over.
        if isinstance(a, MaskedArray):
            subtype = type(a)
        else:
            subtype = MaskedArray
        result = result.view(subtype)
        result._mask = m
        result._update_from(a)
        return result

    def __str__(self):
        return "Masked version of %s. [Invalid values are masked]" % str(self.f)
class _MaskedBinaryOperation:
    """
    Define masked version of binary operations, where invalid
    values are pre-masked.

    Parameters
    ----------
    mbfunc : function
        The function for which to define a masked version. Made available
        as ``_MaskedBinaryOperation.f``.
    fillx : scalar, optional
        Filling value for the first argument, default is 0.
    filly : scalar, optional
        Filling value for the second argument, default is 0.

    """
    def __init__(self, mbfunc, fillx=0, filly=0):
        """abfunc(fillx, filly) must be defined.
        abfunc(x, filly) = x for all x to enable reduce.
        """
        self.f = mbfunc
        self.fillx = fillx
        self.filly = filly
        # Keep the wrapped ufunc's doc and name for introspection.
        self.__doc__ = getattr(mbfunc, "__doc__", str(mbfunc))
        self.__name__ = getattr(mbfunc, "__name__", str(mbfunc))
        # Register fill values in the module-level registries.
        ufunc_domain[mbfunc] = None
        ufunc_fills[mbfunc] = (fillx, filly)

    def __call__(self, a, b, *args, **kwargs):
        "Execute the call behavior."
        # Get the data, as ndarray.
        (da, db) = (getdata(a, subok=False), getdata(b, subok=False))
        # Combine the masks of both operands; if only one side carries a
        # mask, broadcast the other to a full boolean array first.
        (ma, mb) = (getmask(a), getmask(b))
        if ma is nomask:
            if mb is nomask:
                m = nomask
            else:
                m = umath.logical_or(getmaskarray(a), mb)
        elif mb is nomask:
            m = umath.logical_or(ma, getmaskarray(b))
        else:
            m = umath.logical_or(ma, mb)
        # Get the result, silencing divide/invalid warnings: the offending
        # entries are masked anyway.
        with np.errstate(divide='ignore', invalid='ignore'):
            result = self.f(da, db, *args, **kwargs)
        # Defer to the reflected operation if the ufunc cannot handle
        # the operand types.
        if result is NotImplemented:
            return NotImplemented
        # Case 1. : scalar.
        if not result.ndim:
            if m:
                return masked
            return result
        # Case 2. : array.
        # Revert the result to da where masked.
        if m.any():
            np.copyto(result, 0, casting='unsafe', where=m)
            # This only makes sense if the operation preserved the dtype.
            if result.dtype == da.dtype:
                result += m * da
        # Transform to a (subclass of) MaskedArray.
        result = result.view(get_masked_subclass(a, b))
        result._mask = m
        # Update the optional info (fill_value, etc.) from the inputs,
        # preferring the first masked operand.
        if isinstance(b, MaskedArray):
            if isinstance(a, MaskedArray):
                result._update_from(a)
            else:
                result._update_from(b)
        elif isinstance(a, MaskedArray):
            result._update_from(a)
        return result

    def reduce(self, target, axis=0, dtype=None):
        """Reduce `target` along the given `axis`."""
        if isinstance(target, MaskedArray):
            tclass = type(target)
        else:
            tclass = MaskedArray
        m = getmask(target)
        t = filled(target, self.filly)
        if t.shape == ():
            # Promote 0-d input (and its mask) to 1-d so the ufunc
            # reduce machinery can operate on it.
            t = t.reshape(1)
            if m is not nomask:
                m = make_mask(m, copy=True)
                m.shape = (1,)
        if m is nomask:
            return self.f.reduce(t, axis).view(tclass)
        t = t.view(tclass)
        t._mask = m
        tr = self.f.reduce(getdata(t), axis, dtype=dtype or t.dtype)
        # An output element is masked only when every input along the
        # axis was masked.
        mr = umath.logical_and.reduce(m, axis)
        tr = tr.view(tclass)
        if mr.ndim > 0:
            tr._mask = mr
            return tr
        elif mr:
            return masked
        return tr

    def outer(self, a, b):
        """Return the function applied to the outer product of a and b.
        """
        ma = getmask(a)
        mb = getmask(b)
        if ma is nomask and mb is nomask:
            m = nomask
        else:
            ma = getmaskarray(a)
            mb = getmaskarray(b)
            m = umath.logical_or.outer(ma, mb)
        # A fully masked scalar result collapses to the masked singleton.
        if (not m.ndim) and m:
            return masked
        (da, db) = (getdata(a), getdata(b))
        d = self.f.outer(da, db)
        if d is NotImplemented:
            return NotImplemented
        if m is not nomask:
            # Keep the first operand's data under masked entries.
            np.copyto(d, da, where=m)
        if d.shape:
            d = d.view(get_masked_subclass(a, b))
            d._mask = m
        return d

    def accumulate(self, target, axis=0):
        """Accumulate `target` along `axis` after filling with y fill
        value.
        """
        if isinstance(target, MaskedArray):
            tclass = type(target)
        else:
            tclass = MaskedArray
        t = filled(target, self.filly)
        return self.f.accumulate(t, axis).view(tclass)

    def __str__(self):
        return "Masked version of " + str(self.f)
class _DomainedBinaryOperation:
    """
    Define binary operations that have a domain, like divide.

    They have no reduce, outer or accumulate.

    Parameters
    ----------
    dbfunc : function
        The function for which to define a masked version. Made available
        as ``_DomainedBinaryOperation.f``.
    domain : class instance
        Default domain for the function. Should be one of the ``_Domain*``
        classes.
    fillx : scalar, optional
        Filling value for the first argument, default is 0.
    filly : scalar, optional
        Filling value for the second argument, default is 0.

    """
    def __init__(self, dbfunc, domain, fillx=0, filly=0):
        """abfunc(fillx, filly) must be defined.
        abfunc(x, filly) = x for all x to enable reduce.
        """
        self.f = dbfunc
        self.domain = domain
        self.fillx = fillx
        self.filly = filly
        # Keep the wrapped ufunc's doc and name for introspection.
        self.__doc__ = getattr(dbfunc, "__doc__", str(dbfunc))
        self.__name__ = getattr(dbfunc, "__name__", str(dbfunc))
        # Register the domain and fill values in the module-level registries.
        ufunc_domain[dbfunc] = domain
        ufunc_fills[dbfunc] = (fillx, filly)

    def __call__(self, a, b, *args, **kwargs):
        "Execute the call behavior."
        # Get the data and the mask of both operands.
        (da, db) = (getdata(a, subok=False), getdata(b, subok=False))
        (ma, mb) = (getmask(a), getmask(b))
        # Compute with divide/invalid warnings silenced: the offending
        # entries are masked below anyway.
        with np.errstate(divide='ignore', invalid='ignore'):
            result = self.f(da, db, *args, **kwargs)
        # Defer to the reflected operation if needed.
        if result is NotImplemented:
            return NotImplemented
        # Get the mask as a combination of ma, mb and invalid results.
        m = ~umath.isfinite(result)
        m |= ma
        m |= mb
        # Apply the domain: mask wherever (e.g.) the divisor is ~zero.
        domain = ufunc_domain.get(self.f, None)
        if domain is not None:
            m |= filled(domain(da, db), True)
        # Take care of the scalar case first.
        if (not m.ndim):
            if m:
                return masked
            else:
                return result
        # When the mask is True, put back da (zero out the invalid result,
        # then add the original data at masked positions).
        np.copyto(result, 0, casting='unsafe', where=m)
        result += m * da
        result = result.view(get_masked_subclass(a, b))
        result._mask = m
        # Propagate optional info from the inputs, preferring the first
        # masked operand.
        if isinstance(b, MaskedArray):
            if isinstance(a, MaskedArray):
                result._update_from(a)
            else:
                result._update_from(b)
        elif isinstance(a, MaskedArray):
            result._update_from(a)
        return result

    def __str__(self):
        return "Masked version of " + str(self.f)
#..............................................................................
# Unary ufuncs
# Masked wrappers around the plain (domain-free) unary ufuncs.
exp = _MaskedUnaryOperation(umath.exp)
conjugate = _MaskedUnaryOperation(umath.conjugate)
sin = _MaskedUnaryOperation(umath.sin)
cos = _MaskedUnaryOperation(umath.cos)
# NOTE: this plain ``tan`` is immediately superseded by the domained
# definition in the "Domained unary ufuncs" section below.
tan = _MaskedUnaryOperation(umath.tan)
arctan = _MaskedUnaryOperation(umath.arctan)
arcsinh = _MaskedUnaryOperation(umath.arcsinh)
sinh = _MaskedUnaryOperation(umath.sinh)
cosh = _MaskedUnaryOperation(umath.cosh)
tanh = _MaskedUnaryOperation(umath.tanh)
# ``abs`` deliberately shadows the builtin within this module's namespace.
abs = absolute = _MaskedUnaryOperation(umath.absolute)
angle = _MaskedUnaryOperation(angle) # from numpy.lib.function_base
fabs = _MaskedUnaryOperation(umath.fabs)
negative = _MaskedUnaryOperation(umath.negative)
floor = _MaskedUnaryOperation(umath.floor)
ceil = _MaskedUnaryOperation(umath.ceil)
# NOTE(review): ``np.round_`` is deprecated (removed in NumPy 2.0) --
# confirm the numpy versions this file is expected to support.
around = _MaskedUnaryOperation(np.round_)
logical_not = _MaskedUnaryOperation(umath.logical_not)
# Domained unary ufuncs .......................................................
# These wrappers mask entries whose inputs fall outside each function's
# mathematical domain (negative values for sqrt, |x| >= 1 for arcsin, ...).
# The second argument is the fill value used for masked entries.
sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0,
                             _DomainGreaterEqual(0.0))
log = _MaskedUnaryOperation(umath.log, 1.0,
                            _DomainGreater(0.0))
log2 = _MaskedUnaryOperation(umath.log2, 1.0,
                             _DomainGreater(0.0))
log10 = _MaskedUnaryOperation(umath.log10, 1.0,
                              _DomainGreater(0.0))
tan = _MaskedUnaryOperation(umath.tan, 0.0,
                            _DomainTan(1e-35))
arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0,
                               _DomainCheckInterval(-1.0, 1.0))
arccos = _MaskedUnaryOperation(umath.arccos, 0.0,
                               _DomainCheckInterval(-1.0, 1.0))
arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0,
                                _DomainGreaterEqual(1.0))
arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0,
                                _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15))
# Binary ufuncs ...............................................................
# Masked wrappers around the plain binary ufuncs.
add = _MaskedBinaryOperation(umath.add)
subtract = _MaskedBinaryOperation(umath.subtract)
multiply = _MaskedBinaryOperation(umath.multiply, 1, 1)
arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0)
# Reducing with a comparison ufunc is not meaningful, so ``reduce`` is
# disabled on all of the comparison wrappers below.
equal = _MaskedBinaryOperation(umath.equal)
equal.reduce = None
not_equal = _MaskedBinaryOperation(umath.not_equal)
not_equal.reduce = None
less_equal = _MaskedBinaryOperation(umath.less_equal)
less_equal.reduce = None
greater_equal = _MaskedBinaryOperation(umath.greater_equal)
greater_equal.reduce = None
less = _MaskedBinaryOperation(umath.less)
less.reduce = None
greater = _MaskedBinaryOperation(umath.greater)
greater.reduce = None
logical_and = _MaskedBinaryOperation(umath.logical_and)
# alltrue / sometrue only expose the bound ``reduce`` of the logical ops.
alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce
logical_or = _MaskedBinaryOperation(umath.logical_or)
sometrue = logical_or.reduce
logical_xor = _MaskedBinaryOperation(umath.logical_xor)
bitwise_and = _MaskedBinaryOperation(umath.bitwise_and)
bitwise_or = _MaskedBinaryOperation(umath.bitwise_or)
bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor)
hypot = _MaskedBinaryOperation(umath.hypot)
# Domained binary ufuncs ......................................................
# Division-like operations: entries where the divisor is (close to) zero
# are masked via _DomainSafeDivide.
divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1)
true_divide = _DomainedBinaryOperation(umath.true_divide,
                                       _DomainSafeDivide(), 0, 1)
floor_divide = _DomainedBinaryOperation(umath.floor_divide,
                                        _DomainSafeDivide(), 0, 1)
remainder = _DomainedBinaryOperation(umath.remainder,
                                     _DomainSafeDivide(), 0, 1)
fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1)
mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1)
#####--------------------------------------------------------------------------
#---- --- Mask creation functions ---
#####--------------------------------------------------------------------------
def _recursive_make_descr(datatype, newtype=bool_):
    "Private function allowing recursion in make_descr."
    # Structured dtype: rebuild each field, replacing its leaf type.
    if datatype.names:
        descr = []
        for fname in datatype.names:
            fld = datatype.fields[fname]
            # A 3-tuple field carries a title: keep it, paired with the name.
            key = (fld[-1], fname) if len(fld) == 3 else fname
            descr.append((key, _recursive_make_descr(fld[0], newtype)))
        return descr
    # Sub-array dtype, a la (np.float32, (2,)): keep the shape, swap the base.
    if datatype.subdtype:
        return (newtype, datatype.subdtype[1])
    # Plain scalar dtype.
    return newtype
def make_mask_descr(ndtype):
    """
    Construct a dtype description list from a given dtype.

    Returns a new dtype object, with the type of all fields in `ndtype` to a
    boolean type. Field names are not altered.

    Parameters
    ----------
    ndtype : dtype
        The dtype to convert.

    Returns
    -------
    result : dtype
        A dtype that looks like `ndtype`, the type of all fields is boolean.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> dtype = np.dtype({'names':['foo', 'bar'],
                          'formats':[np.float32, np.int64]})
    >>> ma.make_mask_descr(dtype)
    dtype([('foo', '|b1'), ('bar', '|b1')])

    """
    # Make sure we do have a dtype.
    if not isinstance(ndtype, np.dtype):
        ndtype = np.dtype(ndtype)
    # Use np.bool_ rather than np.bool: the latter alias was deprecated in
    # NumPy 1.20 and removed in 1.24; both produce the same boolean dtype.
    return np.dtype(_recursive_make_descr(ndtype, np.bool_))
def getmask(a):
    """
    Return the mask of a masked array, or nomask.

    If `a` is a `MaskedArray` whose mask is not `nomask`, the mask is
    returned as an ndarray; any other input yields `nomask`.  Use
    `getmaskarray` when a full boolean array of the same shape as `a`
    is required instead.

    Parameters
    ----------
    a : array_like
        Input `MaskedArray` for which the mask is required.

    See Also
    --------
    getdata : Return the data of a masked array as an ndarray.
    getmaskarray : Return the mask of a masked array, or full array of False.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_equal([[1, 2], [3, 4]], 2)
    >>> ma.getmask(a)
    array([[False,  True],
           [False, False]], dtype=bool)

    When the array carries no mask, ``nomask`` (i.e. False) is returned:

    >>> ma.getmask(ma.masked_array([[1, 2], [3, 4]])) is ma.nomask
    True

    """
    # Anything without a ``_mask`` attribute (plain ndarrays, lists, ...)
    # is considered unmasked.
    return getattr(a, '_mask', nomask)

get_mask = getmask
def getmaskarray(arr):
    """
    Return the mask of a masked array, or full boolean array of False.

    If `arr` is a `MaskedArray` with a mask that is not `nomask`, that
    mask is returned as an ndarray.  Otherwise a new boolean array of the
    same shape as `arr`, filled with False, is returned.

    Parameters
    ----------
    arr : array_like
        Input `MaskedArray` for which the mask is required.

    See Also
    --------
    getmask : Return the mask of a masked array, or nomask.
    getdata : Return the data of a masked array as an ndarray.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_equal([[1, 2], [3, 4]], 2)
    >>> ma.getmaskarray(a)
    array([[False,  True],
           [False, False]], dtype=bool)

    With an unmasked array, an all-False mask of matching shape results:

    >>> ma.getmaskarray(ma.masked_array([[1, 2], [3, 4]]))
    array([[False, False],
           [False, False]], dtype=bool)

    """
    mask = getmask(arr)
    if mask is not nomask:
        return mask
    # No mask attached: synthesize an all-False mask matching the shape
    # (and, for structured dtypes, the field layout) of the data.
    return make_mask_none(np.shape(arr), getdata(arr).dtype)
def is_mask(m):
    """
    Return True if m is a valid, standard mask.

    Only the type is checked, not the contents: the input qualifies when
    its ``dtype.type`` is MaskType (bool).  In particular, False is
    returned for masks with a flexible (structured) dtype and for plain
    sequences that have no dtype at all.

    Parameters
    ----------
    m : array_like
        Array to test.

    Returns
    -------
    result : bool
        True if ``m.dtype.type`` is MaskType, False otherwise.

    See Also
    --------
    isMaskedArray : Test whether input is an instance of MaskedArray.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)
    >>> ma.is_mask(m)
    False
    >>> ma.is_mask(m.mask)
    True

    Input must be an ndarray (or have similar attributes):

    >>> ma.is_mask([False, True, False])
    False

    Arrays with structured dtypes don't return True either, even when
    every field is boolean.

    """
    # The getattr chain replaces try/except: a missing ``dtype`` (or a
    # dtype-like object without ``type``) simply yields None, which can
    # never be MaskType.
    dtype_type = getattr(getattr(m, 'dtype', None), 'type', None)
    return dtype_type is MaskType
def make_mask(m, copy=False, shrink=True, dtype=MaskType):
    """
    Create a boolean mask from an array.

    Return `m` as a boolean mask, creating a copy if necessary or
    requested.  Any sequence convertible to integers is accepted, as is
    ``nomask``; zero values are interpreted as False, everything else
    as True.

    Parameters
    ----------
    m : array_like
        Potential mask.
    copy : bool, optional
        Whether to return a copy of `m` (True) or `m` itself (False).
    shrink : bool, optional
        Whether to shrink `m` to ``nomask`` if all its values are False.
    dtype : dtype, optional
        Data-type of the output mask. By default, the output mask has
        a dtype of MaskType (bool). If the dtype is flexible, each field
        has a boolean dtype.

    Returns
    -------
    result : ndarray
        A boolean mask derived from `m` (or ``nomask``).

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.make_mask([1, 0, 2, -3])
    array([ True, False,  True,  True], dtype=bool)

    Effect of the `shrink` parameter:

    >>> ma.make_mask(np.zeros(4))
    False
    >>> ma.make_mask(np.zeros(4), shrink=False)
    array([False, False, False, False], dtype=bool)

    """
    if m is nomask:
        return nomask
    if isinstance(m, ndarray):
        # Fill missing entries with True so they count as masked, then
        # normalize to the requested (possibly structured) dtype.
        m = filled(m, True)
        dtype = make_mask_descr(dtype)
        if m.dtype == dtype:
            result = m.copy() if copy else m
        else:
            result = np.array(m, dtype=dtype, copy=copy)
    else:
        # Non-ndarray input: always converted to a plain MaskType array
        # (the ``dtype`` argument only applies to ndarray inputs).
        result = np.array(filled(m, True), dtype=MaskType)
    # Bas les masques ! Collapse an all-False plain mask to nomask.
    if shrink and (not result.dtype.names) and (not result.any()):
        return nomask
    return result
def make_mask_none(newshape, dtype=None):
    """
    Return a boolean mask of the given shape, filled with False.

    If a structured `dtype` is given, each of its fields is converted
    to a boolean type; otherwise a plain MaskType (bool) array is
    returned.

    Parameters
    ----------
    newshape : tuple
        A tuple indicating the shape of the mask.
    dtype : {None, dtype}, optional
        If None, use a MaskType instance. Otherwise, use a new datatype
        with the same fields as `dtype`, converted to boolean types.

    Returns
    -------
    result : ndarray
        An ndarray of appropriate shape and dtype, filled with False.

    See Also
    --------
    make_mask : Create a boolean mask from an array.
    make_mask_descr : Construct a dtype description list from a given dtype.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.make_mask_none((3,))
    array([False, False, False], dtype=bool)

    """
    mask_dtype = MaskType if dtype is None else make_mask_descr(dtype)
    return np.zeros(newshape, dtype=mask_dtype)
def mask_or (m1, m2, copy=False, shrink=True):
    """
    Combine two masks with the ``logical_or`` operator.

    The result may be a view on `m1` or `m2` if the other is `nomask`
    (i.e. False).

    Parameters
    ----------
    m1, m2 : array_like
        Input masks.
    copy : bool, optional
        If copy is False and one of the inputs is `nomask`, return a view
        of the other input mask. Defaults to False.
    shrink : bool, optional
        Whether to shrink the output to `nomask` if all its values are
        False. Defaults to True.

    Returns
    -------
    mask : output mask
        The result masks values that are masked in either `m1` or `m2`.

    Raises
    ------
    ValueError
        If `m1` and `m2` have different flexible dtypes.

    Examples
    --------
    >>> m1 = np.ma.make_mask([0, 1, 1, 0])
    >>> m2 = np.ma.make_mask([1, 0, 0, 0])
    >>> np.ma.mask_or(m1, m2)
    array([ True,  True,  True, False], dtype=bool)

    """
    def _recursive_mask_or(m1, m2, newmask):
        # Field-by-field OR for structured masks.  The parameters shadow
        # the outer m1/m2 on purpose: recursion descends into sub-fields.
        names = m1.dtype.names
        for name in names:
            current1 = m1[name]
            if current1.dtype.names:
                _recursive_mask_or(current1, m2[name], newmask[name])
            else:
                # The third argument is the output: writes into newmask[name].
                umath.logical_or(current1, m2[name], newmask[name])
        return
    #
    # If either operand is nomask/False, the other one (normalized by
    # make_mask) is the whole answer.
    if (m1 is nomask) or (m1 is False):
        dtype = getattr(m2, 'dtype', MaskType)
        return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)
    if (m2 is nomask) or (m2 is False):
        dtype = getattr(m1, 'dtype', MaskType)
        return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype)
    # OR-ing a valid mask with itself changes nothing: return it as-is.
    if m1 is m2 and is_mask(m1):
        return m1
    (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
    if (dtype1 != dtype2):
        raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2))
    # NOTE(review): inputs without a ``dtype`` attribute (e.g. plain lists)
    # reach this point with dtype1 None and would fail on ``.names`` below --
    # confirm whether such inputs are expected to be handled here.
    if dtype1.names:
        # Structured masks are combined field by field into a fresh array.
        newmask = np.empty_like(m1)
        _recursive_mask_or(m1, m2, newmask)
        return newmask
    return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)
def flatten_mask(mask):
    """
    Returns a completely flattened version of the mask, where nested fields
    are collapsed.

    Parameters
    ----------
    mask : array_like
        Input array, which will be interpreted as booleans.

    Returns
    -------
    flattened_mask : ndarray of bools
        The flattened input.

    Examples
    --------
    >>> mask = np.array([0, 0, 1], dtype=np.bool_)
    >>> flatten_mask(mask)
    array([False, False,  True], dtype=bool)
    >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
    >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype)
    >>> flatten_mask(mask)
    array([False, False, False, False, False,  True], dtype=bool)

    """
    def _leaves(m):
        # Depth-first over the fields of a structured mask; non-structured
        # masks are emitted unchanged.
        names = m.dtype.names
        if names:
            for name in names:
                yield flatten_mask(m[name])
        else:
            yield m

    def _scalars(node):
        # Yield the scalar elements of an arbitrarily nested sequence.
        try:
            for item in node:
                if hasattr(item, '__iter__'):
                    for scalar in _scalars(item):
                        yield scalar
                else:
                    yield item
        except TypeError:
            # 0-d arrays and scalars are not iterable: emit as-is.
            yield node

    mask = np.asarray(mask)
    flat = [s for leaf in _leaves(mask) for s in _scalars(leaf)]
    return np.array(flat, dtype=bool)
def _check_mask_axis(mask, axis):
    "Return mask.all(axis) when a mask is present, nomask otherwise."
    if mask is nomask:
        return nomask
    # An output element is fully masked only if every contributing input
    # element along the axis was masked.
    return mask.all(axis=axis)
#####--------------------------------------------------------------------------
#--- --- Masking functions ---
#####--------------------------------------------------------------------------
def masked_where(condition, a, copy=True):
    """
    Mask an array where a condition is met.

    Return `a` as an array masked where `condition` is True.
    Any masked values of `a` or `condition` are also masked in the output.

    Parameters
    ----------
    condition : array_like
        Masking condition.  When `condition` tests floating point values for
        equality, consider using ``masked_values`` instead.
    a : array_like
        Array to mask.
    copy : bool
        If True (default) make a copy of `a` in the result.  If False modify
        `a` in place and return a view.

    Returns
    -------
    result : MaskedArray
        The result of masking `a` where `condition` is True.

    Raises
    ------
    IndexError
        If `condition` is non-scalar and its shape differs from `a`'s.

    See Also
    --------
    masked_values : Mask using floating point equality.
    masked_equal : Mask where equal to a given value.
    masked_not_equal : Mask where `not` equal to a given value.
    masked_less_equal : Mask where less than or equal to a given value.
    masked_greater_equal : Mask where greater than or equal to a given value.
    masked_less : Mask where less than a given value.
    masked_greater : Mask where greater than a given value.
    masked_inside : Mask inside a given interval.
    masked_outside : Mask outside a given interval.
    masked_invalid : Mask invalid values (NaNs or infs).

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.arange(4)
    >>> ma.masked_where(a <= 2, a)
    masked_array(data = [-- -- -- 3],
          mask = [ True  True  True False],
          fill_value=999999)

    Mask array `b` conditional on `a`:

    >>> b = ['a', 'b', 'c', 'd']
    >>> ma.masked_where(a == 2, b)
    masked_array(data = [a b -- d],
          mask = [False False  True False],
          fill_value=N/A)

    """
    # Make sure that condition is a valid standard-type mask.
    cond = make_mask(condition)
    # NOTE(review): with copy=False this relies on np.array returning a
    # view when possible; NumPy 2.0 changed copy=False to mean "never
    # copy" -- confirm against the targeted NumPy version.
    a = np.array(a, copy=copy, subok=True)
    (cshape, ashape) = (cond.shape, a.shape)
    # A scalar (shape ()) condition is allowed; otherwise shapes must agree.
    if cshape and cshape != ashape:
        raise IndexError("Inconsistent shape between the condition and the input"
                         " (got %s and %s)" % (cshape, ashape))
    if hasattr(a, '_mask'):
        # Preserve any mask already carried by `a`.
        cond = mask_or(cond, a._mask)
        cls = type(a)
    else:
        cls = MaskedArray
    result = a.view(cls)
    result._mask = cond
    return result
def masked_greater(x, value, copy=True):
    """
    Mask an array where greater than a given value.

    Shortcut to ``masked_where`` with `condition` = (x > value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.masked_greater(np.arange(4), 2)
    masked_array(data = [0 1 2 --],
          mask = [False False False  True],
          fill_value=999999)

    """
    condition = greater(x, value)
    return masked_where(condition, x, copy=copy)
def masked_greater_equal(x, value, copy=True):
    """
    Mask an array where greater than or equal to a given value.

    Shortcut to ``masked_where`` with `condition` = (x >= value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.masked_greater_equal(np.arange(4), 2)
    masked_array(data = [0 1 -- --],
          mask = [False False  True  True],
          fill_value=999999)

    """
    condition = greater_equal(x, value)
    return masked_where(condition, x, copy=copy)
def masked_less(x, value, copy=True):
    """
    Mask an array where less than a given value.

    Shortcut to ``masked_where`` with `condition` = (x < value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.masked_less(np.arange(4), 2)
    masked_array(data = [-- -- 2 3],
          mask = [ True  True False False],
          fill_value=999999)

    """
    condition = less(x, value)
    return masked_where(condition, x, copy=copy)
def masked_less_equal(x, value, copy=True):
    """
    Mask an array where less than or equal to a given value.

    Shortcut to ``masked_where`` with `condition` = (x <= value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.masked_less_equal(np.arange(4), 2)
    masked_array(data = [-- -- -- 3],
          mask = [ True  True  True False],
          fill_value=999999)

    """
    condition = less_equal(x, value)
    return masked_where(condition, x, copy=copy)
def masked_not_equal(x, value, copy=True):
    """
    Mask an array where `not` equal to a given value.

    Shortcut to ``masked_where`` with `condition` = (x != value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.masked_not_equal(np.arange(4), 2)
    masked_array(data = [-- -- 2 --],
          mask = [ True  True False  True],
          fill_value=999999)

    """
    condition = not_equal(x, value)
    return masked_where(condition, x, copy=copy)
def masked_equal(x, value, copy=True):
    """
    Mask an array where equal to a given value.

    Shortcut to ``masked_where`` with `condition` = (x == value).  For
    floating point arrays, consider using ``masked_values(x, value)``.
    The fill value of the result is set to `value`.

    See Also
    --------
    masked_where : Mask where a condition is met.
    masked_values : Mask using floating point equality.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.masked_equal(np.arange(4), 2)
    masked_array(data = [0 1 -- 3],
          mask = [False False  True False],
          fill_value=999999)

    """
    result = masked_where(equal(x, value), x, copy=copy)
    # Unlike the other masked_* shortcuts, the fill value is pinned to the
    # masking value, so filling round-trips the original data.
    result.fill_value = value
    return result
def masked_inside(x, v1, v2, copy=True):
    """
    Mask an array inside a given interval.

    Shortcut to ``masked_where``, where `condition` is True for `x` inside
    the closed interval [v1, v2] (v1 <= x <= v2).  The boundaries `v1`
    and `v2` can be given in either order.

    See Also
    --------
    masked_where : Mask where a condition is met.

    Notes
    -----
    The array `x` is prefilled with its filling value.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
    >>> ma.masked_inside(x, -0.3, 0.3)
    masked_array(data = [0.31 1.2 -- -- -0.4 -1.1],
          mask = [False False  True  True False False],
          fill_value=1e+20)

    """
    # Normalize the bounds so lo <= hi regardless of argument order.
    lo, hi = (v1, v2) if v1 <= v2 else (v2, v1)
    xf = filled(x)
    return masked_where((xf >= lo) & (xf <= hi), x, copy=copy)
def masked_outside(x, v1, v2, copy=True):
    """
    Mask an array outside a given interval.

    Shortcut to ``masked_where``, where `condition` is True for `x` outside
    the interval [v1, v2] ((x < v1) | (x > v2)).  The boundaries `v1` and
    `v2` can be given in either order.

    See Also
    --------
    masked_where : Mask where a condition is met.

    Notes
    -----
    The array `x` is prefilled with its filling value.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
    >>> ma.masked_outside(x, -0.3, 0.3)
    masked_array(data = [-- -- 0.01 0.2 -- --],
          mask = [ True  True False False  True  True],
          fill_value=1e+20)

    """
    # Normalize the bounds so lo <= hi regardless of argument order.
    lo, hi = (v1, v2) if v1 <= v2 else (v2, v1)
    xf = filled(x)
    return masked_where((xf < lo) | (xf > hi), x, copy=copy)
def masked_object(x, value, copy=True, shrink=True):
    """
    Mask the array `x` where the data are exactly equal to value.

    This function is similar to `masked_values`, but only suitable
    for object arrays: for floating point, use `masked_values` instead.

    Parameters
    ----------
    x : array_like
        Array to mask
    value : object
        Comparison value
    copy : {True, False}, optional
        Whether to return a copy of `x`.
    shrink : {True, False}, optional
        Whether to collapse a mask full of False to nomask

    Returns
    -------
    result : MaskedArray
        The result of masking `x` where equal to `value`, with its
        fill value set to `value`.

    See Also
    --------
    masked_where : Mask where a condition is met.
    masked_equal : Mask where equal to a given value (integers).
    masked_values : Mask using floating point equality.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> food = np.array(['green_eggs', 'ham'], dtype=object)
    >>> # don't eat spoiled food
    >>> eat = ma.masked_object(food, 'green_eggs')
    >>> print eat
    [-- ham]

    Note that `mask` is set to ``nomask`` if possible when nothing matches.

    """
    if isMaskedArray(x):
        data, mask = x._data, x._mask
    else:
        data, mask = np.asarray(x), nomask
    condition = umath.equal(data, value)
    # Merge any pre-existing mask with the equality condition.
    combined = mask_or(mask, make_mask(condition, shrink=shrink))
    return masked_array(x, mask=combined, copy=copy, fill_value=value)
def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True):
    """
    Mask using floating point equality.

    Return a MaskedArray, masked where the data in array `x` are
    approximately equal to `value`, i.e. where

        ``abs(x - value) <= atol + rtol * abs(value)``

    The fill_value is set to `value` and the mask is set to ``nomask``
    if possible.  For integer data the comparison is exact (and for
    integers, consider using ``masked_equal`` instead, whose fill
    value differs in general).

    Parameters
    ----------
    x : array_like
        Array to mask.
    value : float
        Masking value.
    rtol : float, optional
        Relative tolerance parameter.
    atol : float, optional
        Absolute tolerance parameter (1e-8).
    copy : bool, optional
        Whether to return a copy of `x`.
    shrink : bool, optional
        Whether to collapse a mask full of False to ``nomask``.

    Returns
    -------
    result : MaskedArray
        The result of masking `x` where approximately equal to `value`.

    See Also
    --------
    masked_where : Mask where a condition is met.
    masked_equal : Mask where equal to a given value (integers).
    """
    abs_ = umath.absolute
    # Fill any pre-masked entries with `value` so they stay masked below.
    data = filled(x, value)
    if issubclass(data.dtype.type, np.floating):
        tolerance = atol + rtol * abs_(value)
        condition = umath.less_equal(abs_(data - value), tolerance)
        mask = getattr(x, '_mask', nomask)
    else:
        # Non-float data: exact comparison, ignore any input mask.
        condition = umath.equal(data, value)
        mask = nomask
    combined = mask_or(mask, make_mask(condition, shrink=shrink))
    return masked_array(data, mask=combined, copy=copy, fill_value=value)
def masked_invalid(a, copy=True):
    """
    Mask an array where invalid values occur (NaNs or infs).

    This function is a shortcut to ``masked_where``, with
    `condition` = ~(np.isfinite(a)).  Any pre-existing mask is
    conserved.  Only applies to arrays with a dtype where NaNs or infs
    make sense (i.e. floating point types), but accepts any array_like
    object.

    Parameters
    ----------
    a : array_like
        Array to mask.
    copy : bool, optional
        Whether to operate on a copy of the input data.

    Returns
    -------
    result : MaskedArray
        Input with non-finite entries masked.

    See Also
    --------
    masked_where : Mask where a condition is met.
    """
    arr = np.array(a, copy=copy, subok=True)
    existing = getattr(arr, '_mask', None)
    if existing is None:
        # Plain ndarray input: mask exactly the non-finite entries.
        invalid = ~(np.isfinite(arr))
        target_cls = MaskedArray
    else:
        # Already a masked array: combine with the existing mask and
        # preserve the input's (sub)class.
        invalid = ~(np.isfinite(getdata(arr)))
        if existing is not nomask:
            invalid |= existing
        target_cls = type(arr)
    out = arr.view(target_cls)
    out._mask = invalid
    return out
#####--------------------------------------------------------------------------
#---- --- Printing options ---
#####--------------------------------------------------------------------------
class _MaskedPrintOption:
"""
Handle the string used to represent missing data in a masked array.
"""
def __init__ (self, display):
"Create the masked_print_option object."
self._display = display
self._enabled = True
def display(self):
"Display the string to print for masked values."
return self._display
def set_display (self, s):
"Set the string to print for masked values."
self._display = s
def enabled(self):
"Is the use of the display value enabled?"
return self._enabled
def enable(self, shrink=1):
"Set the enabling shrink to `shrink`."
self._enabled = shrink
def __str__ (self):
return str(self._display)
__repr__ = __str__
# Module-level singleton: the placeholder text ('--') shown for masked
# entries in printed output.  It is also what you get back when you
# single-index into a masked location.
masked_print_option = _MaskedPrintOption('--')
def _recursive_printoption(result, mask, printopt):
"""
Puts printoptions in result where mask is True.
Private function allowing for recursion
"""
names = result.dtype.names
for name in names:
(curdata, curmask) = (result[name], mask[name])
if curdata.dtype.names:
_recursive_printoption(curdata, curmask, printopt)
else:
np.copyto(curdata, printopt, where=curmask)
return
# Templates used when building the repr of a masked array.
# 'long_*' variants put data/mask on their own lines (multi-line data),
# 'short_*' keep everything inline; '*_std' is for plain dtypes while
# '*_flx' adds the dtype line for flexible (structured) dtypes.
# %(nlen)s is padding that aligns continuation lines under the name.
_print_templates = dict(long_std="""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s)
""",
short_std="""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s)
""",
long_flx="""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
""",
short_flx="""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
""")
#####--------------------------------------------------------------------------
#---- --- MaskedArray class ---
#####--------------------------------------------------------------------------
def _recursive_filled(a, mask, fill_value):
"""
Recursively fill `a` with `fill_value`.
Private function
"""
names = a.dtype.names
for name in names:
current = a[name]
if current.dtype.names:
_recursive_filled(current, mask[name], fill_value[name])
else:
np.copyto(current, fill_value[name], where=mask[name])
def flatten_structured_array(a):
    """
    Flatten a structured array.

    The data type of the output is chosen such that it can represent
    all of the (nested) fields.

    Parameters
    ----------
    a : structured array

    Returns
    -------
    output : masked array or ndarray
        A flattened masked array if the input is a masked array,
        otherwise a standard ndarray.

    Examples
    --------
    >>> ndtype = [('a', int), ('b', float)]
    >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype)
    >>> flatten_structured_array(a)
    array([[1., 1.],
           [2., 2.]])
    """
    def _flatten(iterable):
        """Yield the leaves of an arbitrarily nested iterable."""
        for element in iter(iterable):
            if hasattr(element, '__iter__'):
                for leaf in _flatten(element):
                    yield leaf
            else:
                yield element

    a = np.asanyarray(a)
    original_shape = a.shape
    a = a.ravel()
    if isinstance(a, MaskedArray):
        out = np.array([tuple(_flatten(rec.item())) for rec in a._data])
        out = out.view(MaskedArray)
        out._mask = np.array([tuple(_flatten(rec.item()))
                              for rec in getmaskarray(a)])
    else:
        out = np.array([tuple(_flatten(rec.item())) for rec in a])
    if len(original_shape) > 1:
        # Restore the leading dimensions of the input while keeping the
        # trailing axis that now holds the flattened fields.
        shape = list(out.shape)
        shape[0] = original_shape
        out.shape = tuple(_flatten(shape))
    return out
class _arraymethod(object):
    """
    Define a wrapper for basic array methods.
    Upon call, returns a masked array, where the new ``_data`` array is
    the output of the corresponding method called on the original
    ``_data``.
    If `onmask` is True, the new mask is the output of the method called
    on the initial mask. Otherwise, the new mask is just a reference
    to the initial mask.
    Attributes
    ----------
    _onmask : bool
        Holds the `onmask` parameter.
    obj : object
        The object calling `_arraymethod`.
    Parameters
    ----------
    funcname : str
        Name of the function to apply on data.
    onmask : bool
        Whether the mask must be processed also (True) or left
        alone (False). Default is True. Make available as `_onmask`
        attribute.
    """
    def __init__(self, funcname, onmask=True):
        self.__name__ = funcname
        self._onmask = onmask
        self.obj = None
        # Borrow the docstring from the wrapped ndarray/numpy function.
        self.__doc__ = self.getdoc()
    #
    def getdoc(self):
        "Return the doc of the function (from the doc of the method)."
        # Prefer the ndarray method's doc; fall back to the numpy free
        # function of the same name.  Returns None if neither exists.
        methdoc = getattr(ndarray, self.__name__, None) or \
                  getattr(np, self.__name__, None)
        if methdoc is not None:
            return methdoc.__doc__
    #
    def __get__(self, obj, objtype=None):
        # Descriptor hook: remember which instance the attribute was
        # looked up on so __call__ can use it.
        # NOTE(review): state is stored on the shared descriptor itself,
        # so this is not re-entrant; kept as-is for compatibility.
        self.obj = obj
        return self
    #
    def __call__(self, *args, **params):
        methodname = self.__name__
        instance = self.obj
        # Fallback : if the instance has not been initialized, use the first arg
        if instance is None:
            args = list(args)
            instance = args.pop(0)
        data = instance._data
        mask = instance._mask
        cls = type(instance)
        # Apply the method to the data part, then re-wrap in the same class.
        result = getattr(data, methodname)(*args, **params).view(cls)
        result._update_from(instance)
        if result.ndim:
            if not self._onmask:
                # Keep (a reference to) the original mask untouched.
                result.__setmask__(mask)
            elif mask is not nomask:
                # Apply the same method to the mask as well.
                result.__setmask__(getattr(mask, methodname)(*args, **params))
        else:
            # Scalar result: return the `masked` singleton if the whole
            # (unstructured) mask is True.
            if mask.ndim and (not mask.dtype.names and mask.all()):
                return masked
        return result
class MaskedIterator(object):
    """
    Flat iterator object to iterate over masked arrays.
    A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array
    `x`. It allows iterating over the array as if it were a 1-D array,
    either in a for-loop or by calling its `next` method.
    Iteration is done in C-contiguous style, with the last index varying the
    fastest. The iterator can also be indexed using basic slicing or
    advanced indexing.
    See Also
    --------
    MaskedArray.flat : Return a flat iterator over an array.
    MaskedArray.flatten : Returns a flattened copy of an array.
    Notes
    -----
    `MaskedIterator` is not exported by the `ma` module. Instead of
    instantiating a `MaskedIterator` directly, use `MaskedArray.flat`.
    Examples
    --------
    >>> x = np.ma.array(arange(6).reshape(2, 3))
    >>> fl = x.flat
    >>> type(fl)
    <class 'numpy.ma.core.MaskedIterator'>
    >>> for item in fl:
    ...     print item
    ...
    0
    1
    2
    3
    4
    5
    Extracting more than a single element by indexing the `MaskedIterator`
    returns a masked array:
    >>> fl[2:4]
    masked_array(data = [2 3],
    mask = False,
    fill_value = 999999)
    """
    def __init__(self, ma):
        # Keep the parent array plus flat iterators over its data and,
        # when present, its mask.
        self.ma = ma
        self.dataiter = ma._data.flat
        #
        if ma._mask is nomask:
            self.maskiter = None
        else:
            self.maskiter = ma._mask.flat
    def __iter__(self):
        return self
    def __getitem__(self, indx):
        result = self.dataiter.__getitem__(indx).view(type(self.ma))
        if self.maskiter is not None:
            _mask = self.maskiter.__getitem__(indx)
            if isinstance(_mask, ndarray):
                # set shape to match that of data; this is needed for matrices
                _mask.shape = result.shape
                result._mask = _mask
            elif isinstance(_mask, np.void):
                # Structured scalar: wrap as mvoid so per-field masks survive.
                return mvoid(result, mask=_mask, hardmask=self.ma._hardmask)
            elif _mask: # Just a scalar, masked
                return masked
        return result
    ### NOTE(review): assignment through .flat won't propagate to the
    ### parent if ravel had to make a copy.
    def __setitem__(self, index, value):
        self.dataiter[index] = getdata(value)
        if self.maskiter is not None:
            self.maskiter[index] = getmaskarray(value)
    def __next__(self):
        """
        Return the next value, or raise StopIteration.
        Examples
        --------
        >>> x = np.ma.array([3, 2], mask=[0, 1])
        >>> fl = x.flat
        >>> fl.next()
        3
        >>> fl.next()
        masked_array(data = --,
        mask = True,
        fill_value = 1e+20)
        >>> fl.next()
        Traceback (most recent call last):
        File "<stdin>", line 1, in <module>
        File "/home/ralf/python/numpy/numpy/ma/core.py", line 2243, in next
        d = self.dataiter.next()
        StopIteration
        """
        d = next(self.dataiter)
        if self.maskiter is not None:
            m = next(self.maskiter)
            if isinstance(m, np.void):
                # Structured scalar: keep the per-field mask.
                return mvoid(d, mask=m, hardmask=self.ma._hardmask)
            elif m: # Just a scalar, masked
                return masked
        return d
    # Python 2 iterator protocol alias.
    next = __next__
class MaskedArray(ndarray):
"""
An array class with possibly masked values.
Masked values of True exclude the corresponding element from any
computation.
Construction::
x = MaskedArray(data, mask=nomask, dtype=None,
copy=False, subok=True, ndmin=0, fill_value=None,
keep_mask=True, hard_mask=None, shrink=True)
Parameters
----------
data : array_like
Input data.
mask : sequence, optional
Mask. Must be convertible to an array of booleans with the same
shape as `data`. True indicates a masked (i.e. invalid) data.
dtype : dtype, optional
Data type of the output.
If `dtype` is None, the type of the data argument (``data.dtype``)
is used. If `dtype` is not None and different from ``data.dtype``,
a copy is performed.
copy : bool, optional
Whether to copy the input data (True), or to use a reference instead.
Default is False.
subok : bool, optional
Whether to return a subclass of `MaskedArray` if possible (True) or a
plain `MaskedArray`. Default is True.
ndmin : int, optional
Minimum number of dimensions. Default is 0.
fill_value : scalar, optional
Value used to fill in the masked values when necessary.
If None, a default based on the data-type is used.
keep_mask : bool, optional
Whether to combine `mask` with the mask of the input data, if any
(True), or to use only `mask` for the output (False). Default is True.
hard_mask : bool, optional
Whether to use a hard mask or not. With a hard mask, masked values
cannot be unmasked. Default is False.
shrink : bool, optional
Whether to force compression of an empty mask. Default is True.
"""
# Win ufunc precedence over plain ndarrays so this class's
# __array_wrap__ is the one invoked in mixed operations.
__array_priority__ = 15
# Class-level defaults: no mask, soft mask, plain ndarray as the
# underlying data class.
_defaultmask = nomask
_defaulthardmask = False
_baseclass = ndarray
def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,
            subok=True, ndmin=0, fill_value=None,
            keep_mask=True, hard_mask=None, shrink=True,
            **options):
    """
    Create a new masked array from scratch.

    See the class docstring for the meaning of the parameters.

    Notes
    -----
    A masked array can also be created by taking a .view(MaskedArray).
    """
    # Process data............
    _data = np.array(data, dtype=dtype, copy=copy, subok=True, ndmin=ndmin)
    _baseclass = getattr(data, '_baseclass', type(_data))
    # Check that we're not erasing the mask..........
    # (a reshaped view would otherwise silently share a mis-shaped mask)
    if isinstance(data, MaskedArray) and (data.shape != _data.shape):
        copy = True
    # Careful, cls might not always be MaskedArray...
    if not isinstance(data, cls) or not subok:
        _data = ndarray.view(_data, cls)
    else:
        _data = ndarray.view(_data, type(data))
    # Backwards compatibility w/ numpy.core.ma .......
    if hasattr(data, '_mask') and not isinstance(data, ndarray):
        _data._mask = data._mask
        # NOTE(review): this assigns a *local* name, not _data._sharedmask;
        # looks like dead code kept for historical reasons — confirm.
        _sharedmask = True
    # Process mask ...............................
    # Number of named fields (or zero if none)
    names_ = _data.dtype.names or ()
    # Type of the mask: structured descr for structured data,
    # plain MaskType (bool) otherwise.
    if names_:
        mdtype = make_mask_descr(_data.dtype)
    else:
        mdtype = MaskType
    # Case 1. : no mask in input ............
    if mask is nomask:
        # Erase the current mask ?
        if not keep_mask:
            # With a reduced version
            if shrink:
                _data._mask = nomask
            # With full version
            else:
                _data._mask = np.zeros(_data.shape, dtype=mdtype)
        # Check whether we missed something
        elif isinstance(data, (tuple, list)):
            try:
                # If data is a sequence of masked array
                mask = np.array([getmaskarray(m) for m in data],
                                dtype=mdtype)
            except ValueError:
                # If data is nested
                mask = nomask
            # Force shrinking of the mask if needed (and possible)
            if (mdtype == MaskType) and mask.any():
                _data._mask = mask
                _data._sharedmask = False
        else:
            if copy:
                _data._mask = _data._mask.copy()
                _data._sharedmask = False
                # Reset the shape of the original mask
                if getmask(data) is not nomask:
                    data._mask.shape = data.shape
            else:
                _data._sharedmask = True
    # Case 2. : With a mask in input ........
    else:
        # Read the mask with the current mdtype
        try:
            mask = np.array(mask, copy=copy, dtype=mdtype)
        # Or assume it's a sequence of bool/int
        except TypeError:
            mask = np.array([tuple([m] * len(mdtype)) for m in mask],
                            dtype=mdtype)
        # Make sure the mask and the data have the same shape
        if mask.shape != _data.shape:
            (nd, nm) = (_data.size, mask.size)
            if nm == 1:
                # Scalar mask: broadcast it over the data shape.
                mask = np.resize(mask, _data.shape)
            elif nm == nd:
                mask = np.reshape(mask, _data.shape)
            else:
                msg = "Mask and data not compatible: data size is %i, " + \
                      "mask size is %i."
                raise MaskError(msg % (nd, nm))
            copy = True
        # Set the mask to the new value
        if _data._mask is nomask:
            _data._mask = mask
            _data._sharedmask = not copy
        else:
            if not keep_mask:
                _data._mask = mask
                _data._sharedmask = not copy
            else:
                if names_:
                    # Structured dtype: OR the masks field by field.
                    def _recursive_or(a, b):
                        "do a|=b on each field of a, recursively"
                        for name in a.dtype.names:
                            (af, bf) = (a[name], b[name])
                            if af.dtype.names:
                                _recursive_or(af, bf)
                            else:
                                af |= bf
                        return
                    _recursive_or(_data._mask, mask)
                else:
                    _data._mask = np.logical_or(mask, _data._mask)
                _data._sharedmask = False
    # Update fill_value.......
    if fill_value is None:
        fill_value = getattr(data, '_fill_value', None)
    # But don't run the check unless we have something to check....
    if fill_value is not None:
        _data._fill_value = _check_fill_value(fill_value, _data.dtype)
    # Process extra options ..
    if hard_mask is None:
        _data._hardmask = getattr(data, '_hardmask', False)
    else:
        _data._hardmask = hard_mask
    _data._baseclass = _baseclass
    return _data
#
def _update_from(self, obj):
    """Copies some attributes of obj to self.

    Propagates fill value, mask hardness/sharing flags, base class and
    the auxiliary info dicts from `obj` (typically the array self was
    derived from); falls back to safe defaults when `obj` lacks them.
    """
    if obj is not None and isinstance(obj, ndarray):
        _baseclass = type(obj)
    else:
        _baseclass = ndarray
    # We need to copy the _basedict to avoid backward propagation
    _optinfo = {}
    _optinfo.update(getattr(obj, '_optinfo', {}))
    _optinfo.update(getattr(obj, '_basedict', {}))
    if not isinstance(obj, MaskedArray):
        # For foreign objects, also carry over their instance dict.
        _optinfo.update(getattr(obj, '__dict__', {}))
    _dict = dict(_fill_value=getattr(obj, '_fill_value', None),
                 _hardmask=getattr(obj, '_hardmask', False),
                 _sharedmask=getattr(obj, '_sharedmask', False),
                 _isfield=getattr(obj, '_isfield', False),
                 _baseclass=getattr(obj, '_baseclass', _baseclass),
                 _optinfo=_optinfo,
                 _basedict=_optinfo)
    self.__dict__.update(_dict)
    self.__dict__.update(_optinfo)
    return
def __array_finalize__(self, obj):
    """Finalizes the masked array.

    Called by numpy whenever an instance is created from a template
    (view, slice, ufunc output...): copies attributes from `obj` and
    installs/repairs the mask.
    """
    # Get main attributes .........
    self._update_from(obj)
    if isinstance(obj, ndarray):
        odtype = obj.dtype
        if odtype.names:
            # Structured source: default to an all-False structured mask.
            _mask = getattr(obj, '_mask', make_mask_none(obj.shape, odtype))
        else:
            _mask = getattr(obj, '_mask', nomask)
    else:
        _mask = nomask
    self._mask = _mask
    # Finalize the mask ...........
    if self._mask is not nomask:
        try:
            self._mask.shape = self.shape
        except ValueError:
            # Incompatible sizes (e.g. after a reshaping view): drop it.
            self._mask = nomask
        except (TypeError, AttributeError):
            # When _mask.shape is not writable (because it's a void)
            pass
    # Finalize the fill_value for structured arrays
    if self.dtype.names:
        if self._fill_value is None:
            self._fill_value = _check_fill_value(None, self.dtype)
    return
def __array_wrap__(self, obj, context=None):
    """
    Special hook for ufuncs.
    Wraps the numpy array and sets the mask according to context.

    `context` is (ufunc, args, domain-index); when present, the result
    mask is the OR of the input masks plus any entries falling outside
    the ufunc's valid domain (e.g. division by zero, log of negatives).
    """
    result = obj.view(type(self))
    result._update_from(self)
    #..........
    if context is not None:
        # Work on a private mask: we're about to modify it.
        result._mask = result._mask.copy()
        (func, args, _) = context
        m = reduce(mask_or, [getmaskarray(arg) for arg in args])
        # Get the domain mask................
        domain = ufunc_domain.get(func, None)
        if domain is not None:
            # Take the domain, and make sure it's a ndarray
            if len(args) > 2:
                d = filled(reduce(domain, args), True)
            else:
                d = filled(domain(*args), True)
            # Fill the result where the domain is wrong
            try:
                # Binary domain: take the last value
                fill_value = ufunc_fills[func][-1]
            except TypeError:
                # Unary domain: just use this one
                fill_value = ufunc_fills[func]
            except KeyError:
                # Domain not recognized, use fill_value instead
                fill_value = self.fill_value
            result = result.copy()
            np.copyto(result, fill_value, where=d)
            # Update the mask
            if m is nomask:
                if d is not nomask:
                    m = d
            else:
                # Don't modify inplace, we risk back-propagation
                m = (m | d)
        # Make sure the mask has the proper size
        if result.shape == () and m:
            # Fully-masked scalar result collapses to the masked singleton.
            return masked
        else:
            result._mask = m
            result._sharedmask = False
    #....
    return result
def view(self, dtype=None, type=None, fill_value=None):
    """
    Return a view of the MaskedArray data
    Parameters
    ----------
    dtype : data-type or ndarray sub-class, optional
        Data-type descriptor of the returned view, e.g., float32 or int16.
        The default, None, results in the view having the same data-type
        as `a`. As with ``ndarray.view``, dtype can also be specified as
        an ndarray sub-class, which then specifies the type of the
        returned object (this is equivalent to setting the ``type``
        parameter).
    type : Python type, optional
        Type of the returned view, e.g., ndarray or matrix. Again, the
        default None results in type preservation.
    Notes
    -----
    ``a.view()`` is used two different ways:
    ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
    of the array's memory with a different data-type. This can cause a
    reinterpretation of the bytes of memory.
    ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
    returns an instance of `ndarray_subclass` that looks at the same array
    (same shape, dtype, etc.) This does not cause a reinterpretation of the
    memory.
    If `fill_value` is not specified, but `dtype` is specified (and is not
    an ndarray sub-class), the `fill_value` of the MaskedArray will be
    reset. If neither `fill_value` nor `dtype` are specified (or if
    `dtype` is an ndarray sub-class), then the fill value is preserved.
    Finally, if `fill_value` is specified, but `dtype` is not, the fill
    value is set to the specified value.
    For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
    bytes per entry than the previous dtype (for example, converting a
    regular array to a structured array), then the behavior of the view
    cannot be predicted just from the superficial appearance of ``a`` (shown
    by ``print(a)``). It also depends on exactly how ``a`` is stored in
    memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
    defined as a slice or transpose, etc., the view may give different
    results.
    """
    # Dispatch on which of dtype/type were supplied, mirroring
    # ndarray.view's overloaded signature.
    if dtype is None:
        if type is None:
            output = ndarray.view(self)
        else:
            output = ndarray.view(self, type)
    elif type is None:
        try:
            if issubclass(dtype, ndarray):
                # dtype was actually an ndarray subclass: use it as the
                # type and keep the data-type unchanged.
                output = ndarray.view(self, dtype)
                dtype = None
            else:
                output = ndarray.view(self, dtype)
        except TypeError:
            output = ndarray.view(self, dtype)
    else:
        output = ndarray.view(self, dtype, type)
    # Should we update the mask ?
    if (getattr(output, '_mask', nomask) is not nomask):
        if dtype is None:
            dtype = output.dtype
        mdtype = make_mask_descr(dtype)
        output._mask = self._mask.view(mdtype, ndarray)
        # Try to reset the shape of the mask (if we don't have a void)
        try:
            output._mask.shape = output.shape
        except (AttributeError, TypeError):
            pass
    # Make sure to reset the _fill_value if needed
    if getattr(output, '_fill_value', None) is not None:
        if fill_value is None:
            if dtype is None:
                pass # leave _fill_value as is
            else:
                # dtype changed: the old fill value may no longer apply.
                output._fill_value = None
        else:
            output.fill_value = fill_value
    return output
# NOTE(review): this discards the detailed docstring defined above in
# favor of ndarray's generic one (which does not mention fill_value).
view.__doc__ = ndarray.view.__doc__
def astype(self, newtype):
    """
    Returns a copy of the MaskedArray cast to given newtype.
    Returns
    -------
    output : MaskedArray
        A copy of self cast to input newtype.
        The returned record shape matches self.shape.
    Examples
    --------
    >>> x = np.ma.array([[1,2,3.1],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
    >>> print x
    [[1.0 -- 3.1]
    [-- 5.0 --]
    [7.0 -- 9.0]]
    >>> print x.astype(int32)
    [[1 -- 3]
    [-- 5 --]
    [7 -- 9]]
    """
    newtype = np.dtype(newtype)
    # Cast the data part, re-wrap as the same (sub)class and carry
    # attributes over.
    output = self._data.astype(newtype).view(type(self))
    output._update_from(self)
    names = output.dtype.names
    if names is None:
        output._mask = self._mask.astype(bool)
    else:
        # Structured target: rebuild a per-field boolean mask.
        if self._mask is nomask:
            output._mask = nomask
        else:
            output._mask = self._mask.astype([(n, bool) for n in names])
    # Don't check _fill_value if it's None, that'll speed things up
    if self._fill_value is not None:
        output._fill_value = _check_fill_value(self._fill_value, newtype)
    return output
def __getitem__(self, indx):
    """x.__getitem__(y) <==> x[y]
    Return the item described by i, as a masked array.

    Scalars come back as the `masked` singleton (if masked), an mvoid
    (structured records) or a plain scalar; anything with dimensions
    comes back as a MaskedArray sharing this array's mask.
    """
    # This test is useful, but we should keep things light...
    # if getmask(indx) is not nomask:
    # msg = "Masked arrays must be filled before they can be used as indices!"
    # raise IndexError(msg)
    _data = ndarray.view(self, ndarray)
    dout = ndarray.__getitem__(_data, indx)
    # We could directly use ndarray.__getitem__ on self...
    # But then we would have to modify __array_finalize__ to prevent the
    # mask of being reshaped if it hasn't been set up properly yet...
    # So it's easier to stick to the current version
    _mask = self._mask
    if not getattr(dout, 'ndim', False):
        # A record ................
        if isinstance(dout, np.void):
            mask = _mask[indx]
            # We should always re-cast to mvoid, otherwise users can
            # change masks on rows that already have masked values, but not
            # on rows that have no masked values, which is inconsistent.
            dout = mvoid(dout, mask=mask, hardmask=self._hardmask)
        # Just a scalar............
        elif _mask is not nomask and _mask[indx]:
            return masked
    else:
        # Force dout to MA ........
        dout = dout.view(type(self))
        # Inherit attributes from self
        dout._update_from(self)
        # Check the fill_value ....
        if isinstance(indx, basestring):
            # Field access on a structured array: narrow the fill value.
            if self._fill_value is not None:
                dout._fill_value = self._fill_value[indx]
            dout._isfield = True
        # Update the mask if needed
        if _mask is not nomask:
            dout._mask = _mask[indx]
            # The sub-array shares our mask until someone writes to it.
            dout._sharedmask = True
        # Note: Don't try to check for m.any(), that'll take too long...
    return dout
def __setitem__(self, indx, value):
    """x.__setitem__(i, y) <==> x[i]=y
    Set item described by index. If value is masked, masks those
    locations.

    With a hard mask, already-masked locations are protected from
    being overwritten or unmasked.
    """
    if self is masked:
        raise MaskError('Cannot alter the masked element.')
    # This test is useful, but we should keep things light...
    # if getmask(indx) is not nomask:
    # msg = "Masked arrays must be filled before they can be used as indices!"
    # raise IndexError(msg)
    _data = ndarray.view(self, ndarray.__getattribute__(self, '_baseclass'))
    _mask = ndarray.__getattribute__(self, '_mask')
    if isinstance(indx, basestring):
        # Whole-field assignment on a structured array.
        ndarray.__setitem__(_data, indx, value)
        if _mask is nomask:
            self._mask = _mask = make_mask_none(self.shape, self.dtype)
        _mask[indx] = getmask(value)
        return
    #........................................
    _dtype = ndarray.__getattribute__(_data, 'dtype')
    nbfields = len(_dtype.names or ())
    #........................................
    if value is masked:
        # The mask wasn't set: create a full version...
        if _mask is nomask:
            _mask = self._mask = make_mask_none(self.shape, _dtype)
        # Now, set the mask to its value.
        if nbfields:
            _mask[indx] = tuple([True] * nbfields)
        else:
            _mask[indx] = True
        if not self._isfield:
            self._sharedmask = False
        return
    #........................................
    # Get the _data part of the new value
    dval = value
    # Get the _mask part of the new value
    mval = getattr(value, '_mask', nomask)
    if nbfields and mval is nomask:
        mval = tuple([False] * nbfields)
    if _mask is nomask:
        # Set the data, then the mask
        ndarray.__setitem__(_data, indx, dval)
        if mval is not nomask:
            _mask = self._mask = make_mask_none(self.shape, _dtype)
            ndarray.__setitem__(_mask, indx, mval)
    elif not self._hardmask:
        # Unshare the mask if necessary to avoid propagation
        if not self._isfield:
            self.unshare_mask()
            _mask = ndarray.__getattribute__(self, '_mask')
        # Set the data, then the mask
        ndarray.__setitem__(_data, indx, dval)
        ndarray.__setitem__(_mask, indx, mval)
    elif hasattr(indx, 'dtype') and (indx.dtype == MaskType):
        # Hard mask + boolean index: only touch unmasked positions.
        indx = indx * umath.logical_not(_mask)
        ndarray.__setitem__(_data, indx, dval)
    else:
        if nbfields:
            err_msg = "Flexible 'hard' masks are not yet supported..."
            raise NotImplementedError(err_msg)
        # Hard mask: combine with the value's mask and only write where
        # the combined mask allows it.
        mindx = mask_or(_mask[indx], mval, copy=True)
        dindx = self._data[indx]
        if dindx.size > 1:
            np.copyto(dindx, dval, where=~mindx)
        elif mindx is nomask:
            dindx = dval
        ndarray.__setitem__(_data, indx, dindx)
        _mask[indx] = mindx
    return
def __getslice__(self, i, j):
    """x.__getslice__(i, j) <==> x[i:j]

    Python 2 slicing hook: delegate to __getitem__ with an explicit
    slice object.  The use of negative indices is not supported.
    """
    return self[slice(i, j)]
def __setslice__(self, i, j, value):
    """x.__setslice__(i, j, value) <==> x[i:j]=value

    Python 2 slice-assignment hook: delegate to __setitem__.  If value
    is masked, the corresponding locations are masked.
    """
    self[slice(i, j)] = value
def __setmask__(self, mask, copy=False):
    """Set the mask.

    Accepts `nomask`, the `masked` singleton (== mask everything), a
    scalar, or anything convertible to a boolean (or structured-bool)
    array of the right shape.  With a hard mask, new True entries are
    OR-ed in and nothing is ever unmasked.
    """
    idtype = ndarray.__getattribute__(self, 'dtype')
    current_mask = ndarray.__getattribute__(self, '_mask')
    if mask is masked:
        mask = True
    # Make sure the mask is set
    if (current_mask is nomask):
        # Just don't do anything is there's nothing to do...
        if mask is nomask:
            return
        current_mask = self._mask = make_mask_none(self.shape, idtype)
    # No named fields.........
    if idtype.names is None:
        # Hardmask: don't unmask the data
        if self._hardmask:
            current_mask |= mask
        # Softmask: set everything to False
        # If it's obviously a compatible scalar, use a quick update
        # method...
        elif isinstance(mask, (int, float, np.bool_, np.number)):
            current_mask[...] = mask
        # ...otherwise fall back to the slower, general purpose way.
        else:
            current_mask.flat = mask
    # Named fields w/ ............
    else:
        mdtype = current_mask.dtype
        mask = np.array(mask, copy=False)
        # Mask is a singleton
        if not mask.ndim:
            # It's a boolean : make a record
            if mask.dtype.kind == 'b':
                mask = np.array(tuple([mask.item()]*len(mdtype)),
                                dtype=mdtype)
            # It's a record: make sure the dtype is correct
            else:
                mask = mask.astype(mdtype)
        # Mask is a sequence
        else:
            # Make sure the new mask is a ndarray with the proper dtype
            try:
                mask = np.array(mask, copy=copy, dtype=mdtype)
            # Or assume it's a sequence of bool/int
            except TypeError:
                mask = np.array([tuple([m] * len(mdtype)) for m in mask],
                                dtype=mdtype)
        # Hardmask: don't unmask the data
        if self._hardmask:
            for n in idtype.names:
                current_mask[n] |= mask[n]
        # Softmask: set everything to False
        # If it's obviously a compatible scalar, use a quick update
        # method...
        elif isinstance(mask, (int, float, np.bool_, np.number)):
            current_mask[...] = mask
        # ...otherwise fall back to the slower, general purpose way.
        else:
            current_mask.flat = mask
    # Reshape if needed
    if current_mask.shape:
        current_mask.shape = self.shape
    return
# Public-ish alias kept for backwards compatibility.
_set_mask = __setmask__
#....
def _get_mask(self):
    """Return the current mask (an array of booleans, or `nomask`).
    """
    # We could try to force a reshape, but that wouldn't work in some cases.
    # return self._mask.reshape(self.shape)
    return self._mask
mask = property(fget=_get_mask, fset=__setmask__, doc="Mask")
def _get_recordmask(self):
"""
Return the mask of the records.
A record is masked when all the fields are masked.
"""
_mask = ndarray.__getattribute__(self, '_mask').view(ndarray)
if _mask.dtype.names is None:
return _mask
return np.all(flatten_structured_array(_mask), axis= -1)
def _set_recordmask(self):
"""Return the mask of the records.
A record is masked when all the fields are masked.
"""
raise NotImplementedError("Coming soon: setting the mask per records!")
recordmask = property(fget=_get_recordmask)
#............................................
def harden_mask(self):
"""
Force the mask to hard.
Whether the mask of a masked array is hard or soft is determined by
its `hardmask` property. `harden_mask` sets `hardmask` to True.
See Also
--------
hardmask
"""
self._hardmask = True
return self
def soften_mask(self):
"""
Force the mask to soft.
Whether the mask of a masked array is hard or soft is determined by
its `hardmask` property. `soften_mask` sets `hardmask` to False.
See Also
--------
hardmask
"""
self._hardmask = False
return self
hardmask = property(fget=lambda self: self._hardmask,
doc="Hardness of the mask")
def unshare_mask(self):
"""
Copy the mask and set the sharedmask flag to False.
Whether the mask is shared between masked arrays can be seen from
the `sharedmask` property. `unshare_mask` ensures the mask is not shared.
A copy of the mask is only made if it was shared.
See Also
--------
sharedmask
"""
if self._sharedmask:
self._mask = self._mask.copy()
self._sharedmask = False
return self
sharedmask = property(fget=lambda self: self._sharedmask,
doc="Share status of the mask (read-only).")
def shrink_mask(self):
"""
Reduce a mask to nomask when possible.
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4)
>>> x.mask
array([[False, False],
[False, False]], dtype=bool)
>>> x.shrink_mask()
>>> x.mask
False
"""
m = self._mask
if m.ndim and not m.any():
self._mask = nomask
return self
#............................................
baseclass = property(fget=lambda self:self._baseclass,
doc="Class of the underlying data (read-only).")
def _get_data(self):
"""Return the current data, as a view of the original
underlying data.
"""
return ndarray.view(self, self._baseclass)
_data = property(fget=_get_data)
data = property(fget=_get_data)
def _get_flat(self):
"Return a flat iterator."
return MaskedIterator(self)
#
def _set_flat (self, value):
"Set a flattened version of self to value."
y = self.ravel()
y[:] = value
#
flat = property(fget=_get_flat, fset=_set_flat,
doc="Flat version of the array.")
def get_fill_value(self):
"""
Return the filling value of the masked array.
Returns
-------
fill_value : scalar
The filling value.
Examples
--------
>>> for dt in [np.int32, np.int64, np.float64, np.complex128]:
... np.ma.array([0, 1], dtype=dt).get_fill_value()
...
999999
999999
1e+20
(1e+20+0j)
>>> x = np.ma.array([0, 1.], fill_value=-np.inf)
>>> x.get_fill_value()
-inf
"""
if self._fill_value is None:
self._fill_value = _check_fill_value(None, self.dtype)
return self._fill_value[()]
def set_fill_value(self, value=None):
"""
Set the filling value of the masked array.
Parameters
----------
value : scalar, optional
The new filling value. Default is None, in which case a default
based on the data type is used.
See Also
--------
ma.set_fill_value : Equivalent function.
Examples
--------
>>> x = np.ma.array([0, 1.], fill_value=-np.inf)
>>> x.fill_value
-inf
>>> x.set_fill_value(np.pi)
>>> x.fill_value
3.1415926535897931
Reset to default:
>>> x.set_fill_value()
>>> x.fill_value
1e+20
"""
target = _check_fill_value(value, self.dtype)
_fill_value = self._fill_value
if _fill_value is None:
# Create the attribute if it was undefined
self._fill_value = target
else:
# Don't overwrite the attribute, just fill it (for propagation)
_fill_value[()] = target
fill_value = property(fget=get_fill_value, fset=set_fill_value,
doc="Filling value.")
    def filled(self, fill_value=None):
        """
        Return a copy of self, with masked values filled with a given value.

        Parameters
        ----------
        fill_value : scalar, optional
            The value to use for invalid entries (None by default).
            If None, the `fill_value` attribute of the array is used instead.

        Returns
        -------
        filled_array : ndarray
            A copy of ``self`` with invalid entries replaced by *fill_value*
            (be it the function argument or the attribute of ``self``).

        Notes
        -----
        The result is **not** a MaskedArray!

        Examples
        --------
        >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
        >>> x.filled()
        array([1, 2, -999, 4, -999])
        >>> type(x.filled())
        <type 'numpy.ndarray'>

        Subclassing is preserved. This means that if the data part of the masked
        array is a matrix, `filled` returns a matrix:

        >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
        >>> x.filled()
        matrix([[ 1, 999999],
                [999999,      4]])
        """
        m = self._mask
        # No mask at all: the raw data can be returned as-is (no copy).
        if m is nomask:
            return self._data
        #
        if fill_value is None:
            fill_value = self.fill_value
        else:
            # Validate/convert the user-supplied fill value for this dtype.
            fill_value = _check_fill_value(fill_value, self.dtype)
        #
        # The masked singleton has no data to fill; return the fill value.
        if self is masked_singleton:
            return np.asanyarray(fill_value)
        #
        if m.dtype.names:
            # Structured dtype: fill each field recursively.
            result = self._data.copy('K')
            _recursive_filled(result, self._mask, fill_value)
        elif not m.any():
            # Mask exists but nothing is masked: raw data suffices.
            return self._data
        else:
            result = self._data.copy('K')
            try:
                np.copyto(result, fill_value, where=m)
            except (TypeError, AttributeError):
                # fill_value is incompatible with the dtype: fall back to an
                # object array and select element-wise.
                fill_value = narray(fill_value, dtype=object)
                d = result.astype(object)
                result = np.choose(m, (d, fill_value))
            except IndexError:
                # Acceptable only for 0-d data; re-raise otherwise.
                if self._data.shape:
                    raise
                elif m:
                    result = np.array(fill_value, dtype=self.dtype)
                else:
                    result = self._data
        return result
def compressed(self):
"""
Return all the non-masked data as a 1-D array.
Returns
-------
data : ndarray
A new `ndarray` holding the non-masked data is returned.
Notes
-----
The result is **not** a MaskedArray!
Examples
--------
>>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3)
>>> x.compressed()
array([0, 1])
>>> type(x.compressed())
<type 'numpy.ndarray'>
"""
data = ndarray.ravel(self._data)
if self._mask is not nomask:
data = data.compress(np.logical_not(ndarray.ravel(self._mask)))
return data
def compress(self, condition, axis=None, out=None):
"""
Return `a` where condition is ``True``.
If condition is a `MaskedArray`, missing values are considered
as ``False``.
Parameters
----------
condition : var
Boolean 1-d array selecting which entries to return. If len(condition)
is less than the size of a along the axis, then output is truncated
to length of condition array.
axis : {None, int}, optional
Axis along which the operation must be performed.
out : {None, ndarray}, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
Returns
-------
result : MaskedArray
A :class:`MaskedArray` object.
Notes
-----
Please note the difference with :meth:`compressed` !
The output of :meth:`compress` has a mask, the output of
:meth:`compressed` does not.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> x.compress([1, 0, 1])
masked_array(data = [1 3],
mask = [False False],
fill_value=999999)
>>> x.compress([1, 0, 1], axis=1)
masked_array(data =
[[1 3]
[-- --]
[7 9]],
mask =
[[False False]
[ True True]
[False False]],
fill_value=999999)
"""
# Get the basic components
(_data, _mask) = (self._data, self._mask)
# Force the condition to a regular ndarray (forget the missing values...)
condition = np.array(condition, copy=False, subok=False)
#
_new = _data.compress(condition, axis=axis, out=out).view(type(self))
_new._update_from(self)
if _mask is not nomask:
_new._mask = _mask.compress(condition, axis=axis)
return _new
    #............................................
    def __str__(self):
        """String representation.
        """
        if masked_print_option.enabled():
            # `f` is the display token substituted for masked entries
            # (typically '--').
            f = masked_print_option
            if self is masked:
                return str(f)
            m = self._mask
            if m is nomask:
                res = self._data
            else:
                if m.shape == ():
                    # 0-d array: handle structured and plain dtypes separately.
                    if m.dtype.names:
                        # View the record mask as a flat boolean vector.
                        m = m.view((bool, len(m.dtype)))
                        if m.any():
                            # Substitute the print token field-by-field.
                            return str(tuple((f if _m else _d) for _d, _m in
                                             zip(self._data.tolist(), m)))
                        else:
                            return str(self._data)
                    elif m:
                        return str(f)
                    else:
                        return str(self._data)
                # convert to object array to make filled work
                names = self.dtype.names
                if names is None:
                    res = self._data.astype("O")
                    res.view(ndarray)[m] = f
                else:
                    # Structured dtype: build an all-object descr and insert
                    # the print token recursively in each masked field.
                    rdtype = _recursive_make_descr(self.dtype, "O")
                    res = self._data.astype(rdtype)
                    _recursive_printoption(res, m, f)
        else:
            # Display disabled: show data with masked entries filled.
            res = self.filled(self.fill_value)
        return str(res)
def __repr__(self):
"""Literal string representation.
"""
n = len(self.shape)
if self._baseclass is np.ndarray:
name = 'array'
else:
name = self._baseclass.__name__
parameters = dict(name=name, nlen=" " * len(name),
data=str(self), mask=str(self._mask),
fill=str(self.fill_value), dtype=str(self.dtype))
if self.dtype.names:
if n <= 1:
return _print_templates['short_flx'] % parameters
return _print_templates['long_flx'] % parameters
elif n <= 1:
return _print_templates['short_std'] % parameters
return _print_templates['long_std'] % parameters
    def __eq__(self, other):
        "Check whether other equals self elementwise"
        # Comparing the masked singleton yields masked.
        if self is masked:
            return masked
        omask = getattr(other, '_mask', nomask)
        if omask is nomask:
            # Other is unmasked: compare against our data with masked
            # entries filled by 0, then restore our mask on the result.
            check = ndarray.__eq__(self.filled(0), other)
            try:
                check = check.view(type(self))
                check._mask = self._mask
            except AttributeError:
                # Dang, we have a bool instead of an array: return the bool
                return check
        else:
            # Both operands are masked: combine the two masks.
            odata = filled(other, 0)
            check = ndarray.__eq__(self.filled(0), odata).view(type(self))
            if self._mask is nomask:
                check._mask = omask
            else:
                mask = mask_or(self._mask, omask)
                if mask.dtype.names:
                    # Structured dtype: collapse the per-field mask to a
                    # single boolean per element.
                    if mask.size > 1:
                        axis = 1
                    else:
                        axis = None
                    try:
                        mask = mask.view((bool_, len(self.dtype))).all(axis)
                    except ValueError:
                        # Nested fields: fall back to an explicit reduction.
                        mask = np.all([[f[n].all() for n in mask.dtype.names]
                                       for f in mask], axis=axis)
                check._mask = mask
        return check
    #
    def __ne__(self, other):
        "Check whether other doesn't equal self elementwise"
        # Mirror image of __eq__ above, using ndarray.__ne__.
        if self is masked:
            return masked
        omask = getattr(other, '_mask', nomask)
        if omask is nomask:
            check = ndarray.__ne__(self.filled(0), other)
            try:
                check = check.view(type(self))
                check._mask = self._mask
            except AttributeError:
                # In case check is a boolean (or a numpy.bool)
                return check
        else:
            odata = filled(other, 0)
            check = ndarray.__ne__(self.filled(0), odata).view(type(self))
            if self._mask is nomask:
                check._mask = omask
            else:
                mask = mask_or(self._mask, omask)
                if mask.dtype.names:
                    if mask.size > 1:
                        axis = 1
                    else:
                        axis = None
                    try:
                        mask = mask.view((bool_, len(self.dtype))).all(axis)
                    except ValueError:
                        mask = np.all([[f[n].all() for n in mask.dtype.names]
                                       for f in mask], axis=axis)
                check._mask = mask
        return check
    #
    # Binary arithmetic operators: each delegates to the corresponding
    # module-level masked ufunc, which handles mask propagation.
    def __add__(self, other):
        "Add other to self, and return a new masked array."
        return add(self, other)
    #
    def __radd__(self, other):
        # Addition is commutative, so the operand order is irrelevant.
        "Add self to other, and return a new masked array."
        return add(self, other)
    #
    def __sub__(self, other):
        "Subtract other from self, and return a new masked array."
        return subtract(self, other)
    #
    def __rsub__(self, other):
        "Subtract self from other, and return a new masked array."
        return subtract(other, self)
    #
    def __mul__(self, other):
        "Multiply self by other, and return a new masked array."
        return multiply(self, other)
    #
    def __rmul__(self, other):
        # Multiplication is commutative, so the operand order is irrelevant.
        "Multiply self by other, and return a new masked array."
        return multiply(self, other)
    #
    def __div__(self, other):
        "Divide self by other, and return a new masked array."
        return divide(self, other)
    #
    def __truediv__(self, other):
        "True-divide self by other, and return a new masked array."
        return true_divide(self, other)
    #
    def __rtruediv__(self, other):
        "True-divide other by self, and return a new masked array."
        return true_divide(other, self)
    #
    def __floordiv__(self, other):
        "Floor-divide self by other, and return a new masked array."
        return floor_divide(self, other)
    #
    def __rfloordiv__(self, other):
        "Floor-divide other by self, and return a new masked array."
        return floor_divide(other, self)
    #
    def __pow__(self, other):
        "Raise self to the power other, masking the potential NaNs/Infs"
        return power(self, other)
    #
    def __rpow__(self, other):
        "Raise other to the power self, masking the potential NaNs/Infs"
        return power(other, self)
    #............................................
    # In-place arithmetic operators.  Each first merges the other operand's
    # mask into self._mask, then applies the underlying ndarray in-place op
    # with masked entries of `other` replaced by the operation's identity
    # (0 for +/-, 1 for *, /), so masked slots keep their stored data.
    def __iadd__(self, other):
        "Add other to self in-place."
        m = getmask(other)
        if self._mask is nomask:
            # Materialise a full-size mask only if `other` actually has
            # masked entries.
            if m is not nomask and m.any():
                self._mask = make_mask_none(self.shape, self.dtype)
                self._mask += m
        else:
            if m is not nomask:
                self._mask += m
        ndarray.__iadd__(self._data, np.where(self._mask, 0, getdata(other)))
        return self
    #....
    def __isub__(self, other):
        "Subtract other from self in-place."
        m = getmask(other)
        if self._mask is nomask:
            if m is not nomask and m.any():
                self._mask = make_mask_none(self.shape, self.dtype)
                self._mask += m
        elif m is not nomask:
            self._mask += m
        ndarray.__isub__(self._data, np.where(self._mask, 0, getdata(other)))
        return self
    #....
    def __imul__(self, other):
        "Multiply self by other in-place."
        m = getmask(other)
        if self._mask is nomask:
            if m is not nomask and m.any():
                self._mask = make_mask_none(self.shape, self.dtype)
                self._mask += m
        elif m is not nomask:
            self._mask += m
        ndarray.__imul__(self._data, np.where(self._mask, 1, getdata(other)))
        return self
    #....
    def __idiv__(self, other):
        "Divide self by other in-place."
        other_data = getdata(other)
        # Mask entries where the division is undefined (e.g. zero divisor).
        dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
        other_mask = getmask(other)
        new_mask = mask_or(other_mask, dom_mask)
        # The following 3 lines control the domain filling
        if dom_mask.any():
            (_, fval) = ufunc_fills[np.divide]
            other_data = np.where(dom_mask, fval, other_data)
        # self._mask = mask_or(self._mask, new_mask)
        self._mask |= new_mask
        ndarray.__idiv__(self._data, np.where(self._mask, 1, other_data))
        return self
    #....
    def __ifloordiv__(self, other):
        "Floor divide self by other in-place."
        other_data = getdata(other)
        # Same domain handling as __idiv__, for floor division.
        dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
        other_mask = getmask(other)
        new_mask = mask_or(other_mask, dom_mask)
        # The following 3 lines control the domain filling
        if dom_mask.any():
            (_, fval) = ufunc_fills[np.floor_divide]
            other_data = np.where(dom_mask, fval, other_data)
        # self._mask = mask_or(self._mask, new_mask)
        self._mask |= new_mask
        ndarray.__ifloordiv__(self._data, np.where(self._mask, 1, other_data))
        return self
    #....
    def __itruediv__(self, other):
        "True divide self by other in-place."
        other_data = getdata(other)
        # Same domain handling as __idiv__, for true division.
        dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
        other_mask = getmask(other)
        new_mask = mask_or(other_mask, dom_mask)
        # The following 3 lines control the domain filling
        if dom_mask.any():
            (_, fval) = ufunc_fills[np.true_divide]
            other_data = np.where(dom_mask, fval, other_data)
        # self._mask = mask_or(self._mask, new_mask)
        self._mask |= new_mask
        ndarray.__itruediv__(self._data, np.where(self._mask, 1, other_data))
        return self
#...
def __ipow__(self, other):
"Raise self to the power other, in place."
other_data = getdata(other)
other_mask = getmask(other)
with np.errstate():
np.seterr(divide='ignore', invalid='ignore')
ndarray.__ipow__(self._data, np.where(self._mask, 1, other_data))
invalid = np.logical_not(np.isfinite(self._data))
if invalid.any():
if self._mask is not nomask:
self._mask |= invalid
else:
self._mask = invalid
np.copyto(self._data, self.fill_value, where=invalid)
new_mask = mask_or(other_mask, invalid)
self._mask = mask_or(self._mask, new_mask)
return self
#............................................
def __float__(self):
"Convert to float."
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "
"to Python scalars")
elif self._mask:
warnings.warn("Warning: converting a masked element to nan.")
return np.nan
return float(self.item())
def __int__(self):
"Convert to int."
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "
"to Python scalars")
elif self._mask:
raise MaskError('Cannot convert masked element to a Python int.')
return int(self.item())
def get_imag(self):
"""
Return the imaginary part of the masked array.
The returned array is a view on the imaginary part of the `MaskedArray`
whose `get_imag` method is called.
Parameters
----------
None
Returns
-------
result : MaskedArray
The imaginary part of the masked array.
See Also
--------
get_real, real, imag
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.get_imag()
masked_array(data = [1.0 -- 1.6],
mask = [False True False],
fill_value = 1e+20)
"""
result = self._data.imag.view(type(self))
result.__setmask__(self._mask)
return result
imag = property(fget=get_imag, doc="Imaginary part.")
def get_real(self):
"""
Return the real part of the masked array.
The returned array is a view on the real part of the `MaskedArray`
whose `get_real` method is called.
Parameters
----------
None
Returns
-------
result : MaskedArray
The real part of the masked array.
See Also
--------
get_imag, real, imag
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.get_real()
masked_array(data = [1.0 -- 3.45],
mask = [False True False],
fill_value = 1e+20)
"""
result = self._data.real.view(type(self))
result.__setmask__(self._mask)
return result
real = property(fget=get_real, doc="Real part")
#............................................
def count(self, axis=None):
"""
Count the non-masked elements of the array along the given axis.
Parameters
----------
axis : int, optional
Axis along which to count the non-masked elements. If `axis` is
`None`, all non-masked elements are counted.
Returns
-------
result : int or ndarray
If `axis` is `None`, an integer count is returned. When `axis` is
not `None`, an array with shape determined by the lengths of the
remaining axes, is returned.
See Also
--------
count_masked : Count masked elements in array or along a given axis.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.arange(6).reshape((2, 3))
>>> a[1, :] = ma.masked
>>> a
masked_array(data =
[[0 1 2]
[-- -- --]],
mask =
[[False False False]
[ True True True]],
fill_value = 999999)
>>> a.count()
3
When the `axis` keyword is specified an array of appropriate size is
returned.
>>> a.count(axis=0)
array([1, 1, 1])
>>> a.count(axis=1)
array([3, 0])
"""
m = self._mask
s = self.shape
if m is nomask:
if axis is None:
return self.size
else:
n = s[axis]
t = list(s)
del t[axis]
return np.full(t, n, dtype=np.intp)
n1 = np.size(m, axis)
n2 = np.sum(m, axis=axis, dtype=np.intp)
if axis is None:
return (n1 - n2)
else:
return narray(n1 - n2)
#............................................
flatten = _arraymethod('flatten')
#
def ravel(self):
"""
Returns a 1D version of self, as a view.
Returns
-------
MaskedArray
Output view is of shape ``(self.size,)`` (or
``(np.ma.product(self.shape),)``).
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print x.ravel()
[1 -- 3 -- 5 -- 7 -- 9]
"""
r = ndarray.ravel(self._data).view(type(self))
r._update_from(self)
if self._mask is not nomask:
r._mask = ndarray.ravel(self._mask).reshape(r.shape)
else:
r._mask = nomask
return r
#
repeat = _arraymethod('repeat')
#
def reshape (self, *s, **kwargs):
"""
Give a new shape to the array without changing its data.
Returns a masked array containing the same data, but with a new shape.
The result is a view on the original array; if this is not possible, a
ValueError is raised.
Parameters
----------
shape : int or tuple of ints
The new shape should be compatible with the original shape. If an
integer is supplied, then the result will be a 1-D array of that
length.
order : {'C', 'F'}, optional
Determines whether the array data should be viewed as in C
(row-major) or FORTRAN (column-major) order.
Returns
-------
reshaped_array : array
A new view on the array.
See Also
--------
reshape : Equivalent function in the masked array module.
numpy.ndarray.reshape : Equivalent method on ndarray object.
numpy.reshape : Equivalent function in the NumPy module.
Notes
-----
The reshaping operation cannot guarantee that a copy will not be made,
to modify the shape in place, use ``a.shape = s``
Examples
--------
>>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1])
>>> print x
[[-- 2]
[3 --]]
>>> x = x.reshape((4,1))
>>> print x
[[--]
[2]
[3]
[--]]
"""
kwargs.update(order=kwargs.get('order', 'C'))
result = self._data.reshape(*s, **kwargs).view(type(self))
result._update_from(self)
mask = self._mask
if mask is not nomask:
result._mask = mask.reshape(*s, **kwargs)
return result
#
def resize(self, newshape, refcheck=True, order=False):
"""
.. warning::
This method does nothing, except raise a ValueError exception. A
masked array does not own its data and therefore cannot safely be
resized in place. Use the `numpy.ma.resize` function instead.
This method is difficult to implement safely and may be deprecated in
future releases of NumPy.
"""
# Note : the 'order' keyword looks broken, let's just drop it
# try:
# ndarray.resize(self, newshape, refcheck=refcheck)
# if self.mask is not nomask:
# self._mask.resize(newshape, refcheck=refcheck)
# except ValueError:
# raise ValueError("Cannot resize an array that has been referenced "
# "or is referencing another array in this way.\n"
# "Use the numpy.ma.resize function.")
# return None
errmsg = "A masked array does not own its data "\
"and therefore cannot be resized.\n" \
"Use the numpy.ma.resize function instead."
raise ValueError(errmsg)
    #
    def put(self, indices, values, mode='raise'):
        """
        Set storage-indexed locations to corresponding values.

        Sets self._data.flat[n] = values[n] for each n in indices.
        If `values` is shorter than `indices` then it will repeat.
        If `values` has some masked values, the initial mask is updated
        in consequence, else the corresponding values are unmasked.

        Parameters
        ----------
        indices : 1-D array_like
            Target indices, interpreted as integers.
        values : array_like
            Values to place in self._data copy at target indices.
        mode : {'raise', 'wrap', 'clip'}, optional
            Specifies how out-of-bounds indices will behave.
            'raise' : raise an error.
            'wrap' : wrap around.
            'clip' : clip to the range.

        Notes
        -----
        `values` can be a scalar or length 1 array.

        Examples
        --------
        >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
        >>> x.put([0,4,8],[10,20,30])
        >>> print x
        [[10 -- 3]
         [-- 20 --]
         [7 -- 30]]
        """
        m = self._mask
        # Hard mask: Get rid of the values/indices that fall on masked data
        if self._hardmask and self._mask is not nomask:
            mask = self._mask[indices]
            indices = narray(indices, copy=False)
            values = narray(values, copy=False, subok=True)
            # Broadcast values to the indices' shape before filtering, so
            # the two boolean selections below stay aligned.
            values.resize(indices.shape)
            indices = indices[~mask]
            values = values[~mask]
        #....
        self._data.put(indices, values, mode=mode)
        #....
        # Update the mask: targeted slots become unmasked, unless the
        # corresponding entry of `values` is itself masked.
        if m is nomask:
            m = getmask(values)
        else:
            m = m.copy()
            if getmask(values) is nomask:
                m.put(indices, False, mode=mode)
            else:
                m.put(indices, values._mask, mode=mode)
            # Collapse back to nomask if nothing is masked any more.
            m = make_mask(m, copy=False, shrink=True)
        self._mask = m
#............................................
def ids (self):
"""
Return the addresses of the data and mask areas.
Parameters
----------
None
Examples
--------
>>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1])
>>> x.ids()
(166670640, 166659832)
If the array has no mask, the address of `nomask` is returned. This address
is typically not close to the data in memory:
>>> x = np.ma.array([1, 2, 3])
>>> x.ids()
(166691080, 3083169284L)
"""
if self._mask is nomask:
return (self.ctypes.data, id(nomask))
return (self.ctypes.data, self._mask.ctypes.data)
def iscontiguous(self):
"""
Return a boolean indicating whether the data is contiguous.
Parameters
----------
None
Examples
--------
>>> x = np.ma.array([1, 2, 3])
>>> x.iscontiguous()
True
`iscontiguous` returns one of the flags of the masked array:
>>> x.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : True
OWNDATA : False
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
"""
return self.flags['CONTIGUOUS']
#............................................
def all(self, axis=None, out=None):
"""
Check if all of the elements of `a` are true.
Performs a :func:`logical_and` over the given axis and returns the result.
Masked values are considered as True during computation.
For convenience, the output array is masked where ALL the values along the
current axis are masked: if the output would have been a scalar and that
all the values are masked, then the output is `masked`.
Parameters
----------
axis : {None, integer}
Axis to perform the operation over.
If None, perform over flattened array.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
See Also
--------
all : equivalent function
Examples
--------
>>> np.ma.array([1,2,3]).all()
True
>>> a = np.ma.array([1,2,3], mask=True)
>>> (a.all() is np.ma.masked)
True
"""
mask = _check_mask_axis(self._mask, axis)
if out is None:
d = self.filled(True).all(axis=axis).view(type(self))
if d.ndim:
d.__setmask__(mask)
elif mask:
return masked
return d
self.filled(True).all(axis=axis, out=out)
if isinstance(out, MaskedArray):
if out.ndim or mask:
out.__setmask__(mask)
return out
def any(self, axis=None, out=None):
"""
Check if any of the elements of `a` are true.
Performs a logical_or over the given axis and returns the result.
Masked values are considered as False during computation.
Parameters
----------
axis : {None, integer}
Axis to perform the operation over.
If None, perform over flattened array and return a scalar.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
See Also
--------
any : equivalent function
"""
mask = _check_mask_axis(self._mask, axis)
if out is None:
d = self.filled(False).any(axis=axis).view(type(self))
if d.ndim:
d.__setmask__(mask)
elif mask:
d = masked
return d
self.filled(False).any(axis=axis, out=out)
if isinstance(out, MaskedArray):
if out.ndim or mask:
out.__setmask__(mask)
return out
def nonzero(self):
"""
Return the indices of unmasked elements that are not zero.
Returns a tuple of arrays, one for each dimension, containing the
indices of the non-zero elements in that dimension. The corresponding
non-zero values can be obtained with::
a[a.nonzero()]
To group the indices by element, rather than dimension, use
instead::
np.transpose(a.nonzero())
The result of this is always a 2d array, with a row for each non-zero
element.
Parameters
----------
None
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
numpy.nonzero :
Function operating on ndarrays.
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.array(np.eye(3))
>>> x
masked_array(data =
[[ 1. 0. 0.]
[ 0. 1. 0.]
[ 0. 0. 1.]],
mask =
False,
fill_value=1e+20)
>>> x.nonzero()
(array([0, 1, 2]), array([0, 1, 2]))
Masked elements are ignored.
>>> x[1, 1] = ma.masked
>>> x
masked_array(data =
[[1.0 0.0 0.0]
[0.0 -- 0.0]
[0.0 0.0 1.0]],
mask =
[[False False False]
[False True False]
[False False False]],
fill_value=1e+20)
>>> x.nonzero()
(array([0, 2]), array([0, 2]))
Indices can also be grouped by element.
>>> np.transpose(x.nonzero())
array([[0, 0],
[2, 2]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, ma.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
masked_array(data =
[[False False False]
[ True True True]
[ True True True]],
mask =
False,
fill_value=999999)
>>> ma.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
The ``nonzero`` method of the condition array can also be called.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return narray(self.filled(0), copy=False).nonzero()
    def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
        """
        (this docstring should be overwritten)
        """
        #!!!: implement out + test!
        m = self._mask
        if m is nomask:
            # No mask: defer to the plain ndarray implementation.
            result = super(MaskedArray, self).trace(offset=offset, axis1=axis1,
                                                    axis2=axis2, out=out)
            return result.astype(dtype)
        else:
            # Masked case: sum the diagonal, treating masked entries as 0.
            D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
            return D.astype(dtype).filled(0).sum(axis=None, out=out)
    # Inherit the documentation from the ndarray method.
    trace.__doc__ = ndarray.trace.__doc__
def sum(self, axis=None, dtype=None, out=None):
"""
Return the sum of the array elements over the given axis.
Masked elements are set to 0 internally.
Parameters
----------
axis : {None, -1, int}, optional
Axis along which the sum is computed. The default
(`axis` = None) is to compute over the flattened array.
dtype : {None, dtype}, optional
Determines the type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and
the type of a is an integer type of precision less than the default
platform integer, then the default platform integer precision is
used. Otherwise, the dtype is the same as that of a.
out : {None, ndarray}, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
Returns
-------
sum_along_axis : MaskedArray or scalar
An array with the same shape as self, with the specified
axis removed. If self is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print x.sum()
25
>>> print x.sum(axis=1)
[4 5 16]
>>> print x.sum(axis=0)
[8 5 12]
>>> print type(x.sum(axis=0, dtype=np.int64)[0])
<type 'numpy.int64'>
"""
_mask = ndarray.__getattribute__(self, '_mask')
newmask = _check_mask_axis(_mask, axis)
# No explicit output
if out is None:
result = self.filled(0).sum(axis, dtype=dtype)
rndim = getattr(result, 'ndim', 0)
if rndim:
result = result.view(type(self))
result.__setmask__(newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(0).sum(axis, dtype=dtype, out=out)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
def cumsum(self, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along the given axis.
The cumulative sum is calculated over the flattened array by
default, otherwise over the specified axis.
Masked values are set to 0 internally during the computation.
However, their position is saved, and the result will be masked at
the same locations.
Parameters
----------
axis : {None, -1, int}, optional
Axis along which the sum is computed. The default (`axis` = None) is to
compute over the flattened array. `axis` may be negative, in which case
it counts from the last to the first axis.
dtype : {None, dtype}, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
Returns
-------
cumsum : ndarray.
A new array holding the result is returned unless ``out`` is
specified, in which case a reference to ``out`` is returned.
Notes
-----
The mask is lost if `out` is not a valid :class:`MaskedArray` !
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
>>> print marr.cumsum()
[0 1 3 -- -- -- 9 16 24 33]
"""
result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(self.mask)
return out
result = result.view(type(self))
result.__setmask__(self._mask)
return result
    def prod(self, axis=None, dtype=None, out=None):
        """
        Return the product of the array elements over the given axis.
        Masked elements are set to 1 internally for computation.

        Parameters
        ----------
        axis : {None, int}, optional
            Axis over which the product is taken. If None is used, then the
            product is over all the array elements.
        dtype : {None, dtype}, optional
            Determines the type of the returned array and of the accumulator
            where the elements are multiplied. If ``dtype`` has the value ``None``
            and the type of a is an integer type of precision less than the default
            platform integer, then the default platform integer precision is
            used. Otherwise, the dtype is the same as that of a.
        out : {None, array}, optional
            Alternative output array in which to place the result. It must have
            the same shape as the expected output but the type will be cast if
            necessary.

        Returns
        -------
        product_along_axis : {array, scalar}, see dtype parameter above.
            Returns an array whose shape is the same as a with the specified
            axis removed. Returns a 0d array when a is 1d or axis=None.
            Returns a reference to the specified output array if specified.

        See Also
        --------
        prod : equivalent function

        Notes
        -----
        Arithmetic is modular when using integer types, and no error is raised
        on overflow.

        Examples
        --------
        >>> np.prod([1.,2.])
        2.0
        >>> np.prod([1.,2.], dtype=np.int32)
        2
        >>> np.prod([[1.,2.],[3.,4.]])
        24.0
        >>> np.prod([[1.,2.],[3.,4.]], axis=1)
        array([  2.,  12.])
        """
        _mask = ndarray.__getattribute__(self, '_mask')
        # Mask of the result: True where everything along `axis` was masked.
        newmask = _check_mask_axis(_mask, axis)
        # No explicit output
        if out is None:
            result = self.filled(1).prod(axis, dtype=dtype)
            rndim = getattr(result, 'ndim', 0)
            if rndim:
                result = result.view(type(self))
                result.__setmask__(newmask)
            elif newmask:
                # Fully-masked scalar result collapses to the masked singleton.
                result = masked
            return result
        # Explicit output
        result = self.filled(1).prod(axis, dtype=dtype, out=out)
        if isinstance(out, MaskedArray):
            outmask = getattr(out, '_mask', nomask)
            if (outmask is nomask):
                outmask = out._mask = make_mask_none(out.shape)
            outmask.flat = newmask
        return out
    # Alias kept for backward compatibility.
    product = prod
def cumprod(self, axis=None, dtype=None, out=None):
    """
    Return the cumulative product of the elements along the given axis.

    The cumulative product is taken over the flattened array by
    default, otherwise over the specified axis.

    Masked values are set to 1 internally during the computation.
    However, their position is saved, and the result will be masked at
    the same locations.

    Parameters
    ----------
    axis : {None, -1, int}, optional
        Axis along which the product is computed. The default
        (`axis` = None) is to compute over the flattened array.
    dtype : {None, dtype}, optional
        Determines the type of the returned array and of the accumulator
        where the elements are multiplied. If ``dtype`` has the value ``None``
        and the type of ``a`` is an integer type of precision less than the
        default platform integer, then the default platform integer precision
        is used. Otherwise, the dtype is the same as that of ``a``.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output
        but the type will be cast if necessary.

    Returns
    -------
    cumprod : ndarray
        A new array holding the result is returned unless out is specified,
        in which case a reference to out is returned.

    Notes
    -----
    The mask is lost if `out` is not a valid MaskedArray !

    Arithmetic is modular when using integer types, and no error is
    raised on overflow.
    """
    # Masked entries are neutral (1) for the running product, but the
    # original mask is re-applied to the result afterwards.
    result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out)
    if out is not None:
        if isinstance(out, MaskedArray):
            out.__setmask__(self._mask)
        return out
    result = result.view(type(self))
    result.__setmask__(self._mask)
    return result
def mean(self, axis=None, dtype=None, out=None):
    """
    Returns the average of the array elements.

    Masked entries are ignored.
    The average is taken over the flattened array by default, otherwise over
    the specified axis. Refer to `numpy.mean` for the full documentation.

    Parameters
    ----------
    axis : int, optional
        Axis along which the means are computed. The default is to compute
        the mean of the flattened array.
    dtype : dtype, optional
        Type to use in computing the mean. For integer inputs, the default
        is float64; for floating point inputs, it is the same as the input
        dtype.
    out : ndarray, optional
        Alternative output array in which to place the result. It must have
        the same shape as the expected output but the type will be cast if
        necessary.

    Returns
    -------
    mean : ndarray, see dtype parameter above
        If `out=None`, returns a new array containing the mean values,
        otherwise a reference to the output array is returned.

    See Also
    --------
    numpy.ma.mean : Equivalent function.
    numpy.mean : Equivalent function on non-masked arrays.
    numpy.ma.average : Weighted average.

    Examples
    --------
    >>> a = np.ma.array([1,2,3], mask=[False, False, True])
    >>> a
    masked_array(data = [1 2 --],
                 mask = [False False  True],
           fill_value = 999999)
    >>> a.mean()
    1.5
    """
    if self._mask is nomask:
        # No masked values: defer to the plain ndarray implementation.
        result = super(MaskedArray, self).mean(axis=axis, dtype=dtype)
    else:
        # Mean over the unmasked entries only: sum / count.
        dsum = self.sum(axis=axis, dtype=dtype)
        cnt = self.count(axis=axis)
        if cnt.shape == () and (cnt == 0):
            # Everything is masked: the scalar mean is undefined.
            result = masked
        else:
            result = dsum * 1. / cnt
    if out is not None:
        out.flat = result
        if isinstance(out, MaskedArray):
            outmask = getattr(out, '_mask', nomask)
            if (outmask is nomask):
                outmask = out._mask = make_mask_none(out.shape)
            outmask.flat = getattr(result, '_mask', nomask)
        return out
    return result
def anom(self, axis=None, dtype=None):
    """
    Compute the anomalies (deviations from the arithmetic mean)
    along the given axis.

    Returns an array of anomalies, with the same shape as the input and
    where the arithmetic mean is computed along the given axis.

    Parameters
    ----------
    axis : int, optional
        Axis over which the anomalies are taken.
        The default is to use the mean of the flattened array as reference.
    dtype : dtype, optional
        Type to use in computing the variance. For arrays of integer type
        the default is float32; for arrays of float types it is the same as
        the array type.

    See Also
    --------
    mean : Compute the mean of the array.

    Examples
    --------
    >>> a = np.ma.array([1,2,3])
    >>> a.anom()
    masked_array(data = [-1.  0.  1.],
                 mask = False,
           fill_value = 1e+20)
    """
    m = self.mean(axis, dtype)
    # Test explicitly against None: the previous ``if not axis`` treated
    # axis=0 like axis=None and only produced the right answer because a
    # mean reduced over axis 0 happens to broadcast against the trailing
    # dimensions. ``expand_dims`` makes the alignment explicit for every
    # axis value.
    if axis is None:
        return self - m
    return self - expand_dims(m, axis)
def var(self, axis=None, dtype=None, out=None, ddof=0):
    ""
    # Easy case: nomask, business as usual
    if self._mask is nomask:
        return self._data.var(axis=axis, dtype=dtype, out=out, ddof=ddof)
    # Some data are masked, yay!
    # Unbiased/biased estimate is controlled through ddof, applied to the
    # per-axis count of unmasked entries.
    cnt = self.count(axis=axis) - ddof
    danom = self.anom(axis=axis, dtype=dtype)
    if iscomplexobj(self):
        # |x - mean|^2 for complex data.
        danom = umath.absolute(danom) ** 2
    else:
        danom *= danom
    dvar = divide(danom.sum(axis), cnt).view(type(self))
    # Apply the mask if it's not a scalar
    if dvar.ndim:
        # Mask results where all inputs were masked or the (corrected)
        # count is non-positive.
        dvar._mask = mask_or(self._mask.all(axis), (cnt <= 0))
        dvar._update_from(self)
    elif getattr(dvar, '_mask', False):
        # Make sure that masked is returned when the scalar is masked.
        dvar = masked
        if out is not None:
            if isinstance(out, MaskedArray):
                out.flat = 0
                out.__setmask__(True)
            elif out.dtype.kind in 'biu':
                # Integer/boolean outputs cannot represent a masked value.
                errmsg = "Masked data information would be lost in one or "\
                         "more location."
                raise MaskError(errmsg)
            else:
                out.flat = np.nan
            return out
    # In case we have an explicit output
    if out is not None:
        # Set the data
        out.flat = dvar
        # Set the mask if needed
        if isinstance(out, MaskedArray):
            out.__setmask__(dvar.mask)
        return out
    return dvar
var.__doc__ = np.var.__doc__
def std(self, axis=None, dtype=None, out=None, ddof=0):
    ""
    # Standard deviation is the square root of the variance; all the
    # mask handling is delegated to var().
    dvar = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof)
    if dvar is not masked:
        if out is not None:
            # `out` already holds the variance: take the root in place.
            np.power(out, 0.5, out=out, casting='unsafe')
            return out
        dvar = sqrt(dvar)
    return dvar
std.__doc__ = np.std.__doc__
#............................................
def round(self, decimals=0, out=None):
    """
    Return an array rounded a to the given number of decimals.

    Refer to `numpy.around` for full documentation.

    See Also
    --------
    numpy.around : equivalent function
    """
    # Round the data part, then re-attach this array's mask and metadata.
    result = self._data.round(decimals=decimals, out=out).view(type(self))
    result._mask = self._mask
    result._update_from(self)
    # No explicit output: we're done
    if out is None:
        return result
    if isinstance(out, MaskedArray):
        out.__setmask__(self._mask)
    return out
round.__doc__ = ndarray.round.__doc__
#............................................
def argsort(self, axis=None, kind='quicksort', order=None, fill_value=None):
    """
    Return an ndarray of indices that sort the array along the
    specified axis. Masked values are filled beforehand to
    `fill_value`.

    Parameters
    ----------
    axis : int, optional
        Axis along which to sort. If None (the default), the flattened
        array is used.
    fill_value : var, optional
        Value used to fill the array before sorting.
        The default is the `fill_value` attribute of the input array.
    kind : {'quicksort', 'mergesort', 'heapsort'}, optional
        Sorting algorithm.
    order : list, optional
        When `a` is an array with fields defined, this argument specifies
        which fields to compare first, second, etc. Not all fields need be
        specified.

    Returns
    -------
    index_array : ndarray, int
        Array of indices that sort `a` along the specified axis.
        In other words, ``a[index_array]`` yields a sorted `a`.

    See Also
    --------
    sort : Describes sorting algorithms used.
    lexsort : Indirect stable sort with multiple keys.
    ndarray.sort : Inplace sort.

    Notes
    -----
    See `sort` for notes on the different sorting algorithms.

    Examples
    --------
    >>> a = np.ma.array([3,2,1], mask=[False, False, True])
    >>> a
    masked_array(data = [3 2 --],
                 mask = [False False  True],
           fill_value = 999999)
    >>> a.argsort()
    array([1, 0, 2])
    """
    if fill_value is None:
        fill_value = default_fill_value(self)
    # Sort a plain ndarray view of the filled data; masked entries sort
    # according to fill_value.
    d = self.filled(fill_value).view(ndarray)
    return d.argsort(axis=axis, kind=kind, order=order)
def argmin(self, axis=None, fill_value=None, out=None):
    """
    Return array of indices to the minimum values along the given axis.

    Parameters
    ----------
    axis : {None, integer}
        If None, the index is into the flattened array, otherwise along
        the specified axis
    fill_value : {var}, optional
        Value used to fill in the masked values. If None, the output of
        minimum_fill_value(self) is used instead.
    out : {None, array}, optional
        Array into which the result can be placed. Its type is preserved
        and it must be of the right shape to hold the output.

    Returns
    -------
    {ndarray, scalar}
        If multi-dimension input, returns a new ndarray of indices to the
        minimum values along the given axis. Otherwise, returns a scalar
        of index to the minimum values along the given axis.

    Examples
    --------
    >>> x = np.ma.array(arange(4), mask=[1,1,0,0])
    >>> x.shape = (2,2)
    >>> print x
    [[-- --]
     [2 3]]
    >>> print x.argmin(axis=0, fill_value=-1)
    [0 0]
    >>> print x.argmin(axis=0, fill_value=9)
    [1 1]
    """
    if fill_value is None:
        # Masked entries are filled with the dtype's maximum representable
        # value so they are never selected as the minimum.
        fill_value = minimum_fill_value(self)
    d = self.filled(fill_value).view(ndarray)
    return d.argmin(axis, out=out)
def argmax(self, axis=None, fill_value=None, out=None):
    """
    Returns array of indices of the maximum values along the given axis.
    Masked values are treated as if they had the value fill_value.

    Parameters
    ----------
    axis : {None, integer}
        If None, the index is into the flattened array, otherwise along
        the specified axis
    fill_value : {var}, optional
        Value used to fill in the masked values. If None, the output of
        maximum_fill_value(self) is used instead.
    out : {None, array}, optional
        Array into which the result can be placed. Its type is preserved
        and it must be of the right shape to hold the output.

    Returns
    -------
    index_array : {integer_array}

    Examples
    --------
    >>> a = np.arange(6).reshape(2,3)
    >>> a.argmax()
    5
    >>> a.argmax(0)
    array([1, 1, 1])
    >>> a.argmax(1)
    array([2, 2])
    """
    if fill_value is None:
        # Masked entries are filled with the dtype's minimum representable
        # value so they are never selected as the maximum. Pass `self`
        # (not `self._data`) for consistency with argmin/min/max; the fill
        # value depends only on the dtype, so the result is identical.
        fill_value = maximum_fill_value(self)
    d = self.filled(fill_value).view(ndarray)
    return d.argmax(axis, out=out)
def sort(self, axis= -1, kind='quicksort', order=None,
         endwith=True, fill_value=None):
    """
    Sort the array, in-place

    Parameters
    ----------
    a : array_like
        Array to be sorted.
    axis : int, optional
        Axis along which to sort. If None, the array is flattened before
        sorting. The default is -1, which sorts along the last axis.
    kind : {'quicksort', 'mergesort', 'heapsort'}, optional
        Sorting algorithm. Default is 'quicksort'.
    order : list, optional
        When `a` is a structured array, this argument specifies which fields
        to compare first, second, and so on. This list does not need to
        include all of the fields.
    endwith : {True, False}, optional
        Whether missing values (if any) should be forced in the upper indices
        (at the end of the array) (True) or lower indices (at the beginning).
    fill_value : {var}, optional
        Value used internally for the masked values.
        If ``fill_value`` is not None, it supersedes ``endwith``.

    Returns
    -------
    sorted_array : ndarray
        Array of the same type and shape as `a`.

    See Also
    --------
    ndarray.sort : Method to sort an array in-place.
    argsort : Indirect sort.
    lexsort : Indirect stable sort on multiple keys.
    searchsorted : Find elements in a sorted array.

    Notes
    -----
    See ``sort`` for notes on the different sorting algorithms.

    Examples
    --------
    >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
    >>> # Default
    >>> a.sort()
    >>> print a
    [1 3 5 -- --]
    >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
    >>> # Put missing values in the front
    >>> a.sort(endwith=False)
    >>> print a
    [-- -- 1 3 5]
    >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
    >>> # fill_value takes over endwith
    >>> a.sort(endwith=False, fill_value=3)
    >>> print a
    [1 -- -- 3 5]
    """
    if self._mask is nomask:
        # No mask: plain in-place ndarray sort.
        ndarray.sort(self, axis=axis, kind=kind, order=order)
    else:
        if self is masked:
            # The masked singleton is immutable; sorting is a no-op.
            return self
        # Choose the value masked entries sort as: maximum fill value
        # pushes them to the front, minimum fill value to the end.
        if fill_value is None:
            if endwith:
                filler = minimum_fill_value(self)
            else:
                filler = maximum_fill_value(self)
        else:
            filler = fill_value
        # Build full fancy-index arrays, replacing the sort axis with the
        # argsort permutation, then permute data and mask in lockstep.
        idx = np.indices(self.shape)
        idx[axis] = self.filled(filler).argsort(axis=axis, kind=kind,
                                                order=order)
        idx_l = idx.tolist()
        tmp_mask = self._mask[idx_l].flat
        tmp_data = self._data[idx_l].flat
        self._data.flat = tmp_data
        self._mask.flat = tmp_mask
    return
#............................................
def min(self, axis=None, out=None, fill_value=None):
    """
    Return the minimum along a given axis.

    Parameters
    ----------
    axis : {None, int}, optional
        Axis along which to operate. By default, ``axis`` is None and the
        flattened input is used.
    out : array_like, optional
        Alternative output array in which to place the result. Must be of
        the same shape and buffer length as the expected output.
    fill_value : {var}, optional
        Value used to fill in the masked values.
        If None, use the output of `minimum_fill_value`.

    Returns
    -------
    amin : array_like
        New array holding the result.
        If ``out`` was specified, ``out`` is returned.

    See Also
    --------
    minimum_fill_value
        Returns the minimum filling value for a given datatype.
    """
    # Result positions are masked only where everything along the
    # reduced axis was masked.
    _mask = ndarray.__getattribute__(self, '_mask')
    newmask = _check_mask_axis(_mask, axis)
    if fill_value is None:
        fill_value = minimum_fill_value(self)
    # No explicit output
    if out is None:
        result = self.filled(fill_value).min(axis=axis, out=out).view(type(self))
        if result.ndim:
            # Set the mask
            result.__setmask__(newmask)
            # Get rid of Infs: masked slices got fill_value as data.
            if newmask.ndim:
                np.copyto(result, result.fill_value, where=newmask)
        elif newmask:
            # Fully-masked 0-d reduction: return the masked singleton.
            result = masked
        return result
    # Explicit output
    result = self.filled(fill_value).min(axis=axis, out=out)
    if isinstance(out, MaskedArray):
        outmask = getattr(out, '_mask', nomask)
        if (outmask is nomask):
            outmask = out._mask = make_mask_none(out.shape)
        outmask.flat = newmask
    else:
        if out.dtype.kind in 'biu':
            # Integer/boolean outputs cannot represent a masked value.
            errmsg = "Masked data information would be lost in one or more"\
                     " location."
            raise MaskError(errmsg)
        np.copyto(out, np.nan, where=newmask)
    return out
def mini(self, axis=None):
    """
    Return the array minimum along the specified axis.

    Parameters
    ----------
    axis : int, optional
        The axis along which to find the minima. Default is None, in which case
        the minimum value in the whole array is returned.

    Returns
    -------
    min : scalar or MaskedArray
        If `axis` is None, the result is a scalar. Otherwise, if `axis` is
        given and the array is at least 2-D, the result is a masked array with
        dimension one smaller than the array on which `mini` is called.

    Examples
    --------
    >>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2)
    >>> print x
    [[0 --]
     [2 3]
     [4 --]]
    >>> x.mini()
    0
    >>> x.mini(axis=0)
    masked_array(data = [0 3],
                 mask = [False False],
           fill_value = 999999)
    >>> print x.mini(axis=1)
    [0 2 4]
    """
    # Delegate to the `minimum` extrema operation: a full reduction when
    # no axis is given, a per-axis reduction otherwise.
    if axis is None:
        return minimum(self)
    return minimum.reduce(self, axis)
#........................
def max(self, axis=None, out=None, fill_value=None):
    """
    Return the maximum along a given axis.

    Parameters
    ----------
    axis : {None, int}, optional
        Axis along which to operate. By default, ``axis`` is None and the
        flattened input is used.
    out : array_like, optional
        Alternative output array in which to place the result. Must
        be of the same shape and buffer length as the expected output.
    fill_value : {var}, optional
        Value used to fill in the masked values.
        If None, use the output of maximum_fill_value().

    Returns
    -------
    amax : array_like
        New array holding the result.
        If ``out`` was specified, ``out`` is returned.

    See Also
    --------
    maximum_fill_value
        Returns the maximum filling value for a given datatype.
    """
    # Mirror image of min(): see there for the mask-propagation logic.
    _mask = ndarray.__getattribute__(self, '_mask')
    newmask = _check_mask_axis(_mask, axis)
    if fill_value is None:
        fill_value = maximum_fill_value(self)
    # No explicit output
    if out is None:
        result = self.filled(fill_value).max(axis=axis, out=out).view(type(self))
        if result.ndim:
            # Set the mask
            result.__setmask__(newmask)
            # Get rid of Infs: masked slices got fill_value as data.
            if newmask.ndim:
                np.copyto(result, result.fill_value, where=newmask)
        elif newmask:
            # Fully-masked 0-d reduction: return the masked singleton.
            result = masked
        return result
    # Explicit output
    result = self.filled(fill_value).max(axis=axis, out=out)
    if isinstance(out, MaskedArray):
        outmask = getattr(out, '_mask', nomask)
        if (outmask is nomask):
            outmask = out._mask = make_mask_none(out.shape)
        outmask.flat = newmask
    else:
        if out.dtype.kind in 'biu':
            # Integer/boolean outputs cannot represent a masked value.
            errmsg = "Masked data information would be lost in one or more"\
                     " location."
            raise MaskError(errmsg)
        np.copyto(out, np.nan, where=newmask)
    return out
def ptp(self, axis=None, out=None, fill_value=None):
    """
    Return (maximum - minimum) along the the given dimension
    (i.e. peak-to-peak value).

    Parameters
    ----------
    axis : {None, int}, optional
        Axis along which to find the peaks. If None (default) the
        flattened array is used.
    out : {None, array_like}, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output
        but the type will be cast if necessary.
    fill_value : {var}, optional
        Value used to fill in the masked values.

    Returns
    -------
    ptp : ndarray.
        A new array holding the result, unless ``out`` was
        specified, in which case a reference to ``out`` is returned.
    """
    if out is None:
        result = self.max(axis=axis, fill_value=fill_value)
        result -= self.min(axis=axis, fill_value=fill_value)
        return result
    # With an explicit output: compute max into `out`, then subtract the
    # min in place.
    out.flat = self.max(axis=axis, out=out, fill_value=fill_value)
    min_value = self.min(axis=axis, fill_value=fill_value)
    np.subtract(out, min_value, out=out, casting='unsafe')
    return out
def take(self, indices, axis=None, out=None, mode='raise'):
    """
    Take elements from the masked array along an axis.

    Analogous to `ndarray.take`, with masked-index handling: masked
    entries of `indices` are replaced by 0 for the data lookup, and the
    corresponding positions of the result are masked.

    Parameters
    ----------
    indices : array_like
        The indices of the values to extract. May itself be masked.
    axis : int, optional
        The axis over which to select values. By default, the flattened
        input array is used.
    out : ndarray, optional
        If provided, the result will be placed in this array.
    mode : {'raise', 'wrap', 'clip'}, optional
        Specifies how out-of-bounds indices behave (see `numpy.take`).
    """
    (_data, _mask) = (self._data, self._mask)
    cls = type(self)
    # Make sure the indices are not masked
    maskindices = getattr(indices, '_mask', nomask)
    if maskindices is not nomask:
        # Use index 0 as a placeholder; those positions get masked below.
        indices = indices.filled(0)
    # Get the data
    if out is None:
        out = _data.take(indices, axis=axis, mode=mode).view(cls)
    else:
        np.take(_data, indices, axis=axis, mode=mode, out=out)
    # Get the mask
    if isinstance(out, MaskedArray):
        if _mask is nomask:
            outmask = maskindices
        else:
            outmask = _mask.take(indices, axis=axis, mode=mode)
            # Positions selected through a masked index stay masked.
            outmask |= maskindices
        out.__setmask__(outmask)
    return out
# Array methods ---------------------------------------
# Thin wrappers generated by _arraymethod: they apply the underlying
# ndarray method to the data and (unless onmask=False) to the mask too.
# Note: the original defined `copy` twice; the duplicate is removed.
copy = _arraymethod('copy')
diagonal = _arraymethod('diagonal')
transpose = _arraymethod('transpose')
T = property(fget=lambda self:self.transpose())
swapaxes = _arraymethod('swapaxes')
clip = _arraymethod('clip', onmask=False)
squeeze = _arraymethod('squeeze')
#--------------------------------------------
def tolist(self, fill_value=None):
    """
    Return the data portion of the masked array as a hierarchical Python list.

    Data items are converted to the nearest compatible Python type.
    Masked values are converted to `fill_value`. If `fill_value` is None,
    the corresponding entries in the output list will be ``None``.

    Parameters
    ----------
    fill_value : scalar, optional
        The value to use for invalid entries. Default is None.

    Returns
    -------
    result : list
        The Python list representation of the masked array.

    Examples
    --------
    >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4)
    >>> x.tolist()
    [[1, None, 3], [None, 5, None], [7, None, 9]]
    >>> x.tolist(-999)
    [[1, -999, 3], [-999, 5, -999], [7, -999, 9]]
    """
    _mask = self._mask
    # No mask: defer to the plain ndarray conversion.
    if _mask is nomask:
        return self._data.tolist()
    # Explicit fill_value: fill the array and get the list.
    if fill_value is not None:
        return self.filled(fill_value).tolist()
    # Structured array: replace masked entries with None field by field.
    names = self.dtype.names
    if names:
        result = self._data.astype([(_, object) for _ in names])
        for n in names:
            result[n][_mask[n]] = None
        return result.tolist()
    # Standard array: work on a flattened object copy, then reshape.
    # (The original had an unreachable `if _mask is nomask` branch here
    # -- nomask already returned above -- and a large commented-out
    # legacy implementation; both removed.)
    inishape = self.shape
    result = np.array(self._data.ravel(), dtype=object)
    result[_mask.ravel()] = None
    result.shape = inishape
    return result.tolist()
#........................
def tostring(self, fill_value=None, order='C'):
    """
    This function is a compatibility alias for tobytes. Despite its name it
    returns bytes not strings.

    Parameters
    ----------
    fill_value : scalar, optional
        Value used to fill in the masked values. Default is None, in
        which case `MaskedArray.fill_value` is used.
    order : {'C', 'F', 'A'}, optional
        Order of the data items in the copy. Default is 'C'.

    See Also
    --------
    tobytes
    """
    # Bug fix: forward the caller's `order`; the original hard-coded
    # order='C', silently ignoring the parameter.
    return self.tobytes(fill_value, order=order)
#........................
def tobytes(self, fill_value=None, order='C'):
    """
    Return the array data as a string containing the raw bytes in the array.

    The array is filled with a fill value before the string conversion.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    fill_value : scalar, optional
        Value used to fill in the masked values. Default is None, in which
        case `MaskedArray.fill_value` is used.
    order : {'C','F','A'}, optional
        Order of the data item in the copy. Default is 'C'.
        - 'C' -- C order (row major).
        - 'F' -- Fortran order (column major).
        - 'A' -- Any, current order of array.
        - None -- Same as 'A'.

    See Also
    --------
    ndarray.tobytes
    tolist, tofile

    Notes
    -----
    As for `ndarray.tobytes`, information about the shape, dtype, etc.,
    but also about `fill_value`, will be lost.

    Examples
    --------
    >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
    >>> x.tobytes()
    '\\x01\\x00\\x00\\x00?B\\x0f\\x00?B\\x0f\\x00\\x04\\x00\\x00\\x00'
    """
    # Masked positions are serialized as their fill value.
    return self.filled(fill_value).tobytes(order=order)
#........................
def tofile(self, fid, sep="", format="%s"):
    """
    Save a masked array to a file in binary format.

    .. warning::
      This function is not implemented yet.

    Raises
    ------
    NotImplementedError
        When `tofile` is called.
    """
    # Deliberately unimplemented: there is no agreed-on on-disk format
    # for the mask. Overrides ndarray.tofile to avoid silent data loss.
    raise NotImplementedError("Not implemented yet, sorry...")
def toflex(self):
    """
    Transforms a masked array into a flexible-type array.

    The flexible type array that is returned will have two fields:

    * the ``_data`` field stores the ``_data`` part of the array.
    * the ``_mask`` field stores the ``_mask`` part of the array.

    Parameters
    ----------
    None

    Returns
    -------
    record : ndarray
        A new flexible-type `ndarray` with two fields: the first element
        containing a value, the second element containing the corresponding
        mask boolean. The returned record shape matches self.shape.

    Notes
    -----
    A side-effect of transforming a masked array into a flexible `ndarray` is
    that meta information (``fill_value``, ...) will be lost.

    Examples
    --------
    >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
    >>> print x
    [[1 -- 3]
     [-- 5 --]
     [7 -- 9]]
    >>> print x.toflex()
    [[(1, False) (2, True) (3, False)]
     [(4, True) (5, False) (6, True)]
     [(7, False) (8, True) (9, False)]]
    """
    # Get the basic dtype
    ddtype = self.dtype
    # Make sure we have a mask to serialize.
    _mask = self._mask
    if _mask is None:
        _mask = make_mask_none(self.shape, ddtype)
    # Bug fix: use the local `_mask` consistently below. The original
    # read `self._mask` again, which raises AttributeError when the
    # attribute was None and the substitute mask was just created.
    mdtype = _mask.dtype
    # Pair each value with its mask flag in a structured array.
    record = np.ndarray(shape=self.shape,
                        dtype=[('_data', ddtype), ('_mask', mdtype)])
    record['_data'] = self._data
    record['_mask'] = _mask
    return record

# Legacy alias kept for backward compatibility.
torecords = toflex
#--------------------------------------------
# Pickling
def __getstate__(self):
    """Return the internal state of the masked array, for pickling
    purposes.

    The state is a 7-tuple: (version, shape, dtype, Fortran-order flag,
    raw data bytes, raw mask bytes, fill_value).
    """
    # Serialize data and mask in whichever memory order the array uses.
    cf = 'CF'[self.flags.fnc]
    state = (1,
             self.shape,
             self.dtype,
             self.flags.fnc,
             self._data.tobytes(cf),
             # self._data.tolist(),
             getmaskarray(self).tobytes(cf),
             # getmaskarray(self).tolist(),
             self._fill_value,
             )
    return state
#
def __setstate__(self, state):
    """Restore the internal state of the masked array, for
    pickling purposes. ``state`` is typically the output of the
    ``__getstate__`` output, and is a 7-tuple:

    - a version number
    - a tuple giving the shape of the data
    - a typecode for the data
    - a Fortran-order flag
    - a binary string for the data
    - a binary string for the mask
    - the fill value.
    """
    (_, shp, typ, isf, raw, msk, flv) = state
    # Restore the data part, then the mask with a matching mask dtype.
    ndarray.__setstate__(self, (shp, typ, isf, raw))
    self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk))
    self.fill_value = flv
#
def __reduce__(self):
    """Return a 3-tuple for pickling a MaskedArray.

    _mareconstruct builds an empty placeholder; the real contents are
    restored afterwards via __setstate__.
    """
    return (_mareconstruct,
            (self.__class__, self._baseclass, (0,), 'b',),
            self.__getstate__())
#
def __deepcopy__(self, memo=None):
    """Deep-copy the array, its mask, and every extra instance attribute."""
    from copy import deepcopy
    if memo is None:
        memo = {}
    # Copy the array data itself, and register it in the memo before
    # recursing so self-references resolve to the new object.
    duplicate = MaskedArray.__new__(type(self), self, copy=True)
    memo[id(self)] = duplicate
    for attr, value in self.__dict__.items():
        duplicate.__dict__[attr] = deepcopy(value, memo)
    return duplicate
def _mareconstruct(subtype, baseclass, baseshape, basetype,):
    """Internal function that builds a new MaskedArray from the
    information stored in a pickle.

    Creates an empty placeholder of the right classes; __setstate__
    fills in the actual data, mask, and fill_value afterwards.
    """
    _data = ndarray.__new__(baseclass, baseshape, basetype)
    _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype))
    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
class mvoid(MaskedArray):
    """
    Fake a 'void' object to use for masked array with structured dtypes.

    An mvoid represents a single (possibly partially masked) record of a
    structured masked array: indexing by field name returns either the
    field value or the `masked` singleton.
    """
    #
    def __new__(self, data, mask=nomask, dtype=None, fill_value=None,
                hardmask=False):
        dtype = dtype or data.dtype
        _data = np.array(data, dtype=dtype)
        _data = _data.view(self)
        _data._hardmask = hardmask
        if mask is not nomask:
            if isinstance(mask, np.void):
                _data._mask = mask
            else:
                try:
                    # Mask is already a 0D array
                    _data._mask = np.void(mask)
                except TypeError:
                    # Transform the mask to a void
                    mdtype = make_mask_descr(dtype)
                    _data._mask = np.array(mask, dtype=mdtype)[()]
        if fill_value is not None:
            _data.fill_value = fill_value
        return _data

    def _get_data(self):
        # Make sure that the _data part is a np.void
        return self.view(ndarray)[()]

    _data = property(fget=_get_data)

    def __getitem__(self, indx):
        "Get the index: return `masked` for a masked field, else its value."
        m = self._mask
        if m is not nomask and m[indx]:
            return masked
        return self._data[indx]

    def __setitem__(self, indx, value):
        self._data[indx] = value
        if self._hardmask:
            # A hard mask can only grow: OR in the value's mask.
            self._mask[indx] |= getattr(value, "_mask", False)
        else:
            self._mask[indx] = getattr(value, "_mask", False)

    def __str__(self):
        m = self._mask
        if (m is nomask):
            return self._data.__str__()
        m = tuple(m)
        if (not any(m)):
            return self._data.__str__()
        # Replace masked fields by the masked print marker (e.g. '--').
        r = self._data.tolist()
        p = masked_print_option
        if not p.enabled():
            p = 'N/A'
        else:
            p = str(p)
        r = [(str(_), p)[int(_m)] for (_, _m) in zip(r, m)]
        return "(%s)" % ", ".join(r)

    def __repr__(self):
        m = self._mask
        if (m is nomask):
            return self._data.__repr__()
        m = tuple(m)
        if not any(m):
            return self._data.__repr__()
        p = masked_print_option
        if not p.enabled():
            # Printing of masked entries disabled: show filled values.
            return self.filled(self.fill_value).__repr__()
        p = str(p)
        r = [(str(_), p)[int(_m)] for (_, _m) in zip(self._data.tolist(), m)]
        return "(%s)" % ", ".join(r)

    def __iter__(self):
        "Defines an iterator for mvoid: masked fields yield `masked`."
        (_data, _mask) = (self._data, self._mask)
        if _mask is nomask:
            for d in _data:
                yield d
        else:
            for (d, m) in zip(_data, _mask):
                if m:
                    yield masked
                else:
                    yield d

    def __len__(self):
        return self._data.__len__()

    def filled(self, fill_value=None):
        """
        Return a copy with masked fields filled with a given value.

        Parameters
        ----------
        fill_value : scalar, optional
            The value to use for invalid entries (None by default).
            If None, the `fill_value` attribute is used instead.

        Returns
        -------
        filled_void
            A `np.void` object

        See Also
        --------
        MaskedArray.filled
        """
        return asarray(self).filled(fill_value)[()]

    def tolist(self):
        """
        Transforms the mvoid object into a tuple.

        Masked fields are replaced by None.

        Returns
        -------
        returned_tuple
            Tuple of fields
        """
        _mask = self._mask
        if _mask is nomask:
            return self._data.tolist()
        result = []
        for (d, m) in zip(self._data, self._mask):
            if m:
                result.append(None)
            else:
                # .item() makes sure we return a standard Python object
                result.append(d.item())
        return tuple(result)
#####--------------------------------------------------------------------------
#---- --- Shortcuts ---
#####---------------------------------------------------------------------------
def isMaskedArray(x):
    """
    Test whether input is an instance of MaskedArray.

    This function returns True if `x` is an instance of MaskedArray
    and returns False otherwise. Any object is accepted as input.

    Parameters
    ----------
    x : object
        Object to test.

    Returns
    -------
    result : bool
        True if `x` is a MaskedArray.

    See Also
    --------
    isMA : Alias to isMaskedArray.
    isarray : Alias to isMaskedArray.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> m = ma.masked_values(np.eye(3, 3), 0)
    >>> ma.isMaskedArray(m)
    True
    >>> ma.isMaskedArray(np.eye(3, 3))
    False
    >>> ma.isMaskedArray([0, 1, 2])
    False
    """
    # A simple isinstance test: subclasses (e.g. mvoid, MaskedConstant)
    # also qualify.
    return isinstance(x, MaskedArray)

isarray = isMaskedArray
isMA = isMaskedArray  #backward compatibility
# We define the masked singleton as a float for higher precedence...
# Note that it can be tricky sometimes w/ type comparison
class MaskedConstant(MaskedArray):
    """Class backing the unique `masked` singleton.

    A 0-d float MaskedArray that is always masked. Defined as a float so
    it takes precedence in type promotion (which can make type
    comparisons tricky).
    """
    # Shared 0-d data/mask: every instance is a view of the same buffers.
    _data = data = np.array(0.)
    _mask = mask = np.array(True)
    _baseclass = ndarray
    #
    def __new__(self):
        return self._data.view(self)
    #
    def __array_finalize__(self, obj):
        # Nothing to propagate: the singleton's state is fixed.
        return
    #
    def __array_wrap__(self, obj):
        # Any ufunc applied to `masked` yields `masked` again.
        return self
    #
    def __str__(self):
        return str(masked_print_option._display)
    #
    def __repr__(self):
        return 'masked'
    #
    def flatten(self):
        # Flattening a 0-d constant produces a 1-element masked array.
        return masked_array([self._data], dtype=float, mask=[True])

    def __reduce__(self):
        """Override of MaskedArray's __reduce__.

        Pickling preserves singleton identity: unpickling just calls the
        class again.
        """
        return (self.__class__, ())
# The unique masked-value singleton and its public aliases.
masked = masked_singleton = MaskedConstant()
# `masked_array` is the historical public alias of the class itself.
masked_array = MaskedArray
def array(data, dtype=None, copy=False, order=False,
          mask=nomask, fill_value=None,
          keep_mask=True, hard_mask=False, shrink=True, subok=True, ndmin=0,
          ):
    """array(data, dtype=None, copy=False, order=False, mask=nomask,
             fill_value=None, keep_mask=True, hard_mask=False, shrink=True,
             subok=True, ndmin=0)

    Acts as shortcut to MaskedArray, with options in a different order
    for convenience. And backwards compatibility...
    """
    #!!!: we should try to put 'order' somewhere (it is currently
    # accepted but not forwarded to MaskedArray).
    return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, subok=subok,
                       keep_mask=keep_mask, hard_mask=hard_mask,
                       fill_value=fill_value, ndmin=ndmin, shrink=shrink)
array.__doc__ = masked_array.__doc__
def is_masked(x):
    """
    Determine whether input has masked values.

    Accepts any object as input, but always returns False unless the
    input is a MaskedArray containing masked values.

    Parameters
    ----------
    x : array_like
        Array to check for masked values.

    Returns
    -------
    result : bool
        True if `x` is a MaskedArray with masked values, False otherwise.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0)
    >>> ma.is_masked(x)
    True
    >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42)
    >>> ma.is_masked(x)
    False

    Always returns False if `x` isn't a MaskedArray.

    >>> ma.is_masked([False, True, False])
    False
    >>> ma.is_masked('a string')
    False
    """
    # No mask at all means no masked values; otherwise any True entry
    # in the mask makes the answer True.
    m = getmask(x)
    return m is not nomask and bool(m.any())
#####---------------------------------------------------------------------------
#---- --- Extrema functions ---
#####---------------------------------------------------------------------------
class _extrema_operation(object):
    """
    Generic class for maximum/minimum functions.

    .. note::
      This is the base class for `_maximum_operation` and
      `_minimum_operation`.

    Subclasses set ``self.ufunc`` (the elementwise extremum ufunc),
    ``self.compare`` (the comparison used for the two-argument form) and
    ``self.fill_value_func`` (the neutral fill value for reductions).
    """
    def __call__(self, a, b=None):
        "Executes the call behavior."
        if b is None:
            # One argument: reduce to a scalar extremum.
            return self.reduce(a)
        # Two arguments: elementwise selection via the comparison.
        return where(self.compare(a, b), a, b)
    #.........
    def reduce(self, target, axis=None):
        "Reduce target along the given axis."
        target = narray(target, copy=False, subok=True)
        m = getmask(target)
        if axis is not None:
            kargs = { 'axis' : axis }
        else:
            kargs = {}
            # Full reduction: flatten data (and mask) first.
            target = target.ravel()
            if not (m is nomask):
                m = m.ravel()
        if m is nomask:
            t = self.ufunc.reduce(target, **kargs)
        else:
            # Fill masked entries with the neutral extremum so they never win.
            target = target.filled(self.fill_value_func(target)).view(type(target))
            t = self.ufunc.reduce(target, **kargs)
            # Result is masked only where every input was masked.
            m = umath.logical_and.reduce(m, **kargs)
            if hasattr(t, '_mask'):
                t._mask = m
            elif m:
                t = masked
        return t
    #.........
    def outer (self, a, b):
        "Return the function applied to the outer product of a and b."
        ma = getmask(a)
        mb = getmask(b)
        if ma is nomask and mb is nomask:
            m = nomask
        else:
            # An output cell is masked if either operand is masked.
            ma = getmaskarray(a)
            mb = getmaskarray(b)
            m = logical_or.outer(ma, mb)
        result = self.ufunc.outer(filled(a), filled(b))
        if not isinstance(result, MaskedArray):
            result = result.view(MaskedArray)
        result._mask = m
        return result
#............................
class _minimum_operation(_extrema_operation):
    """Callable computing masked minima: pairwise with two arguments,
    a reduction with one."""
    def __init__(self):
        # Wire up the machinery consumed by the _extrema_operation base.
        self.ufunc = umath.minimum
        self.afunc = amin
        self.compare = less
        self.fill_value_func = minimum_fill_value
#............................
class _maximum_operation(_extrema_operation):
    """Callable computing masked maxima: pairwise with two arguments,
    a reduction with one."""
    def __init__(self):
        # Wire up the machinery consumed by the _extrema_operation base.
        self.ufunc = umath.maximum
        self.afunc = amax
        self.compare = greater
        self.fill_value_func = maximum_fill_value
#..........................................................
def min(obj, axis=None, out=None, fill_value=None):
    # Prefer the object's own masked-aware ``min``; fall back to a
    # masked-array conversion when the method is absent or does not
    # accept a ``fill_value`` argument.
    try:
        result = obj.min(axis=axis, fill_value=fill_value, out=out)
    except (AttributeError, TypeError):
        result = asanyarray(obj).min(axis=axis, fill_value=fill_value, out=out)
    return result
min.__doc__ = MaskedArray.min.__doc__
def max(obj, axis=None, out=None, fill_value=None):
    # Prefer the object's own masked-aware ``max``; fall back to a
    # masked-array conversion when the method is absent or does not
    # accept a ``fill_value`` argument.
    try:
        result = obj.max(axis=axis, fill_value=fill_value, out=out)
    except (AttributeError, TypeError):
        result = asanyarray(obj).max(axis=axis, fill_value=fill_value, out=out)
    return result
max.__doc__ = MaskedArray.max.__doc__
def ptp(obj, axis=None, out=None, fill_value=None):
    """Return the peak-to-peak range: a.max(axis) - a.min(axis)."""
    # Use the masked-aware method when available; otherwise coerce to a
    # masked array first (also handles a ``ptp`` lacking ``fill_value``).
    try:
        result = obj.ptp(axis, out=out, fill_value=fill_value)
    except (AttributeError, TypeError):
        result = asanyarray(obj).ptp(axis=axis, fill_value=fill_value, out=out)
    return result
ptp.__doc__ = MaskedArray.ptp.__doc__
#####---------------------------------------------------------------------------
#---- --- Definition of functions from the corresponding methods ---
#####---------------------------------------------------------------------------
class _frommethod:
    """
    Define functions from existing MaskedArray methods.

    Instances are callables that dispatch to the method of the same name
    on their argument, falling back to `MaskedArray` and finally to the
    top-level numpy function.

    Parameters
    ----------
    methodname : str
        Name of the method to transform.
    reversed : bool, optional
        If True, swap the target array with the first positional
        argument before dispatching (used e.g. for ``compress``).
    """
    def __init__(self, methodname, reversed=False):
        self.__name__ = methodname
        self.__doc__ = self.getdoc()
        self.reversed = reversed
    #
    def getdoc(self):
        "Return the doc of the function (from the doc of the method)."
        # Look the method up on MaskedArray first, then fall back to numpy.
        meth = getattr(MaskedArray, self.__name__, None) or\
            getattr(np, self.__name__, None)
        signature = self.__name__ + get_object_signature(meth)
        if meth is not None:
            # Prepend the call signature so the rendered doc shows usage.
            doc = """ %s\n%s""" % (signature, getattr(meth, '__doc__', None))
            return doc
    #
    def __call__(self, a, *args, **params):
        if self.reversed:
            # Swap `a` with the first positional argument.
            args = list(args)
            arr = args[0]
            args[0] = a
            a = arr
        # Get the method from the array (if possible)
        method_name = self.__name__
        method = getattr(a, method_name, None)
        if method is not None:
            return method(*args, **params)
        # Still here ? Then a is not a MaskedArray
        method = getattr(MaskedArray, method_name, None)
        if method is not None:
            return method(MaskedArray(a), *args, **params)
        # Still here ? OK, let's call the corresponding np function
        method = getattr(np, method_name)
        return method(a, *args, **params)
# Functional wrappers around the corresponding MaskedArray methods
# (see `_frommethod`); each call dispatches to the method on its argument.
all = _frommethod('all')
anomalies = anom = _frommethod('anom')
any = _frommethod('any')
# `compress` swaps its first two arguments relative to the method form.
compress = _frommethod('compress', reversed=True)
cumprod = _frommethod('cumprod')
cumsum = _frommethod('cumsum')
copy = _frommethod('copy')
diagonal = _frommethod('diagonal')
harden_mask = _frommethod('harden_mask')
ids = _frommethod('ids')
# maximum/minimum need mask-aware reductions, so they use the dedicated
# extrema classes instead of `_frommethod`.
maximum = _maximum_operation()
mean = _frommethod('mean')
minimum = _minimum_operation()
nonzero = _frommethod('nonzero')
prod = _frommethod('prod')
product = _frommethod('prod')
ravel = _frommethod('ravel')
repeat = _frommethod('repeat')
shrink_mask = _frommethod('shrink_mask')
soften_mask = _frommethod('soften_mask')
std = _frommethod('std')
sum = _frommethod('sum')
swapaxes = _frommethod('swapaxes')
#take = _frommethod('take')
trace = _frommethod('trace')
var = _frommethod('var')
def take(a, indices, axis=None, out=None, mode='raise'):
    """
    Take elements from a masked array along an axis.

    Equivalent to `MaskedArray.take`; `a` is converted to a masked
    array first so masked entries are preserved in the result.

    See Also
    --------
    MaskedArray.take
    """
    a = masked_array(a)
    return a.take(indices, axis=axis, out=out, mode=mode)
#..............................................................................
def power(a, b, third=None):
    """
    Returns element-wise base array raised to power from second array.

    This is the masked array version of `numpy.power`. For details see
    `numpy.power`.

    See Also
    --------
    numpy.power

    Notes
    -----
    The *out* argument to `numpy.power` is not supported, `third` has to be
    None.
    """
    if third is not None:
        raise MaskError("3-argument power not supported.")
    # Get the masks
    ma = getmask(a)
    mb = getmask(b)
    m = mask_or(ma, mb)
    # Get the rawdata
    fa = getdata(a)
    fb = getdata(b)
    # Get the type of the result (so that we preserve subclasses)
    if isinstance(a, MaskedArray):
        basetype = type(a)
    else:
        basetype = MaskedArray
    # Compute under a local errstate so divide/invalid warnings raised by
    # the (to-be-masked) entries are suppressed without mutating the
    # global error state (the old code called np.seterr inside a bare
    # errstate(), which worked only because errstate() restores on exit).
    with np.errstate(divide='ignore', invalid='ignore'):
        result = np.where(m, fa, umath.power(fa, fb)).view(basetype)
    result._update_from(a)
    # Find where we're in trouble w/ NaNs and Infs
    invalid = np.logical_not(np.isfinite(result.view(ndarray)))
    # Add the initial mask
    if m is not nomask:
        if not (result.ndim):
            # 0-d masked input: collapse to the masked singleton.
            return masked
        result._mask = np.logical_or(m, invalid)
    # Fix the invalid parts
    if invalid.any():
        if not result.ndim:
            return masked
        elif result._mask is nomask:
            result._mask = invalid
        result._data[invalid] = result.fill_value
    return result
#..............................................................................
def argsort(a, axis=None, kind='quicksort', order=None, fill_value=None):
    "Function version of the eponymous method."
    # Masked entries are replaced by the fill value before ranking.
    if fill_value is None:
        fill_value = default_fill_value(a)
    filled_data = filled(a, fill_value)
    if axis is not None:
        return filled_data.argsort(axis, kind=kind, order=order)
    return filled_data.argsort(kind=kind, order=order)
argsort.__doc__ = MaskedArray.argsort.__doc__
def argmin(a, axis=None, fill_value=None):
    "Function version of the eponymous method."
    # Filling masked slots with the (large) default fill value keeps
    # them from being selected as the minimum.
    fv = default_fill_value(a) if fill_value is None else fill_value
    return filled(a, fv).argmin(axis=axis)
argmin.__doc__ = MaskedArray.argmin.__doc__
def argmax(a, axis=None, fill_value=None):
    "Function version of the eponymous method."
    if fill_value is None:
        fill_value = default_fill_value(a)
        # For argmax the masked entries must sort *below* everything, so
        # negate the default fill value when the dtype supports it.
        # (Was a bare ``except:`` which also swallowed KeyboardInterrupt.)
        try:
            fill_value = -fill_value
        except TypeError:
            # Non-negatable fill value (e.g. strings): use it as-is.
            pass
    d = filled(a, fill_value)
    return d.argmax(axis=axis)
argmax.__doc__ = MaskedArray.argmax.__doc__
def sort(a, axis= -1, kind='quicksort', order=None, endwith=True, fill_value=None):
    "Function version of the eponymous method."
    # Work on a copy so the caller's array is left untouched.
    a = narray(a, copy=True, subok=True)
    if axis is None:
        # Mimic ndarray.sort(axis=None): sort the flattened array.
        a = a.flatten()
        axis = 0
    if fill_value is None:
        # endwith=True fills masked slots with minimum_fill_value (the
        # dtype's extreme high value) so they sort to the end; otherwise
        # maximum_fill_value pushes them to the front.
        if endwith:
            filler = minimum_fill_value(a)
        else:
            filler = maximum_fill_value(a)
    else:
        filler = fill_value
    # Build fancy indices that select the sorted order along `axis` while
    # leaving every other axis in place; indexing with them moves data
    # and mask together.
    indx = np.indices(a.shape).tolist()
    indx[axis] = filled(a, filler).argsort(axis=axis, kind=kind, order=order)
    return a[indx]
sort.__doc__ = MaskedArray.sort.__doc__
def compressed(x):
    """
    Return all the non-masked data as a 1-D array.

    This function is equivalent to calling the "compressed" method of a
    `MaskedArray`, see `MaskedArray.compressed` for details.

    See Also
    --------
    MaskedArray.compressed
        Equivalent method.
    """
    arr = x if isinstance(x, MaskedArray) else asanyarray(x)
    return arr.compressed()
def concatenate(arrays, axis=0):
    """
    Concatenate a sequence of masked arrays along the given axis.

    Parameters
    ----------
    arrays : sequence of array_like
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int, optional
        The axis along which the arrays will be joined. Default is 0.

    Returns
    -------
    result : MaskedArray
        The concatenated array with any masked entries preserved.

    See Also
    --------
    numpy.concatenate : Equivalent function in the top-level NumPy module.
    """
    joined = np.concatenate([getdata(a) for a in arrays], axis)
    data = joined.view(get_masked_subclass(*arrays))
    # Fast path: if no input carries a mask, neither does the result.
    for a in arrays:
        if getmask(a) is not nomask:
            break
    else:
        return data
    # Otherwise concatenate the (full) masks as well ...
    dm = np.concatenate([getmaskarray(a) for a in arrays], axis)
    # ... and shrink back to nomask when the combined mask is all-False
    # (structured masks are never shrunk).
    if not dm.dtype.fields and not dm.any():
        data._mask = nomask
    else:
        data._mask = dm.reshape(joined.shape)
    return data
def count(a, axis=None):
    # Non-masked inputs are wrapped (without copying) so the method form
    # can always be used.
    arr = a if isinstance(a, MaskedArray) else masked_array(a, copy=False)
    return arr.count(axis)
count.__doc__ = MaskedArray.count.__doc__
def diag(v, k=0):
    """
    Extract a diagonal or construct a diagonal array.

    This function is the equivalent of `numpy.diag` that takes masked
    values into account, see `numpy.diag` for details.

    See Also
    --------
    numpy.diag : Equivalent function for ndarrays.
    """
    result = np.diag(v, k).view(MaskedArray)
    vmask = getmask(v)
    if vmask is not nomask:
        # Apply the same diag/extraction to the mask so it stays aligned.
        result._mask = np.diag(vmask, k)
    return result
def expand_dims(x, axis):
    """
    Expand the shape of an array, preserving masked elements.

    Inserts a new axis before the position given by `axis`, behaving
    like `numpy.expand_dims` but keeping the mask in sync for masked
    input.  The same result can be achieved with ``x[np.newaxis, :]``
    style slicing.

    See Also
    --------
    numpy.expand_dims : Equivalent function in top-level NumPy module.
    """
    expanded = n_expand_dims(x, axis)
    if isinstance(x, MaskedArray):
        # n_expand_dims loses the mask; rebuild the result as a reshaped
        # view of the original so data and mask stay linked.
        target_shape = expanded.shape
        expanded = x.view()
        expanded.shape = target_shape
        if expanded._mask is not nomask:
            expanded._mask.shape = target_shape
    return expanded
#......................................
def left_shift(a, n):
    """
    Shift the bits of an integer to the left.

    This is the masked array version of `numpy.left_shift`, for details
    see that function.

    See Also
    --------
    numpy.left_shift
    """
    mask = getmask(a)
    if mask is nomask:
        return masked_array(umath.left_shift(filled(a), n))
    # Shift a zero-filled copy so masked slots hold a harmless value,
    # then restore the original mask.
    shifted = umath.left_shift(filled(a, 0), n)
    return masked_array(shifted, mask=mask)
def right_shift(a, n):
    """
    Shift the bits of an integer to the right.

    This is the masked array version of `numpy.right_shift`, for details
    see that function.

    See Also
    --------
    numpy.right_shift
    """
    mask = getmask(a)
    if mask is nomask:
        return masked_array(umath.right_shift(filled(a), n))
    # Shift a zero-filled copy so masked slots hold a harmless value,
    # then restore the original mask.
    shifted = umath.right_shift(filled(a, 0), n)
    return masked_array(shifted, mask=mask)
#......................................
def put(a, indices, values, mode='raise'):
    """
    Set storage-indexed locations to corresponding values.

    This function is equivalent to `MaskedArray.put`, see that method
    for details.

    See Also
    --------
    MaskedArray.put
    """
    # We can't use 'frommethod', the order of arguments is different
    try:
        return a.put(indices, values, mode=mode)
    except AttributeError:
        # `a` has no `put` method: go through a plain ndarray view.
        return narray(a, copy=False).put(indices, values, mode=mode)
def putmask(a, mask, values): #, mode='raise'):
    """
    Changes elements of an array based on conditional and input values.

    This is the masked array version of `numpy.putmask`, for details see
    `numpy.putmask`.

    See Also
    --------
    numpy.putmask

    Notes
    -----
    Using a masked array as `values` will **not** transform a `ndarray` into
    a `MaskedArray`.
    """
    # We can't use 'frommethod', the order of arguments is different
    if not isinstance(a, MaskedArray):
        a = a.view(MaskedArray)
    (valdata, valmask) = (getdata(values), getmask(values))
    # Case 1: `a` is unmasked -- create a mask only if `values` has one.
    if getmask(a) is nomask:
        if valmask is not nomask:
            a._sharedmask = True
            a._mask = make_mask_none(a.shape, a.dtype)
            np.copyto(a._mask, valmask, where=mask)
    # Case 2: hard mask -- masked entries of `a` must stay masked, so the
    # value mask can only be OR-ed into the existing one.
    elif a._hardmask:
        if valmask is not nomask:
            m = a._mask.copy()
            np.copyto(m, valmask, where=mask)
            a.mask |= m
    # Case 3: soft mask -- the value mask overwrites `a`'s mask under
    # `mask` (so unmasked values can unmask target entries).
    else:
        if valmask is nomask:
            valmask = getmaskarray(values)
        np.copyto(a._mask, valmask, where=mask)
    np.copyto(a._data, valdata, where=mask)
    return
def transpose(a, axes=None):
    """
    Permute the dimensions of an array.

    This function is exactly equivalent to `numpy.transpose`, with the
    mask permuted along with the data.

    See Also
    --------
    numpy.transpose : Equivalent function in top-level NumPy module.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> x = ma.arange(4).reshape((2,2))
    >>> x[1, 1] = ma.masked
    >>> ma.transpose(x)
    masked_array(data =
     [[0 2]
     [1 --]],
          mask =
     [[False False]
     [False  True]],
          fill_value = 999999)
    """
    # We can't use 'frommethod': 'transpose' doesn't take keywords.
    try:
        return a.transpose(axes)
    except AttributeError:
        # Plain sequences: convert, transpose, and view as masked.
        return narray(a, copy=False).transpose(axes).view(MaskedArray)
def reshape(a, new_shape, order='C'):
    """
    Returns an array containing the same data with a new shape.

    Refer to `MaskedArray.reshape` for full documentation.

    See Also
    --------
    MaskedArray.reshape : equivalent function
    """
    # We can't use 'frommethod': the parameter handling differs.
    try:
        return a.reshape(new_shape, order=order)
    except AttributeError:
        reshaped = narray(a, copy=False).reshape(new_shape, order=order)
        return reshaped.view(MaskedArray)
def resize(x, new_shape):
    """
    Return a new masked array with the specified size and shape.

    This is the masked equivalent of the `numpy.resize` function. The new
    array is filled with repeated copies of `x` (in the order that the
    data are stored in memory). If `x` is masked, the new array will be
    masked, and the new mask will be a repetition of the old one.  A
    MaskedArray is always returned, regardless of the input type.

    See Also
    --------
    numpy.resize : Equivalent function in the top level NumPy module.
    """
    # We can't use _frommethod: np.resize handles parameters differently.
    mask = getmask(x)
    if mask is not nomask:
        # Repeat the mask with the same tiling applied to the data.
        mask = np.resize(mask, new_shape)
    result = np.resize(x, new_shape).view(get_masked_subclass(x))
    if result.ndim:
        result._mask = mask
    return result
#................................................
def rank(obj):
    "maskedarray version of the numpy function."
    # Rank (number of dimensions) of the underlying data.
    # NOTE(review): np.rank was deprecated/removed in later numpy releases
    # in favor of np.ndim -- confirm the targeted numpy version has it.
    return np.rank(getdata(obj))
rank.__doc__ = np.rank.__doc__
#
def shape(obj):
    "maskedarray version of the numpy function."
    # Shape of the underlying data (the mask shares the same shape).
    return np.shape(getdata(obj))
shape.__doc__ = np.shape.__doc__
#
def size(obj, axis=None):
    "maskedarray version of the numpy function."
    # Element count of the underlying data, masked entries included.
    return np.size(getdata(obj), axis)
size.__doc__ = np.size.__doc__
#................................................
#####--------------------------------------------------------------------------
#---- --- Extra functions ---
#####--------------------------------------------------------------------------
def where(condition, x=None, y=None):
    """
    Return a masked array with elements from `x` or `y`, depending on
    `condition`.

    With only `condition` given, return the tuple
    ``condition.nonzero()``.  Otherwise both `x` and `y` must be given:
    the result takes its elements (and mask) from `x` where `condition`
    is True and from `y` elsewhere, and is additionally masked wherever
    `condition` itself is masked.

    Parameters
    ----------
    condition : array_like, bool
        The condition to meet.
    x, y : array_like, optional
        Values from which to choose; broadcast-compatible with
        `condition`.

    Returns
    -------
    out : MaskedArray or tuple of ndarrays
        The resulting masked array if `x` and `y` were given, otherwise
        the result of ``condition.nonzero()``.

    See Also
    --------
    numpy.where : Equivalent function in the top-level NumPy module.
    """
    if x is None and y is None:
        return filled(condition, 0).nonzero()
    if x is None or y is None:
        raise ValueError("Either both or neither x and y should be given.")
    # Boolean selector: masked entries of the condition count as False.
    fc = filled(condition, 0).astype(MaskType)
    notfc = np.logical_not(fc)
    xv = getdata(x)
    yv = getdata(y)
    # Result dtype: taken from the non-masked side when one side is the
    # ``masked`` singleton, otherwise the common type of both.
    if x is masked:
        ndtype = yv.dtype
    elif y is masked:
        ndtype = xv.dtype
    else:
        ndtype = np.find_common_type([xv.dtype, yv.dtype], [])
    # Fill an empty result array from both sides.
    d = np.empty(fc.shape, dtype=ndtype).view(MaskedArray)
    _data = d._data
    np.copyto(_data, xv.astype(ndtype), where=fc)
    np.copyto(_data, yv.astype(ndtype), where=notfc)
    # Combine the masks of x, y, and the condition itself.
    _mask = d._mask = np.zeros(fc.shape, dtype=MaskType)
    np.copyto(_mask, getmask(x), where=fc)
    np.copyto(_mask, getmask(y), where=notfc)
    _mask |= getmaskarray(condition)
    if not _mask.any():
        d._mask = nomask
    return d
def choose(indices, choices, out=None, mode='raise'):
    """
    Use an index array to construct a new array from a set of choices.

    Given an array of integers in ``[0, n-1]`` and n choice arrays,
    build an array whose entry at each position comes from the choice
    array selected by the index there; masked entries of the index and
    of the selected choice are masked in the result.

    Parameters
    ----------
    indices : ndarray of ints
        Index array; values select among `choices`.
    choices : sequence of arrays
        Choice arrays, broadcastable to a common shape with `indices`.
    out : array, optional
        If provided, the result is placed in this array.
    mode : {'raise', 'wrap', 'clip'}, optional
        How out-of-bounds indices are handled ('raise' errors, 'wrap'
        wraps around, 'clip' clips to the valid range).

    Returns
    -------
    merged_array : array

    See Also
    --------
    choose : equivalent function
    """
    def _choice_data(c):
        "Filled data of one choice (True when the choice is ``masked``)."
        if c is masked:
            return True
        return filled(c)

    def _choice_mask(c):
        "Mask of one choice (True if ``masked``, False if ``nomask``)."
        if c is masked:
            return True
        return getmask(c)

    idx = filled(indices, 0)
    masks = [_choice_mask(c) for c in choices]
    data = [_choice_data(c) for c in choices]
    # Select the per-position mask, then fold in the index's own mask.
    outputmask = np.choose(idx, masks, mode=mode)
    outputmask = make_mask(mask_or(outputmask, getmask(indices)),
                           copy=0, shrink=True)
    # Select the data and attach the combined mask.
    d = np.choose(idx, data, mode=mode, out=out).view(MaskedArray)
    if out is not None:
        if isinstance(out, MaskedArray):
            out.__setmask__(outputmask)
        return out
    d.__setmask__(outputmask)
    return d
def round_(a, decimals=0, out=None):
    """
    Return a copy of `a`, rounded to `decimals` places.

    When `decimals` is negative, it specifies the number of positions to
    the left of the decimal point.  Real and imaginary parts of complex
    numbers are rounded separately.  Nothing is done if the array is not
    of float type and `decimals` is greater than or equal to 0.

    Parameters
    ----------
    decimals : int
        Number of decimals to round to. May be negative.
    out : array_like
        Existing array to use for output.  If not given, a rounded copy
        of `a` is returned.

    Notes
    -----
    If `out` is given and has no `_mask` attribute, the mask of `a` is
    lost!
    """
    if out is None:
        return np.round_(a, decimals, out)
    # Round the raw data into `out`, then transfer the mask if possible.
    np.round_(getdata(a), decimals, out)
    if hasattr(out, '_mask'):
        out._mask = getmask(a)
    return out
round = round_
def inner(a, b):
    """
    Return the inner product of `a` and `b`, with masked values set to 0.

    As with the generic NumPy equivalent, the product sum is over the
    last dimension of `a` and `b`.  The first argument is not
    conjugated.
    """
    da = filled(a, 0)
    db = filled(b, 0)
    # Promote 0-d inputs to 1-element vectors so np.inner is well-defined.
    if da.ndim == 0:
        da.shape = (1,)
    if db.ndim == 0:
        db.shape = (1,)
    return np.inner(da, db).view(MaskedArray)
inner.__doc__ = doc_note(np.inner.__doc__,
                         "Masked values are replaced by 0.")
innerproduct = inner
def outer(a, b):
    "maskedarray version of the numpy function."
    da = filled(a, 0).ravel()
    db = filled(b, 0).ravel()
    prod = np.outer(da, db)
    if getmask(a) is nomask and getmask(b) is nomask:
        return masked_array(prod)
    # A result cell is masked when either contributing cell is masked:
    # 1 - outer(1-ma, 1-mb) is the elementwise OR of the two masks.
    ma_full = getmaskarray(a)
    mb_full = getmaskarray(b)
    combined = make_mask(1 - np.outer(1 - ma_full, 1 - mb_full), copy=0)
    return masked_array(prod, mask=combined)
outer.__doc__ = doc_note(np.outer.__doc__,
                         "Masked values are replaced by 0.")
outerproduct = outer
def allequal(a, b, fill_value=True):
    """
    Return True if all entries of `a` and `b` are equal, using
    `fill_value` as a truth value where either or both are masked.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    fill_value : bool, optional
        Whether masked values in `a` or `b` are considered equal (True)
        or not (False).

    Returns
    -------
    y : bool
        True if the two arrays are equal, False otherwise.

    See Also
    --------
    all, any
    numpy.ma.allclose
    """
    combined_mask = mask_or(getmask(a), getmask(b))
    if combined_mask is nomask:
        # No masked entries anywhere: plain element-wise comparison.
        return umath.equal(getdata(a), getdata(b)).all()
    if fill_value:
        # Masked positions are treated as equal: mask out the
        # comparison there and fill with True before reducing.
        eq = umath.equal(getdata(a), getdata(b))
        masked_eq = array(eq, mask=combined_mask, copy=False)
        return masked_eq.filled(True).all(None)
    # Masked entries present and not considered equal.
    return False
def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8):
    """
    Return True if two arrays are element-wise equal within a tolerance.

    This function is equivalent to `allclose` except that masked values
    are treated as equal (default) or unequal, depending on the
    `masked_equal` argument.  `allclose` returns True when, element-wise,
    ``absolute(a - b) <= (atol + rtol * absolute(b))``.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    masked_equal : bool, optional
        Whether masked values in `a` and `b` are considered equal (True)
        or not (False). They are considered equal by default.
    rtol : float, optional
        Relative tolerance (multiplied by ``absolute(b)``).  Default 1e-5.
    atol : float, optional
        Absolute tolerance.  Default 1e-8.

    Returns
    -------
    y : bool
        True if the two arrays are equal within the given tolerance,
        False otherwise (also when either array contains NaN).

    See Also
    --------
    all, any
    numpy.allclose : the non-masked `allclose`.
    """
    x = masked_array(a, copy=False)
    y = masked_array(b, copy=False)
    # Promote y to an inexact type to avoid abs(MIN_INT); x is cast
    # later through the arithmetic.
    dtype = np.result_type(y, 1.)
    if y.dtype != dtype:
        y = masked_array(y, dtype=dtype, copy=False)
    m = mask_or(getmask(x), getmask(y))

    def _within_tol(u, v):
        "Element-wise closeness test, with masked slots filled."
        return filled(umath.less_equal(umath.absolute(u - v),
                                       atol + rtol * umath.absolute(v)),
                      masked_equal)

    xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False)
    # Any infinities must occur at identical positions in both arrays...
    if not np.all(xinf == filled(np.isinf(y), False)):
        return False
    # No infs at all: a single tolerance check suffices.
    if not np.any(xinf):
        return np.all(_within_tol(x, y))
    # ...and the infinite entries must match exactly.
    if not np.all(filled(x[xinf] == y[xinf], masked_equal)):
        return False
    # Compare the remaining (finite) entries within tolerance.
    x = x[~xinf]
    y = y[~xinf]
    return np.all(_within_tol(x, y))
#..............................................................................
def asarray(a, dtype=None, order=None):
    """
    Convert the input to a masked array of the given data-type.

    No copy is performed if the input is already an `ndarray`.
    Subclasses of `MaskedArray` are downcast to a base-class
    `MaskedArray`.

    Parameters
    ----------
    a : array_like
        Input data in any form convertible to a masked array (lists,
        tuples, ndarrays, masked arrays, ...).
    dtype : dtype, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Whether to use row-major ('C') or column-major ('FORTRAN')
        memory representation. Default is 'C'.

    Returns
    -------
    out : MaskedArray
        Masked array interpretation of `a`.

    See Also
    --------
    asanyarray : Similar to `asarray`, but conserves subclasses.
    """
    return masked_array(a, dtype=dtype, copy=False, keep_mask=True,
                        subok=False)
def asanyarray(a, dtype=None):
    """
    Convert the input to a masked array, conserving subclasses.

    If `a` is a subclass of `MaskedArray`, its class is conserved; no
    copy is performed if the input is already an `ndarray`.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to an array.
    dtype : dtype, optional
        By default, the data-type is inferred from the input data.

    Returns
    -------
    out : MaskedArray
        MaskedArray interpretation of `a`.

    See Also
    --------
    asarray : Similar to `asanyarray`, but does not conserve subclass.
    """
    return masked_array(a, dtype=dtype, copy=False, keep_mask=True,
                        subok=True)
#####--------------------------------------------------------------------------
#---- --- Pickling ---
#####--------------------------------------------------------------------------
def dump(a, F):
    """
    Pickle a masked array to a file.

    This is a wrapper around ``pickle.dump``.

    Parameters
    ----------
    a : MaskedArray
        The array to be pickled.
    F : str or file-like object
        The file to pickle `a` to. If a string, the full path to the
        file; the file is opened in binary mode and closed afterwards.
    """
    if not hasattr(F, 'readline'):
        # Pickle streams are binary: open with 'wb' (the old text-mode
        # 'w' breaks under Python 3) and close the handle when done
        # (the old code leaked it).
        with open(F, 'wb') as fh:
            return pickle.dump(a, fh)
    return pickle.dump(a, F)
def dumps(a):
    """
    Return the pickled representation of a masked array.

    Thin wrapper around ``pickle.dumps``.

    Parameters
    ----------
    a : MaskedArray
        The array whose pickle representation is returned.
    """
    return pickle.dumps(a)
def load(F):
    """
    Wrapper around ``pickle.load`` which accepts either a file-like
    object or a filename.

    Parameters
    ----------
    F : str or file
        The file or file name to load.

    See Also
    --------
    dump : Pickle an array

    Notes
    -----
    This is different from `numpy.load`, which does not use pickle but
    loads the NumPy binary .npy format.
    """
    if not hasattr(F, 'readline'):
        # Pickle data is binary: open with 'rb' (the old text-mode 'r'
        # breaks under Python 3) and close the handle when done
        # (the old code leaked it).
        with open(F, 'rb') as fh:
            return pickle.load(fh)
    return pickle.load(F)
def loads(strg):
    """
    Load a pickled object from the given string.

    Returns the result of ``pickle.loads(strg)``.

    Parameters
    ----------
    strg : str
        The pickled string to load.

    See Also
    --------
    dumps : Return a string corresponding to the pickling of a masked array.
    """
    return pickle.loads(strg)
################################################################################
def fromfile(file, dtype=float, count= -1, sep=''):
    # Placeholder kept for API symmetry with numpy.fromfile; reading a
    # masked array directly from a file is not supported.
    raise NotImplementedError("Not yet implemented. Sorry")
def fromflex(fxarray):
    """
    Build a masked array from a suitable flexible-type array.

    The input must have a data-type with ``_data`` and ``_mask``
    fields, as produced by `MaskedArray.toflex`; any other fields
    present are discarded.

    Parameters
    ----------
    fxarray : ndarray
        The structured input array, containing ``_data`` and ``_mask``
        fields.

    Returns
    -------
    result : MaskedArray
        The constructed masked array.

    See Also
    --------
    MaskedArray.toflex : Build a flexible-type array from a masked array.

    Examples
    --------
    >>> x = np.ma.array([1, 2], mask=[0, 1])
    >>> np.ma.fromflex(x.toflex()).mask.tolist()
    [False, True]
    """
    data_field = fxarray['_data']
    mask_field = fxarray['_mask']
    return masked_array(data_field, mask=mask_field)
class _convert2ma:
"""
Convert functions from numpy to numpy.ma.
Parameters
----------
_methodname : string
Name of the method to transform.
"""
__doc__ = None
#
def __init__(self, funcname, params=None):
self._func = getattr(np, funcname)
self.__doc__ = self.getdoc()
self._extras = params or {}
#
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
doc = getattr(self._func, '__doc__', None)
sig = get_object_signature(self._func)
if doc:
# Add the signature of the function at the beginning of the doc
if sig:
sig = "%s%s\n" % (self._func.__name__, sig)
doc = sig + doc
return doc
#
def __call__(self, a, *args, **params):
# Find the common parameters to the call and the definition
_extras = self._extras
common_params = set(params).intersection(_extras)
# Drop the common parameters from the call
for p in common_params:
_extras[p] = params.pop(p)
# Get the result
result = self._func.__call__(a, *args, **params).view(MaskedArray)
if "fill_value" in common_params:
result.fill_value = _extras.get("fill_value", None)
if "hardmask" in common_params:
result._hardmask = bool(_extras.get("hard_mask", False))
return result
# Masked-array flavored versions of the common numpy constructors.
# _convert2ma wraps the numpy function so the result is viewed as a
# MaskedArray (optionally honoring fill_value/hardmask keywords); plain
# np functions are re-exported directly where no masking logic is needed.
arange = _convert2ma('arange', params=dict(fill_value=None, hardmask=False))
clip = np.clip
diff = np.diff
empty = _convert2ma('empty', params=dict(fill_value=None, hardmask=False))
empty_like = _convert2ma('empty_like')
frombuffer = _convert2ma('frombuffer')
fromfunction = _convert2ma('fromfunction')
identity = _convert2ma('identity', params=dict(fill_value=None, hardmask=False))
indices = np.indices
ones = _convert2ma('ones', params=dict(fill_value=None, hardmask=False))
ones_like = np.ones_like
squeeze = np.squeeze
zeros = _convert2ma('zeros', params=dict(fill_value=None, hardmask=False))
zeros_like = np.zeros_like
###############################################################################
def append(a, b, axis=None):
    """Append values to the end of an array.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    a : array_like
        Values are appended to a copy of this array.
    b : array_like
        These values are appended to a copy of `a`. It must be of the
        correct shape (the same shape as `a`, excluding `axis`). If
        `axis` is not specified, `b` can be any shape and will be
        flattened before use.
    axis : int, optional
        The axis along which `b` is appended. If `axis` is not given,
        both `a` and `b` are flattened before use.

    Returns
    -------
    append : MaskedArray
        A new array with `b` appended along `axis`; never in-place.

    See Also
    --------
    numpy.append : Equivalent function in the top-level NumPy module.
    """
    pieces = [a, b]
    return concatenate(pieces, axis)
|
from inspect import isfunction
import copy
import numpy as np
import os
from camera import Camera
from display import Display
from lay import Lay
from material import Material
from transform import *
class Space(object):
    """A polygon-mesh scene container.

    Vertices, polygons and geometries are stored in parallel
    structure-of-arrays lists.  Every list has a dummy entry at index 0 so
    that the 1-based indices used by the ".d" mesh files can be used
    directly without translation.
    """

    def __init__(self):
        # vertex tables (index 0 is a placeholder)
        self.vertex_number = 0
        self.vertex_list_coordinate = [np.array([])]
        self.vertex_list_normal_vector = [np.array([])]
        self.vertex_list_polygon_list = [[]]
        self.vertex_list_geometry_index = [0]
        # polygon tables (index 0 is a placeholder)
        self.polygon_number = 0
        self.polygon_list_vertex_list = [[]]
        self.polygon_list_normal_vector = [np.array([])]
        self.polygon_list_edge_list_vertex_index = [[[]]]
        self.polygon_list_geometry_index = [0]
        # geometry tables (index 0 is a placeholder)
        self.geometry_number = 0
        self.geometry_list_material = [Material()]

    def append_by_file(self, file_path):
        """Read a ".d" mesh file (path relative to this module) and append
        its contents as one new geometry.

        The first line of the file holds the vertex and polygon counts;
        vertex lines follow, then polygon lines.  A pickled material is
        expected in a sidecar file named "<file_path>.material.p".
        Raises on malformed lines or out-of-range vertex indices.
        """
        # offset: indices in the file are 1-based within the file, so
        # remember where this geometry's vertices/polygons start.
        vertex_index_offset = self.vertex_number
        polygon_index_offset = self.polygon_number
        # read file
        with open(os.path.split(os.path.realpath(__file__))[0] + os.sep + file_path) as file:
            line_list = file.readlines()
        # get number of vertex and polygon
        line = line_list[0].split()
        self.vertex_number = int(line[1])
        self.polygon_number = int(line[2])
        # totals after this file is merged in
        vertex_total_number = self.vertex_number + vertex_index_offset
        polygon_total_number = self.polygon_number + polygon_index_offset
        # geometry: load the material from the pickled sidecar file
        material = Material()
        material.set_by_file(file_path + '.material.p')
        self.geometry_list_material.append(material)
        self.geometry_number += 1
        # vertex
        for i in range(1, 1 + self.vertex_number):
            try:
                line = line_list[i].split()
                # append `vertex_list_geometry_index`
                self.vertex_list_geometry_index.append(self.geometry_number)
                # append `vertex_list_coordinate`
                self.vertex_list_coordinate.append(np.array([float(line[0]), float(line[1]), float(line[2])]))
                # append `vertex_list_polygon_list` (filled in while reading polygons)
                self.vertex_list_polygon_list.append([])
            except Exception as e:
                print('An error occurs at line ' + str(i + 1) + ' in file "' + file_path + '".')
                raise e
        # polygon
        for i in range(1 + self.vertex_number, 1 + self.vertex_number + self.polygon_number):
            try:
                line = line_list[i].split()
                # append `polygon_list_geometry_index`
                self.polygon_list_geometry_index.append(self.geometry_number)
                # append `polygon_list_vertex_list`
                polygon_vertex_index = []
                for j in range(1, len(line)):
                    vertex_index = int(line[j]) + vertex_index_offset
                    if vertex_index <= vertex_index_offset or vertex_index > vertex_total_number:
                        raise Exception('The index of vertex is out of range.')
                    polygon_vertex_index.append(vertex_index)
                    # set corresponding item in `vertex_list_polygon_list`
                    self.vertex_list_polygon_list[vertex_index].append(len(self.polygon_list_vertex_list))
                self.polygon_list_vertex_list.append(polygon_vertex_index)
                # append `polygon_list_edge_list_vertex_index`
                # (j - 1 wraps to -1 for the first edge, closing the loop)
                polygon_edge_list_vertex_index = []
                for j in range(0, len(polygon_vertex_index)):
                    polygon_edge_list_vertex_index.append([polygon_vertex_index[j - 1], polygon_vertex_index[j]])
                self.polygon_list_edge_list_vertex_index.append(polygon_edge_list_vertex_index)
                # append `polygon_list_normal_vector`
                if len(polygon_vertex_index) < 3:
                    raise Exception('A polygon must contain at least 3 vertices.')
                #
                # Data description (of ".d" file) says:
                #
                #     "polygons given by: number of points in the polygon followed by vertex number in clockwise order
                #     (when looking from outside the object)"
                #
                # It means that:
                #
                #     If using (p0->p1) x (p1->p2) to calculate the normal vector n of the polygon,
                #     n will points to the INSIDE of the geometry.
                #
                #     However, n should point to the OUTSIDE in this graphic system.
                #     As a result, using (p1->p2) x (p0->p1) to calculate the normal vector n.
                #
                p0p1 = self.vertex_list_coordinate[self.polygon_list_vertex_list[-1][1]] \
                    - self.vertex_list_coordinate[self.polygon_list_vertex_list[-1][0]]
                p1p2 = self.vertex_list_coordinate[self.polygon_list_vertex_list[-1][2]] \
                    - self.vertex_list_coordinate[self.polygon_list_vertex_list[-1][1]]
                self.polygon_list_normal_vector.append(np.cross(p1p2, p0p1))
            except Exception as e:
                print('An error occurs at line ' + str(i + 1) + ' in file "' + file_path + '".')
                raise e
        # vertex: normal vector = (unnormalized) sum of adjacent polygon normals
        for i in range(1 + vertex_index_offset, 1 + vertex_total_number):
            vertex_normal_vector = np.array([0.0, 0.0, 0.0])
            for j in range(0, len(self.vertex_list_polygon_list[i])):
                vertex_normal_vector += self.polygon_list_normal_vector[self.vertex_list_polygon_list[i][j]]
            self.vertex_list_normal_vector.append(vertex_normal_vector)
        # convert all coordinates and vectors from 'v3' to 'mr4c1'
        # (homogeneous column form; second arg True marks a direction vector)
        for i in range(1 + vertex_index_offset, 1 + vertex_total_number):
            self.vertex_list_coordinate[i] = v3_to_mr4c1(self.vertex_list_coordinate[i])
            self.vertex_list_normal_vector[i] = v3_to_mr4c1(self.vertex_list_normal_vector[i], True)
        for i in range(1 + polygon_index_offset, 1 + polygon_total_number):
            self.polygon_list_normal_vector[i] = v3_to_mr4c1(self.polygon_list_normal_vector[i], True)
        # update `self.vertex_number` and `polygon_number`
        self.vertex_number = vertex_total_number
        self.polygon_number = polygon_total_number

    def append_by_space(self, space):
        """Merge another Space into this one, re-basing all of the other
        space's vertex/polygon/geometry indices onto this space's tables.
        """
        if not isinstance(space, Space):
            raise Exception('There is a type error in parameter `space`.')
        # append `vertex_list_coordinate` (slice [1:] skips the dummy entry)
        self.vertex_list_coordinate += space.vertex_list_coordinate[1:]
        # append `vertex_list_normal_vector`
        self.vertex_list_normal_vector += space.vertex_list_normal_vector[1:]
        # append `vertex_list_polygon_list`, shifting polygon indices
        for i in range(1, len(space.vertex_list_polygon_list)):
            vector_polygon_list = []
            for j in range(0, len(space.vertex_list_polygon_list[i])):
                vector_polygon_list.append(space.vertex_list_polygon_list[i][j] + self.polygon_number)
            self.vertex_list_polygon_list.append(vector_polygon_list)
        # append `polygon_list_normal_vector`
        self.polygon_list_normal_vector += space.polygon_list_normal_vector[1:]
        # append `polygon_list_vertex_list`, shifting vertex indices
        for i in range(1, len(space.polygon_list_vertex_list)):
            vector_polygon_list = []
            for j in range(0, len(space.polygon_list_vertex_list[i])):
                vector_polygon_list.append(space.polygon_list_vertex_list[i][j] + self.vertex_number)
            self.polygon_list_vertex_list.append(vector_polygon_list)
        # append `polygon_list_edge_list_vertex_index`, shifting vertex indices
        for i in range(1, len(space.polygon_list_edge_list_vertex_index)):
            polygon_edge_list_vertex_index = []
            for j in range(0, len(space.polygon_list_edge_list_vertex_index[i])):
                edge = []
                for k in range(0, len(space.polygon_list_edge_list_vertex_index[i][j])):
                    vertex_index = space.polygon_list_edge_list_vertex_index[i][j][k] + self.vertex_number
                    edge.append(vertex_index)
                polygon_edge_list_vertex_index.append(edge)
            self.polygon_list_edge_list_vertex_index.append(polygon_edge_list_vertex_index)
        # append `geometry_list_material` (deep copy so materials stay independent)
        for i in range(1, len(space.geometry_list_material)):
            self.geometry_list_material.append(copy.deepcopy(space.geometry_list_material[i]))
        # append `vertex_list_geometry_index`, shifting geometry indices
        for i in range(1, len(space.vertex_list_geometry_index)):
            self.vertex_list_geometry_index.append(space.vertex_list_geometry_index[i] + self.geometry_number)
        # append `polygon_list_geometry_index`, shifting geometry indices
        for i in range(1, len(space.polygon_list_geometry_index)):
            self.polygon_list_geometry_index.append(space.polygon_list_geometry_index[i] + self.geometry_number)
        # update `self.vertex_number`, `polygon_number` and `geometry_number`
        self.vertex_number = len(self.vertex_list_coordinate) - 1
        self.polygon_number = len(self.polygon_list_vertex_list) - 1
        self.geometry_number = len(self.geometry_list_material) - 1

    def transform(self, trans_func, trans_func_2nd_param):
        """Apply `trans_func(value, trans_func_2nd_param[, is_vector])` to
        every vertex coordinate and every vertex/polygon normal in place.

        `trans_func_2nd_param` must be a Lay, Camera or Display describing
        the transform's target space.
        """
        if not isfunction(trans_func):
            raise Exception('Parameter `trans_func` must be a function.')
        if not (isinstance(trans_func_2nd_param, Lay)
                or isinstance(trans_func_2nd_param, Camera)
                or isinstance(trans_func_2nd_param, Display)):
            raise Exception('There is a type error in parameter `trans_func_2nd_param`.')
        for i in range(1, len(self.vertex_list_coordinate)):
            self.vertex_list_coordinate[i] = \
                trans_func(self.vertex_list_coordinate[i], trans_func_2nd_param)
            # third argument True marks a direction vector (no translation)
            self.vertex_list_normal_vector[i] = \
                trans_func(self.vertex_list_normal_vector[i], trans_func_2nd_param, True)
        for i in range(1, len(self.polygon_list_normal_vector)):
            self.polygon_list_normal_vector[i] = \
                trans_func(self.polygon_list_normal_vector[i], trans_func_2nd_param, True)
|
import React, {useState} from 'react';
import { useSpring, animated } from 'react-spring';
import './App.css';
import reactSpring from 'react-spring';
function App() {
const props = useSpring({ to: {opacity: 1}, from: { opacity: 0 } })
const [flip, set] = useState(false)
const props1 = useSpring({
to: { opacity: 1 },
from: { opacity: 0 },
reset: true,
reverse: flip,
delay: 200,
onRest: () => set(!flip),
})
return (
<div>
<animated.h1 style={props1}>hello</animated.h1>
<animated.div style={props}>I will fade in!</animated.div>
</div>
);
}
export default App;
|
from django.urls import path
from .views import home, load_static, about_us, our_client, link, contact_us, join_us
# URL namespace: reverse routes as "others:<name>", e.g. "others:about_us".
app_name = "others"
urlpatterns = [
    # NOTE(review): route strings mix hyphen and underscore styles
    # ("about-us" vs "our_client"). Confirm before unifying — changing a
    # route string changes the public URL, unlike the `name` argument.
    path("about-us", about_us, name="about_us"),
    path("our_client", our_client, name="our_client"),
    path("link", link, name="link"),
    path("contact-us", contact_us, name="contact_us"),
    path('join-us', join_us, name="join_us"),
    # serves files by path; handled by the load_static view
    path("static/<path:filepath>", load_static, name="static"),
    # catch-all root route; kept last so the specific routes match first
    path("", home, name="home"),
]
|
"""Parser for Cozy syntax.
The important functions are:
- parse_spec: str -> Spec
- parse_stm: str -> Stm
- parse_exp: str -> Exp
"""
# builtin
import re
import sys
import ast
import inspect
# 3rd party
from ply import lex, yacc
# ours
from cozy import parsetools
from cozy import syntax
from cozy.syntax_tools import pprint
# Each keyword becomes a KW_* token for the lexer. So, e.g. "and" becomes
# KW_AND.  The unary/binary operator names from the syntax module (UOps,
# BOps) are appended so they lex as keywords too.
_KEYWORDS = ([
    "extern",
    "type",
    "handletype",
    "enum",
    "private",
    "op",
    "query",
    "state",
    "assume",
    "invariant",
    "true",
    "false",
    "min", "argmin",
    "max", "argmax",
    "if",
    "else",
    "let",
    "Native"] +
    list(syntax.UOps) +
    list(syntax.BOps))
# Each operator has a name and a syntax in a Cozy specification. Each
# becomes an OP_* token for the lexer. So, e.g. ("ASSIGN", "=") matches "="
# and the token will be named OP_ASSIGN.
# (Order is irrelevant here; ply sorts single-token string rules by length
# so longer operators like "=>" win over "=".)
_OPERATORS = [
    ("ASSIGN", "="),
    ("IMPLIES", "=>"),
    ("GE", ">="),
    ("LE", "<="),
    ("GT", ">"),
    ("LT", "<"),
    ("EQ", "=="),
    ("NE", "!="),
    ("PLUS", "+"),
    ("MINUS", "-"),
    ("TIMES", "*"),
    ("QUESTION", "?"),
    ("COLON", ":"),
    ("SEMICOLON", ";"),
    ("COMMA", ","),
    ("OPEN_BRACE", "{"),
    ("CLOSE_BRACE", "}"),
    ("OPEN_PAREN", "("),
    ("CLOSE_PAREN", ")"),
    ("OPEN_BRACKET", "["),
    ("CLOSE_BRACKET", "]"),
    ("DOT", "."),
    ("LEFT_ARROW", "<-"),
    ("RIGHT_ARROW", "->"),
    ("VBAR", "|")
]
def report_lex_error(line, message):
    """Print a lexer diagnostic to stderr and abort with exit status 1."""
    diagnostic = "on line {}: {}".format(line, message)
    print(diagnostic, file=sys.stderr)
    sys.exit(1)
def report_parse_error(where, message):
    """Pretty-print the offending node, report to stderr, and exit(1)."""
    diagnostic = "at {}: {}".format(pprint(where), message)
    print(diagnostic, file=sys.stderr)
    sys.exit(1)
# Lexer ########################################################################
def keyword_token_name(kw):
    """Map keyword *kw* to its lexer token name, e.g. "and" -> "KW_AND"."""
    upper = kw.upper()
    return "KW_{}".format(upper)
def op_token_name(opname):
    """Map operator name *opname* to its token name, e.g. "assign" -> "OP_ASSIGN"."""
    upper = opname.upper()
    return "OP_{}".format(upper)
# Enumerate token names.  ply discovers the legal token set via this
# module-level `tokens` name, so it must exist before the lexer is built.
tokens = []
for kw in _KEYWORDS:
    tokens.append(keyword_token_name(kw))
for opname, op in _OPERATORS:
    tokens.append(op_token_name(opname))
tokens += ["WORD", "NUM", "FLOAT", "STRINGLITERAL", "EXTERNCODETOKEN", "DOCCOMMENT"]
tokens = tuple(tokens)  # freeze tokens
def make_lexer():
    """Build and return the ply lexer for Cozy source text.

    Token rules are the t_* locals below; their docstrings are the regexes
    ply compiles, so those strings are load-bearing and must not change.
    """
    # *sigh*... ply has such a weird interface. It magically discovers your
    # token rules by looking at all in-scope variables (which are either
    # functions with regexes for docstring or plain old regex objects). This
    # code programmatically sets up some productions by writing directly to
    # the `locals()` dictionary. There might be a cleaner way to
    # programmatically produce token productions, but I don't know what it is.
    for kw in _KEYWORDS:
        locals()["t_{}".format(keyword_token_name(kw))] = re.escape(kw)
    for opname, op in _OPERATORS:
        locals()["t_{}".format(op_token_name(opname))] = re.escape(op)
    def t_WORD(t):
        r"[a-zA-Z_]\w*"
        if t.value in _KEYWORDS:
            # I wish I knew why I needed this. :(
            t.type = keyword_token_name(t.value)
        return t
    def t_COMMENT(t):
        r"//[^\n]*"
        # single-line comments produce no token
        pass
    def t_DOCCOMMENT(t):
        r"/\*\*(?:(?!\*/)(.|\n))*\*/"
        # Normalize the doc comment, removing leading indentation/etc.
        t.value = inspect.cleandoc(t.value)
        return t
    def t_MULTILINECOMMENT(t):
        r"/\* (?:(?!\*/) (.|\n) )* \*/"
        # block comments produce no token
        pass
    def t_FLOAT(t):
        r"(\d+(\.\d+)?[fF])"
        # ".1" not doable since it would create ambiguity w/ foo.1 syntax.
        t.value = syntax.ENum(float(t.value.rstrip("fF"))).with_type(syntax.TFloat())
        return t
    def t_NUM(t):
        r"\d+(l|L)?"
        # "l"/"L" suffix selects a long literal, C-style
        if t.value.lower().endswith("l"):
            t.value = syntax.ENum(int(t.value[:-1])).with_type(syntax.TLong())
        else:
            t.value = syntax.ENum(int(t.value)).with_type(syntax.TInt())
        return t
    def t_STRINGLITERAL(t):
        r'"([^\\"]|\\.)*"'
        # literal_eval handles escape sequences safely
        t.value = ast.literal_eval(t.value)
        return t
    def t_EXTERNCODETOKEN(t):
        r"\{\{(.|\n)*?\}\}"
        t.lexer.lineno += t.value.count("\n")
        # strip the {{ }} delimiters
        t.value = t.value[2:-2]
        return t
    # Define a rule so we can track line numbers
    def t_newline(t):
        r'\n+'
        t.lexer.lineno += len(t.value)
    t_ignore = ' \t'
    def t_error(t):
        report_lex_error(t.lexer.lineno, "Illegal character {}".format(repr(t.value[0])))
    return lex.lex()
# Shared module-level lexer; tokenize() clones it per call because ply
# lexer objects are stateful.
_lexer = make_lexer()

def tokenize(s):
    """Yield the ply tokens of source string `s`."""
    lexer = _lexer.clone() # Because lexer objects are stateful
    lexer.input(s)
    while True:
        tok = lexer.token()
        if not tok:
            break
        yield tok
# Parser #######################################################################
def make_parser():
    """Construct the ply yacc parser for Cozy specifications.

    The grammar lives in the docstrings of the nested p_* functions; ply
    discovers them (along with `start` and `precedence`) by inspecting this
    function's local scope, so none of the locals here are dead code and
    the p_* docstrings must not be edited.
    """
    start = "spec"  # ply: root nonterminal
    def p_spec(p):
        """spec : externcode doccomment WORD OP_COLON typedecls funcdecls states invariants methods externcode"""
        p[0] = syntax.Spec(p[3], p[5], p[6], p[7], p[8], p[9], p[1], p[10], p[2])
    def p_doccomment(p):
        """doccomment :
                      | DOCCOMMENT"""
        p[0] = p[1] if len(p) > 1 else ""
    def p_externcode(p):
        """externcode :
                      | EXTERNCODETOKEN"""
        p[0] = p[1] if len(p) > 1 else ""
    parsetools.multi(locals(), "typedecls", "typedecl")
    def p_typedecl(p):
        """typedecl : KW_TYPE WORD OP_ASSIGN type
                    | KW_HANDLETYPE WORD OP_ASSIGN type"""
        if p[1] == "type":
            p[0] = (p[2], p[4])
        elif p[1] == "handletype":
            # handle types wrap their value type and remember their own name
            p[0] = (p[2], syntax.THandle(p[2], p[4]))
    def p_type(p):
        """type : WORD
                | WORD OP_LT type OP_GT
                | OP_OPEN_BRACE typednames OP_CLOSE_BRACE
                | KW_ENUM OP_OPEN_BRACE enum_cases OP_CLOSE_BRACE
                | OP_OPEN_PAREN typelist OP_CLOSE_PAREN
                | KW_NATIVE STRINGLITERAL"""
        # disambiguate by production length, then by the leading token
        if len(p) == 2:
            p[0] = syntax.TNamed(p[1])
        elif len(p) == 3:
            p[0] = syntax.TNative(p[2])
        elif len(p) == 5:
            if p[1] == "enum":
                p[0] = syntax.TEnum(p[3])
            else:
                p[0] = syntax.TApp(p[1], p[3])
        elif len(p) == 4:
            if p[1] == "{":
                p[0] = syntax.TRecord(p[2])
            elif p[1] == "(":
                p[0] = syntax.TTuple(p[2])
    parsetools.multi(locals(), "enum_cases", "WORD", sep="OP_COMMA")
    def p_typedname(p):
        """typedname : WORD OP_COLON type"""
        p[0] = (p[1], p[3])
    parsetools.multi(locals(), "typednames", "typedname", sep="OP_COMMA")
    parsetools.multi(locals(), "typelist", "type", sep="OP_COMMA")
    def p_func(p):
        """func : KW_EXTERN WORD OP_OPEN_PAREN typednames OP_CLOSE_PAREN OP_COLON type OP_ASSIGN STRINGLITERAL"""
        p[0] = syntax.ExternFunc(p[2], p[4], p[7], p[9])
    parsetools.multi(locals(), "funcdecls", "func")
    def p_statevar(p):
        """statevar : KW_STATE WORD OP_COLON type"""
        p[0] = (p[2], p[4])
    parsetools.multi(locals(), "states", "statevar")
    def p_assume(p):
        """assume : KW_ASSUME exp OP_SEMICOLON"""
        p[0] = p[2]
    def p_invariant(p):
        """invariant : KW_INVARIANT exp OP_SEMICOLON"""
        p[0] = p[2]
    parsetools.multi(locals(), "assumes", "assume")
    parsetools.multi(locals(), "invariants", "invariant")
    # ply precedence table: lowest binding first
    precedence = (
        ("nonassoc", "KW_ELSE", "OP_COLON"),
        ("left", "OP_SEMICOLON"),
        ("left", "OP_COMMA"),
        ("left", "OP_QUESTION"),
        ("left", "OP_IMPLIES"),
        ("left", "KW_AND", "KW_OR"),
        ("left", "OP_EQ", "OP_NE", "OP_LT", "OP_LE", "OP_GT", "OP_GE"),
        ("left", "OP_PLUS", "OP_MINUS"),
        ("left", "OP_TIMES"),
        ("left", "KW_IN"),
        ("left", "KW_NOT", "KW_DISTINCT", "KW_UNIQUE", "KW_EMPTY", "KW_EXISTS", "KW_THE", "KW_MIN", "KW_MAX", "KW_ARGMIN", "KW_ARGMAX", "KW_SUM", "KW_ANY", "KW_ALL", "KW_LEN", "KW_REVERSED"),
        ("left", "OP_OPEN_BRACKET"),
        ("left", "OP_OPEN_PAREN"),
        ("left", "OP_DOT"),
        ("left", "KW_OP", "KW_QUERY", "KW_PRIVATE"))
    def p_exp_strlit(p):
        """exp : STRINGLITERAL"""
        p[0] = syntax.EStr(p[1])
    def p_lambda(p):
        """lambda : OP_OPEN_BRACE WORD OP_RIGHT_ARROW exp OP_CLOSE_BRACE"""
        p[0] = syntax.ELambda(syntax.EVar(p[2]), p[4])
    def p_slice(p):
        """slice : exp
                 | exp OP_COLON
                 | OP_COLON exp
                 | exp OP_COLON exp"""
        # a bare exp is an index; otherwise a (start, end) pair where a
        # missing bound is None (filled in by the caller)
        if len(p) == 2:
            p[0] = p[1]
        elif len(p) == 3:
            if p[1] == ":":
                p[0] = (None, p[2])
            elif p[2] == ":":
                p[0] = (p[1], None)
        elif len(p) == 4:
            p[0] = (p[1], p[3])
    def p_exp(p):
        """exp : NUM
               | FLOAT
               | WORD
               | WORD OP_OPEN_PAREN exp_list OP_CLOSE_PAREN
               | KW_TRUE
               | KW_FALSE
               | exp OP_PLUS exp
               | exp OP_MINUS exp
               | exp OP_TIMES exp
               | exp OP_EQ exp
               | exp OP_NE exp
               | exp OP_LT exp
               | exp OP_LE exp
               | exp OP_GT exp
               | exp OP_GE exp
               | exp KW_AND exp
               | exp KW_OR exp
               | exp OP_IMPLIES exp
               | exp OP_QUESTION exp OP_COLON exp
               | exp OP_OPEN_BRACKET slice OP_CLOSE_BRACKET
               | KW_NOT exp
               | OP_MINUS exp
               | exp KW_IN exp
               | KW_UNIQUE exp
               | KW_DISTINCT exp
               | KW_EMPTY exp
               | KW_THE exp
               | KW_MIN exp
               | KW_MAX exp
               | KW_ARGMIN lambda exp
               | KW_ARGMAX lambda exp
               | KW_SUM exp
               | KW_LEN exp
               | KW_ANY exp
               | KW_ALL exp
               | KW_EXISTS exp
               | KW_REVERSED exp
               | exp OP_DOT NUM
               | exp OP_DOT WORD
               | OP_OPEN_PAREN exp_list OP_CLOSE_PAREN
               | OP_OPEN_BRACE record_fields OP_CLOSE_BRACE
               | OP_OPEN_BRACKET exp OP_CLOSE_BRACKET
               | OP_OPEN_BRACKET exp OP_VBAR comprehension_body OP_CLOSE_BRACKET"""
        # Disambiguation is again by production length and leading tokens.
        if len(p) == 2:
            if type(p[1]) is syntax.ENum:
                p[0] = p[1]
            elif p[1] == "true":
                p[0] = syntax.EBool(True)
            elif p[1] == "false":
                p[0] = syntax.EBool(False)
            else:
                p[0] = syntax.EVar(p[1])
        elif len(p) == 3:
            # unary operators; min/max desugar to argmin/argmax with the
            # identity key function
            if p[1] == "min":
                p[0] = syntax.EArgMin(p[2], syntax.ELambda(syntax.EVar("x"), syntax.EVar("x")))
            elif p[1] == "max":
                p[0] = syntax.EArgMax(p[2], syntax.ELambda(syntax.EVar("x"), syntax.EVar("x")))
            else:
                p[0] = syntax.EUnaryOp(p[1], p[2])
        elif len(p) == 4:
            if p[1] == "(":
                # a 1-tuple collapses to the bare expression
                exps = p[2]
                if len(exps) == 0:
                    raise Exception("illegal ()")
                elif len(exps) == 1:
                    p[0] = exps[0]
                elif len(exps) > 1:
                    p[0] = syntax.ETuple(tuple(exps))
            elif p[1] == "[":
                p[0] = syntax.ESingleton(p[2])
            elif p[1] == "{":
                p[0] = syntax.EMakeRecord(p[2])
            elif p[2] == ".":
                # numeric field -> tuple access, word field -> record access
                if isinstance(p[3], syntax.ENum):
                    p[0] = syntax.ETupleGet(p[1], p[3].val)
                else:
                    p[0] = syntax.EGetField(p[1], p[3])
            elif p[1] == "argmin":
                p[0] = syntax.EArgMin(p[3], p[2])
            elif p[1] == "argmax":
                p[0] = syntax.EArgMax(p[3], p[2])
            else:
                p[0] = syntax.EBinOp(p[1], p[2], p[3])
        else:
            if p[2] == "?":
                p[0] = syntax.ECond(p[1], p[3], p[5])
            elif p[2] == "[":
                # `slice` yields either a plain index exp or a (start, end)
                # pair; missing bounds default to 0 and len(...)
                if isinstance(p[3], syntax.Exp):
                    p[0] = syntax.EListGet(p[1], p[3])
                elif isinstance(p[3], tuple):
                    start = p[3][0]
                    end = p[3][1]
                    if start is None:
                        start = syntax.ZERO
                    if end is None:
                        end = syntax.ELen(p[1])
                    p[0] = syntax.EListSlice(p[1], start, end)
            elif p[1] == "[":
                p[0] = syntax.EListComprehension(p[2], p[4])
            elif p[2] == "(":
                p[0] = syntax.ECall(p[1], p[3])
            else:
                assert False, "unknown case: {}".format(repr(p[1:]))
    parsetools.multi(locals(), "exp_list", "exp", sep="OP_COMMA")
    def p_record_field(p):
        """record_field : WORD OP_COLON exp"""
        p[0] = (p[1], p[3])
    parsetools.multi(locals(), "record_fields", "record_field", sep="OP_COMMA")
    def p_comprehension_clause(p):
        """comprehension_clause : WORD OP_LEFT_ARROW exp
                                | exp"""
        # `x <- e` pulls elements; a bare expression is a filter condition
        if len(p) == 2:
            p[0] = syntax.CCond(p[1])
        else:
            p[0] = syntax.CPull(p[1], p[3])
    parsetools.multi(locals(), "comprehension_body", "comprehension_clause", sep="OP_COMMA")
    def p_accesschain(p):
        """accesschain : WORD
                       | accesschain OP_DOT WORD"""
        if len(p) > 2:
            p[0] = syntax.EGetField(p[1], p[3])
        else:
            p[0] = syntax.EVar(p[1])
    def p_visibility(p):
        """visibility :
                      | KW_PRIVATE"""
        if len(p) > 1:
            p[0] = syntax.Visibility.Private
        else:
            p[0] = syntax.Visibility.Public
    def p_method(p):
        """method : doccomment KW_OP WORD OP_OPEN_PAREN typednames OP_CLOSE_PAREN assumes stm
                  | doccomment visibility KW_QUERY WORD OP_OPEN_PAREN typednames OP_CLOSE_PAREN assumes exp"""
        if p[2] == "op":
            p[0] = syntax.Op(p[3], p[5], p[7], p[8], p[1])
        else:
            p[0] = syntax.Query(p[4], p[2], p[6], p[8], p[9], p[1])
    parsetools.multi(locals(), "methods", "method")
    def p_maybeelse(p):
        """maybeelse :
                     | KW_ELSE block"""
        if len(p) > 1:
            p[0] = p[2]
        else:
            p[0] = syntax.SNoOp()
    def p_block(p):
        """block : OP_OPEN_BRACE stm OP_CLOSE_BRACE"""
        p[0] = p[2]
    def p_basicstm(p):
        """basicstm : accesschain OP_OPEN_PAREN exp_list OP_CLOSE_PAREN OP_SEMICOLON
                    | accesschain OP_ASSIGN exp OP_SEMICOLON
                    | KW_IF exp block maybeelse
                    | KW_LET WORD OP_ASSIGN exp OP_SEMICOLON"""
        if p[1] == "if":
            p[0] = syntax.SIf(p[2], p[3], p[4])
        elif p[1] == "let":
            p[0] = syntax.SDecl(p[2], p[4])
        elif p[2] == "(":
            # calls must be of the form target.method(args)
            if not isinstance(p[1], syntax.EGetField):
                report_parse_error(p[1], "Method calls must have the form `target.method(...)`")
            p[0] = syntax.SCall(p[1].e, p[1].f, p[3])
        else:
            p[0] = syntax.SAssign(p[1], p[3])
    def p_stm(p):
        """stm :
               | basicstm stm"""
        # fold consecutive statements into right-nested SSeq nodes,
        # dropping trailing no-ops
        if len(p) > 1:
            if isinstance(p[2], syntax.SNoOp):
                p[0] = p[1]
            else:
                p[0] = syntax.SSeq(p[1], p[2])
        else:
            p[0] = syntax.SNoOp()
    def p_empty(p):
        'empty :'
        pass
    def p_error(p):
        if p is None:
            raise Exception("Unexpected end-of-file")
        raise Exception("Syntax error on line {} at {}".format(p.lineno, p))
    return yacc.yacc()
# Shared parser instance; building the yacc tables is expensive, so do it once.
_parser = make_parser()

def parse_spec(s):
    """Parse a string as a Cozy specification."""
    parser = _parser
    return parser.parse(s, lexer=_lexer)

def parse_stm(s) -> syntax.Stm:
    """Parse a string as a statement."""
    # Wrap the statement in a minimal spec and pull the op body back out.
    return parse_spec("X: op f() " + s).methods[0].body

def parse_exp(s) -> syntax.Exp:
    """Parse a string as an expression."""
    # Wrap the expression in a minimal spec and pull the query result out.
    return parse_spec("X: query f() " + s).methods[0].ret
|
# -*- coding: latin-1 -*-
"""This file contains the public interface to the aiml module."""
from __future__ import print_function
import copy
import glob
import os
import random
import re
import string
import sys
import time
import threading
import xml.sax
from collections import namedtuple
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
from .constants import *
from . import DefaultSubs
from . import Utils
from .AimlParser import create_parser
from .PatternMgr import PatternMgr
from .WordSub import WordSub
from pathlib import Path
from logging import DEBUG, StreamHandler, getLogger, root
log = getLogger(__name__)
# NOTE(review): attaching a StreamHandler to the ROOT logger and forcing it
# to DEBUG from library code affects every consumer of this package and can
# duplicate log lines. Confirm whether this should move behind
# application-level logging configuration.
root.addHandler(StreamHandler(sys.stderr))
root.setLevel(DEBUG)
def msg_encoder( encoding=None ):
    """
    Return a named tuple holding a pair of functions to encode/decode
    messages.

    When `encoding` is None (or False) both functions are a unicode
    passthrough; otherwise `enc` encodes to bytes and `dec` decodes back,
    replacing unmappable characters.
    """
    Codec = namedtuple( 'Codec', ['enc','dec'] )
    if encoding in (None, False):
        passthrough = lambda x: unicode(x)
        return Codec(passthrough, passthrough)
    enc = lambda x: x.encode(encoding, 'replace')
    dec = lambda x: x.decode(encoding, 'replace')
    return Codec(enc, dec)
class Kernel:
    # module constants
    _globalSessionID = "_global" # key of the global session (duh)
    _maxHistorySize = 10 # maximum length of the _inputs and _responses lists
    _maxRecursionDepth = 100 # maximum number of recursive <srai>/<sr> tags before the response is aborted.
    # special predicate keys: reserved names inside every session dict
    _inputHistory = "_inputHistory" # keys to a queue (list) of recent user input
    _outputHistory = "_outputHistory" # keys to a queue (list) of recent responses.
    _inputStack = "_inputStack" # Should always be empty in between calls to respond()
def __init__(self, sessions=None):
    """Create a Kernel.

    `sessions` optionally supplies a pre-existing session-data mapping;
    the global session is created in it if missing.
    """
    self._verboseMode = True
    self._version = "python-aiml {}".format(VERSION)
    # the pattern matcher holding all learned categories
    self._brain = PatternMgr()
    # serializes respond() across threads
    self._respondLock = threading.RLock()
    # default I/O encoding: unicode passthrough on Py3, utf-8 on Py2
    self.setTextEncoding( None if PY3 else "utf-8" )

    # set up the sessions
    self._sessions = sessions if sessions is not None else {}
    self._addSession(self._globalSessionID)

    # Set up the bot predicates
    self._botPredicates = {}
    self.setBotPredicate("name", "Nameless")

    # set up the word substitutors (subbers):
    self._subbers = {}
    self._subbers['gender'] = WordSub(DefaultSubs.defaultGender)
    self._subbers['person'] = WordSub(DefaultSubs.defaultPerson)
    self._subbers['person2'] = WordSub(DefaultSubs.defaultPerson2)
    self._subbers['normal'] = WordSub(DefaultSubs.defaultNormal)

    # set up the element processors: maps AIML tag name -> handler method
    self._elementProcessors = {
        "bot": self._processBot,
        "condition": self._processCondition,
        "date": self._processDate,
        "formal": self._processFormal,
        "gender": self._processGender,
        "get": self._processGet,
        "gossip": self._processGossip,
        "id": self._processId,
        "input": self._processInput,
        "javascript": self._processJavascript,
        "learn": self._processLearn,
        "li": self._processLi,
        "lowercase": self._processLowercase,
        "person": self._processPerson,
        "person2": self._processPerson2,
        "random": self._processRandom,
        "text": self._processText,
        "sentence": self._processSentence,
        "set": self._processSet,
        "size": self._processSize,
        "sr": self._processSr,
        "srai": self._processSrai,
        "star": self._processStar,
        "system": self._processSystem,
        "template": self._processTemplate,
        "that": self._processThat,
        "thatstar": self._processThatstar,
        "think": self._processThink,
        "topicstar": self._processTopicstar,
        "uppercase": self._processUppercase,
        "version": self._processVersion,
    }
def bootstrap(self, brainFile = None, learnFiles = None, commands = None,
              chdir=None):
    """Prepare a Kernel object for use.

    If a `brainFile` argument is provided, the Kernel attempts to
    load the brain at the specified filename.

    If `learnFiles` is provided, the Kernel attempts to load the
    specified AIML files; when omitted, every non-"cn" .aiml file
    shipped alongside this module is learned.

    Finally, each of the input strings in the `commands` list is
    passed to respond().

    The `chdir` argument makes it change to that directory before
    performing any learn or command execution (but after loadBrain
    processing). Upon returning the current directory is moved back to
    where it was before.
    """
    start = time.monotonic()
    if brainFile:
        self.loadBrain(brainFile)
    prev = os.getcwd()
    try:
        if chdir:
            os.chdir( chdir )
        # BUGFIX: `learnFiles`/`commands` previously defaulted to mutable
        # list literals (the shared-default-argument pitfall); they now
        # default to None and are normalized here.
        if learnFiles is None:
            learnFiles = []
        if commands is None:
            commands = []
        # learnFiles might be a string, in which case it should be
        # turned into a single-element list.
        if isinstance( learnFiles, (str,unicode) ):
            learnFiles = (learnFiles,)
        if not learnFiles:
            # no files given: learn the package's bundled AIML files
            directory = Path(__file__).parent
            learnFiles = [
                f.as_posix()
                for f in
                directory.glob("**/*.aiml")
                if "cn" not in f.stem
            ]
        log.debug("Loading %d files", len(learnFiles))
        for file in learnFiles:
            self.learn(file)
        # ditto for commands
        if isinstance( commands, (str,unicode) ):
            commands = (commands,)
        for cmd in commands:
            print( self._respond(cmd, self._globalSessionID) )
    finally:
        if chdir:
            os.chdir( prev )
    if self._verboseMode:
        print( "Kernel bootstrap completed in %.2f seconds" % (time.monotonic() - start) )
def verbose(self, isVerbose = True):
"""Enable/disable verbose output mode."""
self._verboseMode = isVerbose
def version(self):
"""Return the Kernel's version string."""
return self._version
def numCategories(self):
"""Return the number of categories the Kernel has learned."""
# there's a one-to-one mapping between templates and categories
return self._brain.numTemplates()
def resetBrain(self):
    """Reset the Kernel to a pristine state.

    Equivalent to discarding this instance and constructing a fresh
    ``aiml.Kernel()``.
    """
    del self._brain
    self.__init__()
def loadBrain(self, filename):
"""Attempt to load a previously-saved 'brain' from the
specified filename.
NOTE: the current contents of the 'brain' will be discarded!
"""
if self._verboseMode: print( "Loading brain from %s..." % filename, end="" )
start = time.monotonic()
self._brain.restore(filename)
if self._verboseMode:
end = time.monotonic() - start
print( "done (%d categories in %.2f seconds)" % (self._brain.numTemplates(), end) )
def saveBrain(self, filename):
"""Dump the contents of the bot's brain to a file on disk."""
if self._verboseMode: print( "Saving brain to %s..." % filename, end="")
start = time.monotonic()
self._brain.save(filename)
if self._verboseMode:
print( "done (%.2f seconds)" % (time.monotonic() - start) )
def getPredicate(self, name, sessionID = _globalSessionID):
"""Retrieve the current value of the predicate 'name' from the
specified session.
If name is not a valid predicate in the session, the empty
string is returned.
"""
try: return self._sessions[sessionID][name]
except KeyError: return ""
def setPredicate(self, name, value, sessionID = _globalSessionID):
"""Set the value of the predicate 'name' in the specified
session.
If sessionID is not a valid session, it will be created. If
name is not a valid predicate in the session, it will be
created.
"""
self._addSession(sessionID) # add the session, if it doesn't already exist.
self._sessions[sessionID][name] = value
def getBotPredicate(self, name):
"""Retrieve the value of the specified bot predicate.
If name is not a valid bot predicate, the empty string is returned.
"""
try: return self._botPredicates[name]
except KeyError: return ""
def setBotPredicate(self, name, value):
"""Set the value of the specified bot predicate.
If name is not a valid bot predicate, it will be created.
"""
self._botPredicates[name] = value
# Clumsy hack: if updating the bot name, we must update the
# name in the brain as well
if name == "name":
self._brain.setBotName(self.getBotPredicate("name"))
def setTextEncoding(self, encoding ):
    """Select the I/O text encoding expected by the Kernel.

    Strings loaded from AIML files are converted to this encoding, and
    respond() both expects and returns strings encoded with it (str in
    Py2, bytes in Py3).  When it is False/None, strings are assumed
    *not* to be encoded, i.e. they are unicode strings (unicode in Py2,
    str in Py3).
    """
    self._textEncoding = encoding
    # rebuild the encoder/decoder pair to match the new encoding
    self._cod = msg_encoder( encoding )
def loadSubs(self, filename):
"""Load a substitutions file.
The file must be in the Windows-style INI format (see the
standard ConfigParser module docs for information on this
format). Each section of the file is loaded into its own
substituter.
"""
inFile = file(filename)
parser = ConfigParser()
parser.readfp(inFile, filename)
inFile.close()
for s in parser.sections():
# Add a new WordSub instance for this section. If one already
# exists, delete it.
if s in self._subbers:
del(self._subbers[s])
self._subbers[s] = WordSub()
# iterate over the key,value pairs and add them to the subber
for k,v in parser.items(s):
self._subbers[s][k] = v
def _addSession(self, sessionID):
"""Create a new session with the specified ID string."""
if sessionID in self._sessions:
return
# Create the session.
self._sessions[sessionID] = {
# Initialize the special reserved predicates
self._inputHistory: [],
self._outputHistory: [],
self._inputStack: []
}
def _deleteSession(self, sessionID):
"""Delete the specified session."""
if sessionID in self._sessions:
self._sessions.pop(sessionID)
def getSessionData(self, sessionID = None):
"""Return a copy of the session data dictionary for the
specified session.
If no sessionID is specified, return a dictionary containing
*all* of the individual session dictionaries.
"""
s = None
if sessionID is not None:
try: s = self._sessions[sessionID]
except KeyError: s = {}
else:
s = self._sessions
return copy.deepcopy(s)
def learn(self, filename):
"""Load and learn the contents of the specified AIML file.
If filename includes wildcard characters, all matching files
will be loaded and learned.
"""
for f in glob.glob(filename):
if self._verboseMode: print( "Loading %s..." % f, end="")
start = time.monotonic()
# Load and parse the AIML file.
parser = create_parser()
handler = parser.getContentHandler()
handler.setEncoding(self._textEncoding)
try: parser.parse(f)
except xml.sax.SAXParseException as msg:
err = "\nFATAL PARSE ERROR in file %s:\n%s\n" % (f,msg)
sys.stderr.write(err)
continue
# store the pattern/template pairs in the PatternMgr.
for key,tem in handler.categories.items():
self._brain.add(key,tem)
# Parsing was successful.
if self._verboseMode:
print( "done (%.2f seconds)" % (time.monotonic() - start) )
    def respond(self, input_, sessionID = _globalSessionID):
        """Return the Kernel's response to the input string.

        The input is decoded (if an I/O encoding is configured), split into
        sentences, and each sentence is answered via _respond() while the
        per-session input/output histories are maintained.  The reply is the
        space-joined per-sentence responses, re-encoded for I/O.
        """
        if len(input_) == 0:
            return u""
        # Decode the input (assumed to be an encoded string) into a unicode
        # string. Note that if encoding is False, this will be a no-op
        try: input_ = self._cod.dec(input_)
        except UnicodeError: pass
        except AttributeError: pass
        # prevent other threads from stomping all over us.
        self._respondLock.acquire()
        try:
            # Add the session, if it doesn't already exist
            self._addSession(sessionID)
            # split the input into discrete sentences
            sentences = Utils.sentences(input_)
            finalResponse = u""
            for s in sentences:
                # Add the input to the history list before fetching the
                # response, so that <input/> tags work properly.
                inputHistory = self.getPredicate(self._inputHistory, sessionID)
                inputHistory.append(s)
                while len(inputHistory) > self._maxHistorySize:
                    inputHistory.pop(0)
                self.setPredicate(self._inputHistory, inputHistory, sessionID)
                # Fetch the response
                response = self._respond(s, sessionID)
                # add the data from this exchange to the history lists
                outputHistory = self.getPredicate(self._outputHistory, sessionID)
                outputHistory.append(response)
                while len(outputHistory) > self._maxHistorySize:
                    outputHistory.pop(0)
                self.setPredicate(self._outputHistory, outputHistory, sessionID)
                # append this response to the final response.
                finalResponse += (response + u" ")
            finalResponse = finalResponse.strip()
            #print( "@ASSERT", self.getPredicate(self._inputStack, sessionID))
            # The input stack must be balanced after all sentences are handled.
            assert(len(self.getPredicate(self._inputStack, sessionID)) == 0)
            # and return, encoding the string into the I/O encoding
            return self._cod.enc(finalResponse)
        finally:
            # release the lock
            self._respondLock.release()
            # NOTE(review): this runs in the finally block, i.e. after the
            # try block has computed its return value.  `s` and `response`
            # carry the values of the *last* loop iteration and would be
            # unbound if the input produced no sentences -- TODO confirm
            # whether that NameError case can occur in practice.
            if hasattr(self, "onResponse"):
                self.onResponse(s, sessionID, response)
    # This version of _respond() just fetches the response for some input.
    # It does not mess with the input and output histories. Recursive calls
    # to respond() spawned from tags like <srai> should call this function
    # instead of respond().
    def _respond(self, input_, sessionID):
        """Private version of respond(), does the real work.

        Pushes the raw input onto the per-session input stack, matches the
        normalized (input, that, topic) triple against the brain, processes
        the matched template element, then pops the stack again.
        """
        if len(input_) == 0:
            return u""
        # guard against infinite recursion
        inputStack = self.getPredicate(self._inputStack, sessionID)
        if len(inputStack) > self._maxRecursionDepth:
            if self._verboseMode:
                err = u"WARNING: maximum recursion depth exceeded (input='%s')" % self._cod.enc(input_)
                sys.stderr.write(err)
            return u""
        # push the input onto the input stack
        inputStack = self.getPredicate(self._inputStack, sessionID)
        inputStack.append(input_)
        self.setPredicate(self._inputStack, inputStack, sessionID)
        # run the input through the 'normal' subber
        subbedInput = self._subbers['normal'].sub(input_)
        # fetch the bot's previous response, to pass to the match()
        # function as 'that'.
        outputHistory = self.getPredicate(self._outputHistory, sessionID)
        try: that = outputHistory[-1]
        except IndexError: that = ""
        subbedThat = self._subbers['normal'].sub(that)
        # fetch the current topic
        topic = self.getPredicate("topic", sessionID)
        subbedTopic = self._subbers['normal'].sub(topic)
        # Determine the final response.
        response = u""
        elem = self._brain.match(subbedInput, subbedThat, subbedTopic)
        if elem is None:
            if self._verboseMode:
                err = "WARNING: No match found for input: %s\n" % self._cod.enc(input_)
                sys.stderr.write(err)
        else:
            # Process the element into a response string.
            response += self._processElement(elem, sessionID).strip()
            response += u" "
        response = response.strip()
        # pop the top entry off the input stack.
        # NOTE(review): if _processElement raises, the stack is never popped,
        # and respond()'s balance assertion would fail on the next call.
        inputStack = self.getPredicate(self._inputStack, sessionID)
        inputStack.pop()
        self.setPredicate(self._inputStack, inputStack, sessionID)
        return response
def _processElement(self,elem, sessionID):
"""Process an AIML element.
The first item of the elem list is the name of the element's
XML tag. The second item is a dictionary containing any
attributes passed to that tag, and their values. Any further
items in the list are the elements enclosed by the current
element's begin and end tags; they are handled by each
element's handler function.
"""
try:
handlerFunc = self._elementProcessors[elem[0]]
except:
# Oops -- there's no handler function for this element type!
if self._verboseMode:
err = "WARNING: No handler found for <%s> element\n" % self._cod.enc(elem[0])
sys.stderr.write(err)
return u""
return handlerFunc(elem, sessionID)
######################################################
### Individual element-processing functions follow ###
######################################################
# <bot>
def _processBot(self, elem, sessionID):
"""Process a <bot> AIML element.
Required element attributes:
name: The name of the bot predicate to retrieve.
<bot> elements are used to fetch the value of global,
read-only "bot predicates." These predicates cannot be set
from within AIML; you must use the setBotPredicate() function.
"""
attrName = elem[1]['name']
return self.getBotPredicate(attrName)
# <condition>
    def _processCondition(self, elem, sessionID):
        """Process a <condition> AIML element.
        Optional element attributes:
            name: The name of a predicate to test.
            value: The value to test the predicate for.
        <condition> elements come in three flavors. Each has different
        attributes, and each handles their contents differently.
        The simplest case is when the <condition> tag has both a 'name'
        and a 'value' attribute. In this case, if the predicate
        'name' has the value 'value', then the contents of the element
        are processed and returned.
        If the <condition> element has only a 'name' attribute, then
        its contents are a series of <li> elements, each of which has
        a 'value' attribute. The list is scanned from top to bottom
        until a match is found. Optionally, the last <li> element can
        have no 'value' attribute, in which case it is processed and
        returned if no other match is found.
        If the <condition> element has neither a 'name' nor a 'value'
        attribute, then it behaves almost exactly like the previous
        case, except that each <li> subelement (except the optional
        last entry) must now include both 'name' and 'value'
        attributes.
        """
        attr = None
        response = ""
        attr = elem[1]
        # Case #1: test the value of a specific predicate for a
        # specific value.
        if 'name' in attr and 'value' in attr:
            val = self.getPredicate(attr['name'], sessionID)
            if val == attr['value']:
                for e in elem[2:]:
                    response += self._processElement(e,sessionID)
            return response
        else:
            # Case #2 and #3: Cycle through <li> contents, testing a
            # name and value pair for each one.
            # NOTE(review): the broad except blocks below all log (in
            # verbose mode) and re-raise, so errors here propagate out of
            # _processCondition entirely rather than being swallowed.
            try:
                name = attr.get('name',None)
                # Get the list of <li> elemnents
                listitems = []
                for e in elem[2:]:
                    if e[0] == 'li':
                        listitems.append(e)
                # if listitems is empty, return the empty string
                if len(listitems) == 0:
                    return ""
                # iterate through the list looking for a condition that
                # matches.
                foundMatch = False
                for li in listitems:
                    try:
                        liAttr = li[1]
                        # if this is the last list item, it's allowed
                        # to have no attributes. We just skip it for now.
                        if len(liAttr) == 0 and li == listitems[-1]:
                            continue
                        # get the name of the predicate to test
                        liName = name
                        if liName == None:
                            liName = liAttr['name']
                        # get the value to check against
                        liValue = liAttr['value']
                        # do the test
                        if self.getPredicate(liName, sessionID) == liValue:
                            foundMatch = True
                            response += self._processElement(li,sessionID)
                            break
                    except:
                        # No attributes, no name/value attributes, no
                        # such predicate/session, or processing error.
                        if self._verboseMode: print( "Something amiss -- skipping listitem", li )
                        raise
                if not foundMatch:
                    # Check the last element of listitems. If it has
                    # no 'name' or 'value' attribute, process it.
                    try:
                        li = listitems[-1]
                        liAttr = li[1]
                        if not ('name' in liAttr or 'value' in liAttr):
                            response += self._processElement(li, sessionID)
                    except:
                        # listitems was empty, no attributes, missing
                        # name/value attributes, or processing error.
                        if self._verboseMode: print( "error in default listitem" )
                        raise
            except:
                # Some other catastrophic cataclysm
                if self._verboseMode: print( "catastrophic condition failure" )
                raise
        return response
# <date>
def _processDate(self, elem, sessionID):
"""Process a <date> AIML element.
<date> elements resolve to the current date and time. The
AIML specification doesn't require any particular format for
this information, so I go with whatever's simplest.
"""
log.debug("_processDate({elem=})")
timestamp = time.asctime()
log.debug("_processDate({elem=}): returning {timestamp=}")
return timestamp
# <formal>
def _processFormal(self, elem, sessionID):
"""Process a <formal> AIML element.
<formal> elements process their contents recursively, and then
capitalize the first letter of each word of the result.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
return string.capwords(response)
# <gender>
def _processGender(self,elem, sessionID):
"""Process a <gender> AIML element.
<gender> elements process their contents, and then swap the
gender of any third-person singular pronouns in the result.
This subsitution is handled by the aiml.WordSub module.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
return self._subbers['gender'].sub(response)
# <get>
def _processGet(self, elem, sessionID):
"""Process a <get> AIML element.
Required element attributes:
name: The name of the predicate whose value should be
retrieved from the specified session and returned. If the
predicate doesn't exist, the empty string is returned.
<get> elements return the value of a predicate from the
specified session.
"""
name = elem[1]['name']
print("processGet name=", name)
rs = self.getPredicate(name, sessionID)
print("processGet name=", name, " -> ", rs)
return rs
# <gossip>
    def _processGossip(self, elem, sessionID):
        """Process a <gossip> AIML element.
        <gossip> elements are used to capture and store user input in
        an implementation-defined manner, theoretically allowing the
        bot to learn from the people it chats with. I haven't
        decided how to define my implementation, so right now
        <gossip> behaves identically to <think>.
        """
        # Pure delegation: children are evaluated for side effects only.
        return self._processThink(elem, sessionID)
# <id>
    def _processId(self, elem, sessionID):
        """ Process an <id> AIML element.
        <id> elements return a unique "user id" for a specific
        conversation. In PyAIML, the user id is the name of the
        current session.
        """
        # The session ID itself serves as the user id.
        return sessionID
# <input>
def _processInput(self, elem, sessionID):
"""Process an <input> AIML element.
Optional attribute elements:
index: The index of the element from the history list to
return. 1 means the most recent item, 2 means the one
before that, and so on.
<input> elements return an entry from the input history for
the current session.
"""
inputHistory = self.getPredicate(self._inputHistory, sessionID)
try: index = int(elem[1]['index'])
except: index = 1
try: return inputHistory[-index]
except IndexError:
if self._verboseMode:
err = "No such index %d while processing <input> element.\n" % index
sys.stderr.write(err)
return ""
# <javascript>
    def _processJavascript(self, elem, sessionID):
        """Process a <javascript> AIML element.
        <javascript> elements process their contents recursively, and
        then run the results through a server-side Javascript
        interpreter to compute the final response. Implementations
        are not required to provide an actual Javascript interpreter,
        and right now PyAIML doesn't; <javascript> elements behave
        exactly like <think> elements.
        """
        # No JS interpreter available: evaluate children, discard output.
        return self._processThink(elem, sessionID)
# <learn>
def _processLearn(self, elem, sessionID):
"""Process a <learn> AIML element.
<learn> elements process their contents recursively, and then
treat the result as an AIML file to open and learn.
"""
filename = ""
for e in elem[2:]:
filename += self._processElement(e, sessionID)
self.learn(filename)
return ""
# <li>
def _processLi(self,elem, sessionID):
"""Process an <li> AIML element.
Optional attribute elements:
name: the name of a predicate to query.
value: the value to check that predicate for.
<li> elements process their contents recursively and return
the results. They can only appear inside <condition> and
<random> elements. See _processCondition() and
_processRandom() for details of their usage.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
return response
# <lowercase>
def _processLowercase(self,elem, sessionID):
"""Process a <lowercase> AIML element.
<lowercase> elements process their contents recursively, and
then convert the results to all-lowercase.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
return response.lower()
# <person>
def _processPerson(self,elem, sessionID):
"""Process a <person> AIML element.
<person> elements process their contents recursively, and then
convert all pronouns in the results from 1st person to 2nd
person, and vice versa. This subsitution is handled by the
aiml.WordSub module.
If the <person> tag is used atomically (e.g. <person/>), it is
a shortcut for <person><star/></person>.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
if len(elem[2:]) == 0: # atomic <person/> = <person><star/></person>
response = self._processElement(['star',{}], sessionID)
return self._subbers['person'].sub(response)
# <person2>
def _processPerson2(self,elem, sessionID):
"""Process a <person2> AIML element.
<person2> elements process their contents recursively, and then
convert all pronouns in the results from 1st person to 3rd
person, and vice versa. This subsitution is handled by the
aiml.WordSub module.
If the <person2> tag is used atomically (e.g. <person2/>), it is
a shortcut for <person2><star/></person2>.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
if len(elem[2:]) == 0: # atomic <person2/> = <person2><star/></person2>
response = self._processElement(['star',{}], sessionID)
return self._subbers['person2'].sub(response)
# <random>
def _processRandom(self, elem, sessionID):
"""Process a <random> AIML element.
<random> elements contain zero or more <li> elements. If
none, the empty string is returned. If one or more <li>
elements are present, one of them is selected randomly to be
processed recursively and have its results returned. Only the
chosen <li> element's contents are processed. Any non-<li> contents are
ignored.
"""
listitems = []
for e in elem[2:]:
if e[0] == 'li':
listitems.append(e)
if len(listitems) == 0:
return ""
# select and process a random listitem.
random.shuffle(listitems)
return self._processElement(listitems[0], sessionID)
# <sentence>
def _processSentence(self,elem, sessionID):
"""Process a <sentence> AIML element.
<sentence> elements process their contents recursively, and
then capitalize the first letter of the results.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
try:
response = response.strip()
words = response.split(" ", 1)
words[0] = words[0].capitalize()
response = ' '.join(words)
return response
except IndexError: # response was empty
return ""
# <set>
def _processSet(self, elem, sessionID):
"""Process a <set> AIML element.
Required element attributes:
name: The name of the predicate to set.
<set> elements process their contents recursively, and assign the results to a predicate
(given by their 'name' attribute) in the current session. The contents of the element
are also returned.
"""
value = ""
for e in elem[2:]:
value += self._processElement(e, sessionID)
#print( "@ELEM", elem )
self.setPredicate(elem[1]['name'], value, sessionID)
return value
# <size>
    def _processSize(self,elem, sessionID):
        """Process a <size> AIML element.
        <size> elements return the number of AIML categories currently
        in the bot's brain.
        """
        # Delegates to numCategories(); stringified for template output.
        return str(self.numCategories())
# <sr>
def _processSr(self,elem,sessionID):
"""Process an <sr> AIML element.
<sr> elements are shortcuts for <srai><star/></srai>.
"""
star = self._processElement(['star',{}], sessionID)
response = self._respond(star, sessionID)
return response
# <srai>
def _processSrai(self,elem, sessionID):
"""Process a <srai> AIML element.
<srai> elements recursively process their contents, and then
pass the results right back into the AIML interpreter as a new
piece of input. The results of this new input string are
returned.
"""
print("srai elem=", elem)
newInput = ""
print("srai elem[2:]=", elem[2:])
for e in elem[2:]:
print("srai processElement e=", e)
rs = self._processElement(e, sessionID)
print("srai processElement returned rs=", rs)
newInput += " " + rs
newInput = newInput.strip()
print("srai newInput -> ", newInput)
print("srai respomd with newInput=", newInput)
resp_rs = None
if newInput.strip().lower().split()[0] == "my":
what = " ".join(newInput.strip().lower().split()[1:])
resp_rs = self._sessions[sessionID].get(what)
if not resp_rs:
resp_rs = self._respond(newInput, sessionID)
print("srai respomd returned ", resp_rs)
print("srai elem=", elem, " -> ", resp_rs)
return resp_rs
# <star>
def _processStar(self, elem, sessionID):
"""Process a <star> AIML element.
Optional attribute elements:
index: Which "*" character in the current pattern should
be matched?
<star> elements return the text fragment matched by the "*"
character in the current input pattern. For example, if the
input "Hello Tom Smith, how are you?" matched the pattern
"HELLO * HOW ARE YOU", then a <star> element in the template
would evaluate to "Tom Smith".
"""
try: index = int(elem[1]['index'])
except KeyError: index = 1
# fetch the user's last input
inputStack = self.getPredicate(self._inputStack, sessionID)
input_ = self._subbers['normal'].sub(inputStack[-1])
# fetch the Kernel's last response (for 'that' context)
outputHistory = self.getPredicate(self._outputHistory, sessionID)
try: that = self._subbers['normal'].sub(outputHistory[-1])
except: that = "" # there might not be any output yet
topic = self.getPredicate("topic", sessionID)
response = self._brain.star("star", input_, that, topic, index)
return response
# <system>
    def _processSystem(self,elem, sessionID):
        """Process a <system> AIML element.
        <system> elements process their contents recursively, and then
        attempt to execute the results as a shell command on the
        server. The AIML interpreter blocks until the command is
        complete, and then returns the command's output.
        For cross-platform compatibility, any file paths inside
        <system> tags should use Unix-style forward slashes ("/") as a
        directory separator.
        """
        # SECURITY NOTE(review): the command string is assembled from AIML
        # template content and handed to a shell via os.popen().  Only load
        # AIML files from trusted sources -- an untrusted template can run
        # arbitrary commands here.
        # build up the command string
        command = ""
        for e in elem[2:]:
            command += self._processElement(e, sessionID)
        # normalize the path to the command. Under Windows, this
        # switches forward-slashes to back-slashes; all system
        # elements should use unix-style paths for cross-platform
        # compatibility.
        #executable,args = command.split(" ", 1)
        #executable = os.path.normpath(executable)
        #command = executable + " " + args
        command = os.path.normpath(command)
        # execute the command.
        response = ""
        try:
            out = os.popen(command)
        except RuntimeError as msg:
            if self._verboseMode:
                err = "WARNING: RuntimeError while processing \"system\" element:\n%s\n" % self._cod.enc(msg)
                sys.stderr.write(err)
            return "There was an error while computing my response. Please inform my botmaster."
        time.sleep(0.01) # I'm told this works around a potential IOError exception.
        # Collect the command output; newlines are later collapsed to spaces.
        for line in out:
            response += line + "\n"
        response = ' '.join(response.splitlines()).strip()
        return response
# <template>
def _processTemplate(self,elem, sessionID):
"""Process a <template> AIML element.
<template> elements recursively process their contents, and
return the results. <template> is the root node of any AIML
response tree.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
return response
# text
def _processText(self,elem, sessionID):
"""Process a raw text element.
Raw text elements aren't really AIML tags. Text elements cannot contain
other elements; instead, the third item of the 'elem' list is a text
string, which is immediately returned. They have a single attribute,
automatically inserted by the parser, which indicates whether whitespace
in the text should be preserved or not.
"""
try:
elem[2] + ""
except TypeError:
raise TypeError( "Text element contents are not text" )
# If the the whitespace behavior for this element is "default",
# we reduce all stretches of >1 whitespace characters to a single
# space. To improve performance, we do this only once for each
# text element encountered, and save the results for the future.
if elem[1]["xml:space"] == "default":
elem[2] = re.sub("\s+", " ", elem[2])
elem[1]["xml:space"] = "preserve"
return elem[2]
# <that>
def _processThat(self,elem, sessionID):
"""Process a <that> AIML element.
Optional element attributes:
index: Specifies which element from the output history to
return. 1 is the most recent response, 2 is the next most
recent, and so on.
<that> elements (when they appear inside <template> elements)
are the output equivilant of <input> elements; they return one
of the Kernel's previous responses.
"""
outputHistory = self.getPredicate(self._outputHistory, sessionID)
index = 1
try:
# According to the AIML spec, the optional index attribute
# can either have the form "x" or "x,y". x refers to how
# far back in the output history to go. y refers to which
# sentence of the specified response to return.
index = int(elem[1]['index'].split(',')[0])
except:
pass
try: return outputHistory[-index]
except IndexError:
if self._verboseMode:
err = "No such index %d while processing <that> element.\n" % index
sys.stderr.write(err)
return ""
# <thatstar>
def _processThatstar(self, elem, sessionID):
"""Process a <thatstar> AIML element.
Optional element attributes:
index: Specifies which "*" in the <that> pattern to match.
<thatstar> elements are similar to <star> elements, except
that where <star/> returns the portion of the input string
matched by a "*" character in the pattern, <thatstar/> returns
the portion of the previous input string that was matched by a
"*" in the current category's <that> pattern.
"""
try: index = int(elem[1]['index'])
except KeyError: index = 1
# fetch the user's last input
inputStack = self.getPredicate(self._inputStack, sessionID)
input_ = self._subbers['normal'].sub(inputStack[-1])
# fetch the Kernel's last response (for 'that' context)
outputHistory = self.getPredicate(self._outputHistory, sessionID)
try: that = self._subbers['normal'].sub(outputHistory[-1])
except: that = "" # there might not be any output yet
topic = self.getPredicate("topic", sessionID)
response = self._brain.star("thatstar", input_, that, topic, index)
return response
# <think>
def _processThink(self,elem, sessionID):
"""Process a <think> AIML element.
<think> elements process their contents recursively, and then
discard the results and return the empty string. They're
useful for setting predicates and learning AIML files without
generating any output.
"""
for e in elem[2:]:
self._processElement(e, sessionID)
return ""
# <topicstar>
def _processTopicstar(self, elem, sessionID):
"""Process a <topicstar> AIML element.
Optional element attributes:
index: Specifies which "*" in the <topic> pattern to match.
<topicstar> elements are similar to <star> elements, except
that where <star/> returns the portion of the input string
matched by a "*" character in the pattern, <topicstar/>
returns the portion of current topic string that was matched
by a "*" in the current category's <topic> pattern.
"""
try: index = int(elem[1]['index'])
except KeyError: index = 1
# fetch the user's last input
inputStack = self.getPredicate(self._inputStack, sessionID)
input_ = self._subbers['normal'].sub(inputStack[-1])
# fetch the Kernel's last response (for 'that' context)
outputHistory = self.getPredicate(self._outputHistory, sessionID)
try: that = self._subbers['normal'].sub(outputHistory[-1])
except: that = "" # there might not be any output yet
topic = self.getPredicate("topic", sessionID)
response = self._brain.star("topicstar", input_, that, topic, index)
return response
# <uppercase>
def _processUppercase(self,elem, sessionID):
"""Process an <uppercase> AIML element.
<uppercase> elements process their contents recursively, and
return the results with all lower-case characters converted to
upper-case.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
return response.upper()
# <version>
    def _processVersion(self,elem, sessionID):
        """Process a <version> AIML element.
        <version> elements return the version number of the AIML
        interpreter.
        """
        # Pure delegation to the Kernel's version() accessor.
        return self.version()
|
from typing import List
from flask import request
from flask.wrappers import Response
from flask_accepts import accepts, responds
from flask_restx import Namespace, Resource
from marshmallow import schema
from .interface import EfficiencyIndicatorsInterface
from .model import EfficiencyIndicators
from .schema import EfficiencyIndicatorsSchema
from .service import EfficiencyIndicatorsService
api = Namespace('EfficiencyIndicators', description='Companies Efficiency Indicators')
@api.route('/')
class EfficiencyIndicatorsResource(Resource):
    """Collection endpoint (/) for companies' efficiency indicators."""
    @responds(schema=EfficiencyIndicatorsSchema(many=True))
    def get(self) -> List[EfficiencyIndicators]:
        '''Get all companies efficiency indicators'''
        return EfficiencyIndicatorsService.get_all()
    @accepts(schema=EfficiencyIndicatorsSchema, api=api)
    @responds(schema=EfficiencyIndicatorsSchema)
    def post(self) -> EfficiencyIndicators:
        '''Create a single company indicator'''
        return EfficiencyIndicatorsService.create(request.parsed_obj)
@api.route('/<string:assetSymbol>')
@api.param('assetSymbol', 'Asset Symbol')
class EfficiencyIndicatorsSymbolResource(Resource):
    """Single-item endpoint keyed by asset symbol."""
    @responds(schema=EfficiencyIndicatorsSchema)
    def get(self, assetSymbol: str) -> EfficiencyIndicators:
        '''Get single company indicator'''
        return EfficiencyIndicatorsService.get_by_symbol(assetSymbol)
    def delete(self, assetSymbol: str) -> Response:
        '''Delete single company indicator'''
        from flask import jsonify
        deleted_symbol = EfficiencyIndicatorsService.delete_by_symbol(assetSymbol)
        return jsonify({'status': 'Success', 'symbol': deleted_symbol})
    @accepts(schema=EfficiencyIndicatorsSchema, api=api)
    @responds(schema=EfficiencyIndicatorsSchema)
    def put(self, assetSymbol: str) -> EfficiencyIndicators:
        '''Update single company indicator'''
        payload: EfficiencyIndicatorsInterface = request.parsed_obj
        current = EfficiencyIndicatorsService.get_by_symbol(assetSymbol)
        return EfficiencyIndicatorsService.update(current, payload)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from LinkedList import LinkedList
def find_begining(head: LinkedList) -> LinkedList:
    """Return the node where a cycle begins in *head*'s list, or None.

    Floyd's tortoise-and-hare: once the two pointers meet inside the
    cycle, restarting one pointer from the head and advancing both one
    step at a time makes them meet exactly at the cycle's entry node.
    """
    tortoise = hare = head
    # Phase 1: detect whether a cycle exists at all.
    while hare is not None and hare.next is not None:
        tortoise = tortoise.next
        hare = hare.next.next
        if tortoise == hare:
            break
    else:
        # hare ran off the end of the list: no cycle.
        return None
    # Phase 2: walk both pointers at equal speed to the cycle entry.
    tortoise = head
    while tortoise != hare:
        tortoise = tortoise.next
        hare = hare.next
    return hare
|
/**
* Note to self: when skipping a test with `this.skip()`, do not use an arrow function,
* since Mocha appears to try to bind `this` to the test function.
* @see https://mochajs.org/#arrow-functions
*
* This file is explicitly run first by mocha using the `--file` directive
* in package.json to let it clean the output directory first.
*
* @author Vincent Bruijn <vebruijn@gmail.com>
*/
var fs = require('fs');
const gd = require('../index');
var assert = require('chai').assert;
var source = __dirname + '/fixtures/';
var target = __dirname + '/output/';
before(function() {
  // declare version
  console.log('Built on top of GD version: ' + gd.getGDVersion() + '\n\n');

  // Clear test/output directory synchronously so every run really starts
  // clean. The previous async fs.readdir callback was never awaited by
  // Mocha (fs.readdir returns undefined, not a promise), and on error it
  // ignored `err` and crashed iterating an undefined `files` array.
  if (!fs.existsSync(target)) {
    return;
  }
  fs.readdirSync(target).forEach(function(file) {
    if (file.startsWith('output')) {
      fs.unlinkSync(target + file);
    }
  });
});
// Sanity checks on version metadata and compile-time feature flags
// exposed by the node-gd binding.
describe('Meta information', function() {
  it('gd.getGDVersion() -- will return a version number of format x.y.z', function(done) {
    var version = gd.getGDVersion();
    // e.g. "2.3.3" -- the patch component may have multiple digits
    assert.ok(/[0-9]\.[0-9]\.[0-9]+/.test(version));
    return done();
  });
  it('gd.GD_GIF -- will have built in GIF support', function() {
    assert.equal(gd.GD_GIF, 1, 'No GIF support for libgd is impossible!');
  });
  it('gd.GD_GIF -- is not writeble', function() {
    // Assignment must silently fail: the flag is exposed read-only.
    gd.GD_GIF = 99;
    assert.equal(gd.GD_GIF, 1, 'gd.GD_GIF should not be writeable!');
  });
  it('gd.GD_GIFANIM -- will have built in GIF animation support', function() {
    assert.equal(gd.GD_GIFANIM, 1, 'No GIF animation support for libgd is impossible!');
  });
  it('gd.GD_OPENPOLYGON -- will have built in open polygon support', function() {
    assert.equal(gd.GD_OPENPOLYGON, 1, 'No open polygon support for libgd is impossible!');
  });
});
// Color packing helpers: rgb(a) components packed into a single integer.
describe('GD color functions', function() {
  it('gd.trueColor() -- can return an integer representation of rgb color values', function(done) {
    const packedRed = gd.trueColor(255, 0, 0);
    // 0xff0000 === 16711680
    assert.ok(packedRed === 16711680);
    done();
  });
  it('gd.trueColorAlpha() -- can return an integer representation of rgba color values', function(done) {
    const packedTransparentRed = gd.trueColorAlpha(255, 0, 0, 63);
    // 0x3fff0000 === 1073676288
    assert.ok(packedTransparentRed === 1073676288);
    done();
  });
});
describe('Image query functions', function() {
  it('gd.Image#getBoundsSafe() -- getBoundsSafe should return 0 if the coordinate [-10, 1000] is checked against the image bounds', async function() {
    var s = source + 'input.png';
    var coord = [-10, 1000];
    const image = await gd.openPng(s);
    assert.ok(image.getBoundsSafe(coord[0], coord[1]) === 0);
    image.destroy();
  });
  it('gd.Image#getBoundsSafe() -- getBoundsSafe should return 1 if the coordinate [10, 10] is checked against the image bounds', async function() {
    var s = source + 'input.png';
    var coord = [10, 10];
    const image = await gd.openPng(s);
    assert.ok(image.getBoundsSafe(coord[0], coord[1]) === 1);
    image.destroy();
  });
  it('gd.Image#getTrueColorPixel() -- getTrueColorPixel should return "e6e6e6" when queried for coordinate [10, 10]', async function() {
    var s = source + 'input.png';
    var coord = [10, 10];
    const image = await gd.openPng(s);
    var color = image.getTrueColorPixel(coord[0], coord[1]);
    assert.ok(color.toString(16) === 'e6e6e6');
    // Fix: the image was previously leaked (no destroy()).
    image.destroy();
  });
  it('gd.Image#getTrueColorPixel() -- getTrueColorPixel should return 0 when queried for coordinate [101, 101]', async function() {
    var s = source + 'input.png';
    var coord = [101, 101];
    const image = await gd.openPng(s);
    var color = image.getTrueColorPixel(coord[0], coord[1]);
    assert.ok(color === 0);
    image.destroy();
  });
  it('gd.Image#imageColorAt() -- imageColorAt should return "be392e" when queried for coordinate [50, 50]', async function() {
    var s = source + 'input.png';
    var coord = [50, 50];
    const image = await gd.openPng(s);
    var color = image.imageColorAt(coord[0], coord[1]);
    assert.ok(color.toString(16) === 'be392e');
    image.destroy();
  });
  it('gd.Image#imageColorAt() -- imageColorAt should throw an error when queried for coordinate [101, 101]', async function() {
    const s = source + 'input.png';
    const coord = [101, 101];
    const image = await gd.openPng(s);
    // Fix: the old version passed silently when no exception was thrown.
    let threw = false;
    try {
      image.imageColorAt(coord[0], coord[1]);
    } catch (exception) {
      threw = true;
      assert.ok(exception instanceof Error);
    }
    assert.ok(threw, 'imageColorAt should throw for out-of-bounds coordinates');
    image.destroy();
  });
});
describe('Image filter functions', function() {
    // NOTE(review): the original guards compared version strings
    // lexicographically (gd.getGDVersion() < '2.1.1'), which misorders
    // versions such as '2.10.0'. Compare the dotted components numerically.
    function hasMinVersion(minimum) {
        const have = gd.getGDVersion().split('.').map(Number);
        const want = minimum.split('.').map(Number);
        for (let i = 0; i < want.length; i++) {
            const part = have[i] || 0;
            if (part !== want[i]) {
                return part > want[i];
            }
        }
        return true;
    }

    it('gd.Image#copyResampled() -- can scale-down (resize) an image', async () => {
        const t = target + 'output-scale.png';
        const img = await gd.openPng(source + 'input.png');
        // Halve both dimensions, then resample into a fresh canvas.
        const scale = 2;
        const w = Math.floor(img.width / scale);
        const h = Math.floor(img.height / scale);
        const canvas = await gd.createTrueColor(w, h);
        img.copyResampled(canvas, 0, 0, 0, 0, w, h, img.width, img.height);
        await canvas.savePng(t, 1);
        assert.ok(fs.existsSync(t));
        img.destroy();
        canvas.destroy();
    });
    it('gd.Image#copyRotated() -- can rotate an image', async function() {
        const t = target + 'output-rotate.png';
        const img = await gd.openPng(source + 'input.png');
        const canvas = await gd.createTrueColor(100, 100);
        // Rotate 45 degrees about the canvas centre (50, 50).
        img.copyRotated(canvas, 50, 50, 0, 0, img.width, img.height, 45);
        await canvas.savePng(t, 1);
        assert.ok(fs.existsSync(t));
        img.destroy();
        canvas.destroy();
    });
    it('gd.Image#grayscale() -- can convert to grayscale', async function() {
        if (!hasMinVersion('2.1.1')) {
            return this.skip();
        }
        const t = target + 'output-grayscale.png';
        const img = await gd.openPng(source + 'input.png');
        img.grayscale();
        await img.savePng(t, -1);
        assert.ok(fs.existsSync(t));
        img.destroy();
    });
    it('gd.Image#gaussianBlur() -- can add gaussian blur to an image', async function() {
        if (!hasMinVersion('2.1.1')) {
            return this.skip();
        }
        const t = target + 'output-gaussianblur.png';
        const img = await gd.openPng(source + 'input.png');
        // Apply the blur ten times, replacing the CoffeeScript-generated
        // artifact loop `for (i = j = 0; j < 10; i = ++j)`.
        for (let i = 0; i < 10; i++) {
            img.gaussianBlur();
        }
        await img.savePng(t, -1);
        assert.ok(fs.existsSync(t));
        img.destroy();
    });
    it('gd.Image#negate() -- can negate an image', async function() {
        if (!hasMinVersion('2.1.1')) {
            return this.skip();
        }
        const t = target + 'output-negate.png';
        const img = await gd.openPng(source + 'input.png');
        img.negate();
        await img.savePng(t, -1);
        assert.ok(fs.existsSync(t));
        img.destroy();
    });
    it('gd.Image#brightness() -- can change brightness of an image', async function() {
        if (!hasMinVersion('2.1.1')) {
            return this.skip();
        }
        const t = target + 'output-brightness.png';
        const img = await gd.openPng(source + 'input.png');
        // Random value in [0, 99] — the test only checks that a file is written.
        const brightness = Math.floor(Math.random() * 100);
        img.brightness(brightness);
        await img.savePng(t, -1);
        assert.ok(fs.existsSync(t));
        img.destroy();
    });
    it('gd.Image#contrast() -- can change contrast of an image', async function() {
        if (!hasMinVersion('2.1.1')) {
            return this.skip();
        }
        const t = target + 'output-contrast.png';
        const img = await gd.openPng(source + 'input.png');
        // Random value in [-900, 1099] — the test only checks that a file is written.
        const contrast = Math.floor(Math.random() * 2000) - 900;
        img.contrast(contrast);
        await img.savePng(t, -1);
        assert.ok(fs.existsSync(t));
        img.destroy();
    });
    it('gd.Image#emboss() -- can emboss an image', async function() {
        if (!hasMinVersion('2.1.1')) {
            return this.skip();
        }
        const t = target + 'output-emboss.png';
        const img = await gd.openPng(source + 'input.png');
        img.emboss();
        await img.savePng(t, -1);
        assert.ok(fs.existsSync(t));
        img.destroy();
    });
    it('gd.Image#selectiveBlur() -- can apply selective blur to an image', async function() {
        if (!hasMinVersion('2.1.1')) {
            return this.skip();
        }
        const t = target + 'output-selectiveBlur.png';
        const img = await gd.openPng(source + 'input.png');
        img.selectiveBlur();
        await img.savePng(t, -1);
        assert.ok(fs.existsSync(t));
        img.destroy();
    });
    it('gd.Image#colorReplace() -- can replace a color to another color', async function() {
        const t = target + 'output-replaced.png';
        const image = await gd.openPng(source + 'input.png');
        // Sample six pixels first, then replace — sampling must happen before
        // any replacement so later samples are not affected by earlier ones.
        const samples = [];
        for (let y = 10; y <= 15; y++) {
            samples.push(image.getTrueColorPixel(10, y));
        }
        const colorTo = gd.trueColor(0, 255, 255);
        for (const color of samples) {
            image.colorReplace(color, colorTo);
        }
        await image.savePng(t, 0);
        assert.ok(fs.existsSync(t));
        image.destroy();
    });
    it('gd.Image#stringFT() -- can create a truecolor BMP image with text', async function() {
        if (!hasMinVersion('2.1.1')) {
            return this.skip();
        }
        const f = source + 'FreeSans.ttf';
        const t = target + 'output-truecolor-string.bmp';
        const img = await gd.createTrueColor(120, 20);
        const txtColor = img.colorAllocate(255, 255, 0);
        img.stringFT(txtColor, f, 16, 0, 8, 18, "Hello world!");
        await img.saveBmp(t, 0);
        assert.ok(fs.existsSync(t));
        // BUG FIX: the created image was previously leaked (no destroy()).
        img.destroy();
    });
});
|
const faker = require('faker')
const numPatients = 100
// Build one synthetic patient record from faker-generated demographics.
function randomPatient () {
  return {
    address: {
      street: faker.address.streetAddress(),
      // Keep only the leading 5-digit ZIP (faker may return ZIP+4).
      zip: faker.address.zipCode().substr(0, 5)
    },
    // Date of birth between 1945-01-01 and now, formatted YYYY-MM-DD.
    dob: faker.date.between(new Date(1945, 0, 1), new Date()).toISOString().substr(0, 10),
    fname: faker.name.firstName(),
    lname: faker.name.lastName(),
    medicalRecordNumber: getRandomInt(22000, 93000) + '',
    // BUG FIX: arguments were swapped (min=999999999, max=100000000), which
    // only produced 9-digit values by accident of getRandomInt's arithmetic.
    ssn: getRandomInt(100000000, 999999999) + '',
    visitNumber: getRandomInt(243000, 915000) + ''
  }
}
// Uniform random integer in [ceil(min), floor(max)], inclusive at both ends.
function getRandomInt (min, max) {
  const lo = Math.ceil(min)
  const hi = Math.floor(max)
  const span = hi - lo + 1
  return lo + Math.floor(Math.random() * span)
}
// Generate the requested number of synthetic patients and print them as
// pretty-printed JSON on stdout.
const patients = Array.from({ length: numPatients }, () => randomPatient())
console.log(JSON.stringify(patients, null, 2))
|
#!/usr/bin/env python
import seaborn as sns
import matplotlib.pylab as plt
import numpy as np
import networkx as nx
import pandas as pd
import matplotlib as mpl
import os
import sys
import matplotlib.image as mpimg
import matplotlib.gridspec as gridspec
from glob import glob
import itertools as it
import matplotlib.patches as mpatches
def rescale(fname, suff='png'):
    '''
    Save a low-resolution copy of the figure stored at ``<fname>.<suff>``.

    The copy is 1600 pixels wide (8 inches at 200 dpi), keeps the aspect
    ratio, and is written next to the original with ``_LowRes`` appended to
    the name. Useful for sending reviewers a manuscript that won't crash
    their computers with its size.

    Parameters
    ----------
    fname : str
        Path of the figure file WITHOUT the extension.
    suff : str, optional
        File extension / output format (default ``'png'``).
    '''
    from PIL import Image

    # Open the file and figure out what size it is
    img = Image.open(fname + '.' + suff)
    width, height = img.size

    # Scale factor that makes the output exactly 1600 pixels wide
    scale_factor = 1600.0 / width

    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int() is all that is needed here.
    new_size = (int(width * scale_factor), int(height * scale_factor))

    # BUG FIX: Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the
    # same resampling filter under its proper name.
    small_img = img.resize(new_size, Image.LANCZOS)

    # Output name: <fname>_LowRes.<suff>, next to the original
    new_name = ''.join([os.path.splitext(fname)[0],
                        '_LowRes.',
                        suff])
    small_img.save(new_name, optimize=True, quality=95)
def view_corr_mat(corr_mat_file, output_name, cmap_name='RdBu_r', cost=None, bin=False):
    '''
    Plot a correlation matrix stored as a text file and save the figure.

    A rescaled "_LowRes" copy is also written via rescale().

    Parameters
    ----------
    corr_mat_file : str
        Path to a whitespace-delimited matrix readable by np.loadtxt
        (assumed square; values in [-1, 1] — TODO confirm with callers).
    output_name : str
        Filename the figure is saved to.
    cmap_name : str
        Matplotlib colormap name (default 'RdBu_r').
    cost : float or None
        If given, zero out entries below the (100 - cost) percentile and
        switch the color scale to [0, 1].
    bin : bool
        If True, binarise all surviving positive entries to 1.
        NOTE(review): shadows the builtin `bin`; kept for API compatibility.
    '''
    # Read in the data
    M = np.loadtxt(corr_mat_file)

    # If cost is given then roughly threshold at that cost.
    # NOTE - this is not actually the EXACT network that you're analysing
    # because it doesn't include the minimum spanning tree. But it will give
    # you a good sense of the network structure.
    # #GoodEnough ;)
    if cost:
        thr = np.percentile(M.reshape(-1), 100-cost)
        M[M<thr] = 0
        # Thresholded matrices are non-negative, so show a [0, 1] scale.
        vmin=0
        vmax=1
        ticks_dict = { 'locations' : [ 0, 1 ],
                       'labels'    : [ '0', '1' ] }
    else:
        vmin=-1
        vmax=1
        ticks_dict = { 'locations' : [ -1, 0, 1 ],
                       'labels'    : [ '-1', '0', '1' ] }
    if bin:
        M[M>0] = 1

    # Create an axis
    fig, ax = plt.subplots(figsize=(6,5))
    ax.axis('off')

    # Show the network measures
    mat_ax = ax.imshow(M,
                       interpolation='none',
                       cmap=cmap_name,
                       vmin=vmin,
                       vmax=vmax)

    # Put a box around your data
    ax.add_patch(
        mpatches.Rectangle(
            (ax.get_xlim()[0], ax.get_ylim()[1]),
            ax.get_xlim()[1],
            ax.get_ylim()[0],
            fill=False,      # remove background
            color='k',
            linewidth=1) )

    # Add colorbar, make sure to specify tick locations to match desired ticklabels
    cbar = fig.colorbar(mat_ax, ticks=ticks_dict['locations'])
    cbar.ax.set_yticklabels(ticks_dict['labels'])  # vertically oriented colorbar

    plt.tight_layout()

    # Save the picture, plus a low-resolution copy for sharing
    fig.savefig(output_name, bbox_inches=0, dpi=100)
    rescale(output_name)
    plt.close(fig)
|
"""This module contains the general information for SyntheticFileSystem ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class SyntheticFileSystemConsts:
    # No enumerated property constants exist for syntheticFileSystem; this
    # empty class is kept for structural consistency with the other
    # generated managed-object modules.
    pass
class SyntheticFileSystem(ManagedObject):
    """Managed object for the UCS class ``syntheticFileSystem``.

    Auto-generated binding: ``mo_meta``/``prop_meta`` describe the XML API
    schema, and ``prop_map`` translates XML attribute names to Python
    attribute names. The object is read-only ("Get" verb only) and is
    parented under ``topRoot`` with rn prefix ``FS-``.
    """

    consts = SyntheticFileSystemConsts()
    naming_props = set([])

    # MoMeta(class_id, xml_tag, rn_prefix, min_version, category, access_mask,
    #        naming_props, privileges, parent_classes, child_classes, verbs)
    mo_meta = MoMeta("SyntheticFileSystem", "syntheticFileSystem", "FS-", VersionMeta.Version101e, "InputOutput", 0x1f, [], ["read-only"], [u'topRoot'], [], ["Get"])

    # Per-property schema: access level, field id, length bounds / regex, and
    # the UCSM version each property first appeared in.
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
    }

    # XML attribute name -> Python attribute name.
    prop_map = {
        "childAction": "child_action",
        "dn": "dn",
        "rn": "rn",
        "sacl": "sacl",
        "status": "status",
    }

    def __init__(self, **kwargs):
        # Bitmask of properties modified since the last commit; managed by
        # the ManagedObject machinery.
        self._dirty_mask = 0
        self.child_action = None
        self.sacl = None
        self.status = None

        # dn/rn are presumably populated by the ManagedObject base from
        # kwargs / mo_meta — TODO confirm.
        ManagedObject.__init__(self, "SyntheticFileSystem", **kwargs)
|
/* @flow strict-local */
import { createSelector } from 'reselect';
import type { GlobalState, PerAccountState, UserOrBot, Selector, User, UserId } from '../types';
import { getUsers, getCrossRealmBots, getNonActiveUsers } from '../directSelectors';
import { tryGetAuth, tryGetActiveAccountState } from '../account/accountsSelectors';
/**
 * All users in this Zulip org (aka realm).
 *
 * In particular this includes:
 *  * cross-realm bots
 *  * deactivated users (`is_active` false; see `User` and the linked docs)
 *
 * This is the right list to use in any UI context that might involve things
 * a user did in the past: messages they sent, reactions they added, etc.
 * Deactivating a user means they can't log in and see or send new messages,
 * and doesn't erase them from history.
 *
 * In contexts that are about offering *new* interactions -- like choosing a
 * user to send a PM to -- deactivated users should be left out.
 *
 * See:
 *  * `getActiveUsersById` for leaving out deactivated users
 *  * `User` for details on properties, and links to docs.
 */
const getAllUsers: Selector<$ReadOnlyArray<UserOrBot>> = createSelector(
  getUsers,
  getNonActiveUsers,
  getCrossRealmBots,
  // Default each input to [] — presumably to stay safe if a slice is briefly
  // undefined (e.g. mid-rehydrate); TODO confirm whether that can occur.
  (users = [], nonActiveUsers = [], crossRealmBots = []) => [
    ...users,
    ...nonActiveUsers,
    ...crossRealmBots,
  ],
);
/** See `getAllUsers` for discussion. */
export const getAllUsersById: Selector<Map<UserId, UserOrBot>> = createSelector(
getAllUsers,
allUsers => new Map(allUsers.map(user => [user.user_id, user])),
);
/**
* See `getAllUsers` for discussion.
*
* Prefer `getAllUsersById`; see #3764.
*
*/
export const getAllUsersByEmail: Selector<Map<string, UserOrBot>> = createSelector(
getAllUsers,
allUsers => new Map(allUsers.map(user => [user.email, user])),
);
/**
* PRIVATE; exported only for tests.
*
* WARNING: despite the name, only (a) `is_active` users (b) excluding cross-realm bots.
*
* See `getAllUsersById`, and `getAllUsers` for discussion.
*/
export const getUsersById: Selector<Map<UserId, User>> = createSelector(
getUsers,
(users = []) => new Map(users.map(user => [user.user_id, user])),
);
/**
* WARNING: despite the name, only (a) `is_active` users (b) excluding cross-realm bots.
*
* See `getAllUsers`.
*/
export const getSortedUsers: Selector<$ReadOnlyArray<User>> = createSelector(getUsers, users =>
[...users].sort((x1, x2) => x1.full_name.toLowerCase().localeCompare(x2.full_name.toLowerCase())),
);
/**
* The user's own user ID in the active account.
*
* Throws if we have no data from the server.
*
* See also `getOwnEmail` and `getOwnUser`.
*/
// Not currently used, but should replace uses of `getOwnEmail` (e.g. inside
// `getOwnUser`). See #3764.
export const getOwnUserId = (state: PerAccountState): UserId => {
const { user_id } = state.realm;
if (user_id === undefined) {
throw new Error('No server data found');
}
return user_id;
};
/**
* The user's own email in the active account.
*
* Throws if we have no data from the server.
*
* Prefer using `getOwnUserId` or `getOwnUser`; see #3764.
*/
export const getOwnEmail = (state: PerAccountState): string => {
const { email } = state.realm;
if (email === undefined) {
throw new Error('No server data found');
}
return email;
};
/**
* The person using the app, as represented by a `User` object.
*
* This is the server's information about the active, logged-in account, in
* the same form as the information we get from the server about everyone
* else in the organization.
*
* Throws if we have no such information.
*
* See also `getOwnUserId` and `getOwnEmail`.
*/
export const getOwnUser = (state: PerAccountState): User => {
const ownUser = getUsersById(state).get(getOwnUserId(state));
if (ownUser === undefined) {
throw new Error('Have ownUserId, but not found in user data');
}
return ownUser;
};
/**
* The user with the given user ID, or null if no such user is known.
*
* This works for any user in this Zulip org/realm, including deactivated
* users and cross-realm bots. See `getAllUsers` for details.
*
* See `getUserForId` for a version which only ever returns a real user,
* throwing if none. That makes it a bit simpler to use in contexts where
* we assume the relevant user must exist, which is true of most of the app.
*/
export const tryGetUserForId = (state: PerAccountState, userId: UserId): UserOrBot | null =>
getAllUsersById(state).get(userId) ?? null;
/**
* The user with the given user ID.
*
* This works for any user in this Zulip org/realm, including deactivated
* users and cross-realm bots. See `getAllUsers` for details.
*
* Throws if no such user exists.
*
* See `tryGetUserForId` for a non-throwing version.
*/
export const getUserForId = (state: PerAccountState, userId: UserId): UserOrBot => {
const user = tryGetUserForId(state, userId);
if (!user) {
throw new Error(`getUserForId: missing user: id ${userId}`);
}
return user;
};
/**
* DEPRECATED except as a cache private to this module.
*
* Excludes deactivated users. See `getAllUsers` for discussion.
*
* Instead of this selector, use:
* * `getAllUsersById` for data on an arbitrary user
* * `getUserIsActive` for the specific information of whether a user is
* deactivated.
*/
const getActiveUsersById: Selector<Map<UserId, UserOrBot>> = createSelector(
getUsers,
getCrossRealmBots,
(users = [], crossRealmBots = []) =>
new Map([...users, ...crossRealmBots].map(user => [user.user_id, user])),
);
/**
* The value of `is_active` for the given user.
*
* For a normal user, this is true unless the user or an admin has
* deactivated their account. The name comes from Django; this property
* isn't related to presence or to whether the user has recently used Zulip.
*
* (Conceptually this should be a property on the `User` object; the reason
* it isn't is just that the Zulip API presents this information in a funny
* other way.)
*/
// To understand this implementation, see the comment about `is_active` in
// the `User` type definition.
export const getUserIsActive = (state: PerAccountState, userId: UserId): boolean =>
!!getActiveUsersById(state).get(userId);
/**
 * Whether we have server data for the active account.
 *
 * This can be used to decide whether the app's main UI which shows data
 * from the server should render itself, or should fall back to a loading
 * screen.
 *
 * Takes the whole GlobalState; delegates to {@link getHaveServerData} once
 * the active account's per-account state has been located.
 */
export const getHaveServerDataGlobal = (globalState: GlobalState): boolean => {
  // Any valid server data is about the active account. So if there is no
  // active account, then any server data we appear to have can't be valid.
  const state = tryGetActiveAccountState(globalState);
  if (!state) {
    // (For background to this comment's reasoning, see getHaveServerData.)
    //
    // From `accountsReducer`:
    //  * This condition is resolved by LOGIN_SUCCESS.
    //  * It's created only by ACCOUNT_REMOVE.
    //
    // When this condition applies, LOGIN_SUCCESS is the only way we might
    // navigate to the main UI.
    //
    // ACCOUNT_REMOVE is available only from the account-picker screen (not
    // the main UI), and moreover is available for the active account only
    // when not logged in, in which case the main UI can't be on the
    // navigation stack either.
    return false;
  }
  /* eslint-disable-next-line no-use-before-define */
  return getHaveServerData(state);
};
/**
 * Whether we have server data for this account.
 *
 * Returns true only when every server-derived subtree checked below is
 * present and mutually consistent.
 *
 * See also {@link getHaveServerDataGlobal}.
 */
// Note that in our pre-#5006 model where a PerAccountState is secretly just
// GlobalState and implicitly means the active account, if there is *no*
// active account (i.e. if there are no accounts at all), then this will
// throw an exception, not return false.  If that's not desired, then the
// caller is really working with global state and should use
// `getHaveServerDataGlobal`.
export const getHaveServerData = (state: PerAccountState): boolean => {
  // The implementation has to be redundant, because upon rehydrate we can
  // unfortunately have some of our state subtrees containing server data
  // while others don't, reflecting different points in time from the last
  // time the app ran.  In particular, if the user switched accounts (so
  // that we cleared server data in Redux) and then the app promptly
  // crashed, or was killed, that clearing-out may have reached some
  // subtrees but not others.  See #4587 for an example, and #4841 overall.

  // It's important that we never stick around in a state where we're trying
  // to show the main UI but this function returns false.  When in that
  // state, we just show a loading screen with no UI, so there has to be
  // something happening in the background that will get us out of it.
  //
  // The basic strategy is:
  //  * When we start showing the main UI, we always kick off an initial
  //    fetch.  Specifically:
  //    * If at startup (upon rehydrate) we show the main UI, we do so.
  //      This is controlled by `getInitialRouteInfo`, together with
  //      `sessionReducer` as it sets `needsInitialFetch`.
  //    * When we navigate to the main UI (via `resetToMainTabs`), we always
  //      also dispatch an action that causes `needsInitialFetch` to be set.
  //  * Plus, that initial fetch has a timeout, so it will always take us
  //    away from a loading screen regardless of server/network behavior.
  //
  //  * When we had server data and we stop having it, we always also either
  //    navigate away from the main UI, or kick off a new initial fetch.
  //    Specifically:
  //    * Between this function and the reducers, we should only stop having
  //      server data upon certain actions in `accountActions`.
  //    * Some of those actions cause `needsInitialFetch` to be set, as above.
  //    * Those that don't should always be accompanied by navigating away
  //      from the main UI, with `resetToAccountPicker`.
  //
  // Ideally the decisions "should we show the loading screen" and "should
  // we kick off a fetch" would be made together in one place, so that it'd
  // be possible to confirm they align without so much nonlocal reasoning.

  // Specific facts used in the reasoning below (within the strategy above):
  //  * The actions LOGIN_SUCCESS and ACCOUNT_SWITCH cause
  //    `needsInitialFetch` to be set.
  //  * The action LOGOUT is always accompanied by navigating away from the
  //    main UI.
  //  * A successful initial fetch causes a REGISTER_COMPLETE action.  A failed one
  //    causes either LOGOUT, or an abort that ensures we're not at a
  //    loading screen.
  //
  // (The same background facts are used in getHaveServerDataGlobal, too.)

  // Any valid server data comes from the account being logged in.
  if (!tryGetAuth(state)) {
    // From `accountsReducer`:
    //  * This condition is resolved by LOGIN_SUCCESS.
    //  * It's created only by ACCOUNT_REMOVE, by LOGOUT, and by (a
    //    hypothetical) ACCOUNT_SWITCH for a logged-out account.
    //
    // When this condition applies, LOGIN_SUCCESS is the only way we might
    // navigate to the main UI.
    //
    // For ACCOUNT_REMOVE, see the previous condition.
    // ACCOUNT_SWITCH we only do for logged-in accounts.
    return false;
  }

  // Valid server data must have a user: the self user, at a minimum.
  if (getUsers(state).length === 0) {
    // From `usersReducer`:
    //  * This condition is resolved by REGISTER_COMPLETE.
    //  * It's created only by LOGIN_SUCCESS, LOGOUT, and ACCOUNT_SWITCH.
    return false;
  }

  // It must also have the self user's user ID.
  const ownUserId = state.realm.user_id;
  if (ownUserId === undefined) {
    // From `realmReducer`:
    //  * This condition is resolved by REGISTER_COMPLETE.
    //  * It's created only by LOGIN_SUCCESS, LOGOUT, and ACCOUNT_SWITCH.
    return false;
  }

  // We can also do a basic consistency check between those two subtrees:
  // the self user identified in `state.realm` is among those we have in
  // `state.users`.  (If for example the previous run of the app switched
  // accounts, and got all the way to writing the new account's
  // `state.realm` but not even clearing out `state.users` or vice versa,
  // then this check would fire.  And in that situation without this check,
  // we crash early on because `getOwnUser` fails.)
  if (!getUsersById(state).get(ownUserId)) {
    // From the reducers (and assumptions about the server's data):
    //  * This condition is resolved by REGISTER_COMPLETE.
    //  * It's never created (post-rehydrate.)
    return false;
  }

  // TODO: A nice bonus would be to check that the account matches the
  // server data, given any of:
  //  * user ID in `Account` (#4951)
  //  * realm URL in `RealmState`
  //  * delivery email in `RealmState` and/or `User` (though not sure this
  //    is available from server, even for self, in all configurations)

  // Any other subtree could also have been emptied while others weren't,
  // or otherwise be out of sync.
  //
  // But it appears that in every other subtree containing server state, the
  // empty state (i.e. the one we reset to on logout or account switch) is a
  // valid possible state.  That means (a) we can't so easily tell that it's
  // out of sync, but also (b) the app's UI is not so likely to just crash
  // from the get-go if it is -- because at least it won't crash simply
  // because the state is empty.
  //
  // There're still plenty of other ways different subtrees can be out of
  // sync with each other: `state.narrows` could know about some new message
  // that `state.messages` doesn't, or `state.messages` have a message sent
  // by a user that `state.users` has no record of.
  //
  // But given that shortly after starting to show the main app UI (whether
  // that's at startup, or after picking an account or logging in) we go
  // fetch fresh data from the server anyway, the checks above are hopefully
  // enough to let the app survive that long.

  // All checks passed: treat the server data as usable.
  return true;
};
|
// Doxygen-generated navigation index for Leap::ToolList: each entry maps a
// member name to its HTML anchor in class_leap_1_1_tool_list.html.
// Do not edit by hand — regenerated by the documentation build.
var class_leap_1_1_tool_list =
[
    [ "const_iterator", "class_leap_1_1_tool_list.html#a7f52ee5561016e8d42512e2adbc820de", null ],
    [ "ToolList", "class_leap_1_1_tool_list.html#a28459f829ceca1ac6edfe5dd5f0a73d5", null ],
    [ "ToolList", "class_leap_1_1_tool_list.html#a6878221b6eeaa99bb64840d5186493e3", null ],
    [ "append", "class_leap_1_1_tool_list.html#a5fd54d5a415377e32f50c39a0b46a884", null ],
    [ "begin", "class_leap_1_1_tool_list.html#a67deec63ce827250a70c3f4a408dff81", null ],
    [ "count", "class_leap_1_1_tool_list.html#a7e5df033c5f217fb173613339fc78cda", null ],
    [ "empty", "class_leap_1_1_tool_list.html#a64586106af4e4c13021e488c6091acd4", null ],
    [ "end", "class_leap_1_1_tool_list.html#a60b8b7262fbd1ac0980b2d493c6e4e5c", null ],
    [ "frontmost", "class_leap_1_1_tool_list.html#ac94179ceaffd2d1deb87c452517b9fdd", null ],
    [ "isEmpty", "class_leap_1_1_tool_list.html#abd70b38dbeddb37c5be77ee6b9336666", null ],
    [ "leftmost", "class_leap_1_1_tool_list.html#a105bec25ad4366959e8f02c383ff2313", null ],
    [ "operator[]", "class_leap_1_1_tool_list.html#ae04777b465395513a71fa1e67729691e", null ],
    [ "rightmost", "class_leap_1_1_tool_list.html#a45a79c09b6690eac76985a2b883af3d1", null ]
];
|
/************************************************************* 所有带确认的ajax提交btn ********************************************************/
/* get执行并返回结果,执行后不带跳转 */
$(function(){
    // GET the link's URL and show the JSON result in a layer alert;
    // status==1 means success (icon 6), anything else failure (icon 5).
    $('.rst-btn').click(function(){
        var url = this.href;
        $.get(url, function(data){
            var icon = data.status == 1 ? 6 : 5;
            layer.alert(data.info, {icon: icon});
        }, "json");
        return false; // suppress the default navigation
    });
});
/* get执行并返回结果,执行后带跳转 */
$(function(){
    // Like .rst-btn, but a successful response redirects to data.url
    // once the user dismisses the alert.
    $('.rst-url-btn').click(function(){
        var url = this.href;
        $.get(url, function(data){
            if (!data.status) {
                layer.alert(data.info, {icon: 5}, function(index){
                    layer.close(index);
                });
                return;
            }
            layer.alert(data.info, {icon: 6}, function(index){
                layer.close(index);
                window.location.href = data.url;
            });
        }, "json");
        return false;
    });
});
/* 直接跳转 */
$(function(){
    // Ask for confirmation (message taken from data-info), then navigate
    // to the link's URL.
    $(".confirm-btn").click(function(){
        var url = this.href;
        var info = $(this).data('info');
        layer.confirm(info, {icon: 3}, function(index){
            layer.close(index);
            window.location.href = url;
        });
        return false;
    });
});
/* post执行并返回结果,执行后不带跳转 */
$(function(){
    // Confirm (message from data-info), then POST to the link's URL and
    // show the JSON result without navigating away.
    $('.confirm-rst-btn').click(function(){
        var url = this.href,
            info = $(this).data('info');
        layer.confirm(info, {icon: 3}, function(index){
            layer.close(index);
            $.post(url, {}, function(data){
                // BUG FIX: the original always used the success icon (6)
                // regardless of data.status; mirror the sibling handlers
                // and show the failure icon (5) when status is falsy.
                if (data.status) {
                    layer.alert(data.info, {icon: 6});
                } else {
                    layer.alert(data.info, {icon: 5});
                }
            }, "json");
        });
        return false;
    });
});
/* get执行并返回结果,执行后带跳转 */
$(function(){
    // Confirm first; on success GET the URL and redirect to data.url after
    // the alert is dismissed, otherwise just show the error.
    $('.confirm-rst-url-btn').click(function(){
        var url = this.href,
            info = $(this).data('info');
        layer.confirm(info, {icon: 3}, function(index){
            layer.close(index);
            $.get(url, function(data){
                if (!data.status) {
                    layer.alert(data.info, {icon: 5}, function(i){
                        layer.close(i);
                    });
                    return;
                }
                layer.alert(data.info, {icon: 6}, function(i){
                    layer.close(i);
                    window.location.href = data.url;
                });
            }, "json");
        });
        return false;
    });
});
/*************************************************************************** 所有状态类的ajax提交btn ********************************************************/
/* 审核状态操作 */
$(function(){
    // Generic status-toggle binding shared by the four handlers below:
    // POST {x: id} to the link's URL, then swap the badge in the container
    // (prefix + id) according to the info text the server returns.
    function bindToggleButton(selector, prefix, negativeInfo, negativeLabel, positiveLabel){
        $(selector).click(function(){
            var url = this.href,
                id = $(this).data('id');
            $.post(url, {x: id}, function(data){
                if (!data.status) {
                    layer.alert(data.info, {icon: 5});
                    return;
                }
                var negative = (data.info == negativeInfo);
                var cls = negative ? 'btn-danger' : 'btn-yellow';
                var label = negative ? negativeLabel : positiveLabel;
                $(prefix + id).html('<button class="btn btn-minier ' + cls + '">' + label + '</button>');
            }, "json");
            return false;
        });
    }

    // Review state (审核状态)
    bindToggleButton('.state-btn', '#zt', '未审', '未审', '已审');
    // Enabled state (启用状态)
    bindToggleButton('.open-btn', '#zt', '状态禁止', '禁用', '开启');
    // Visibility state (显示状态)
    bindToggleButton('.display-btn', '#zt', '状态禁止', '隐藏', '显示');
    // Activation state (激活状态)
    bindToggleButton('.active-btn', '#jh', '未激活', '未激活', '已激活');
});
/*************************************************************************** 所有ajaxForm提交 ********************************************************/
/* Generic form, no pre-submit validation; failure does NOT redirect */
$(function(){
    $('.ajaxForm').ajaxForm({
        success: complete2, // callback invoked after the form is submitted
        dataType: 'json'
    });
});
/* Generic form, no pre-submit validation; failure redirects */
$(function(){
    $('.ajaxForm2').ajaxForm({
        success: complete, // callback invoked after the form is submitted
        dataType: 'json'
    });
});
/* Member add/edit form, with pre-submit validation */
$(function(){
    $('.memberform').ajaxForm({
        beforeSubmit: checkmemberForm, // runs before submit; returning false aborts
        success: complete, // callback invoked after the form is submitted
        dataType: 'json'
    });
});
/* Admin add/edit form, with pre-submit validation */
$(function(){
    $('.adminform').ajaxForm({
        beforeSubmit: checkadminForm, // runs before submit; returning false aborts
        success: complete, // callback invoked after the form is submitted
        dataType: 'json'
    });
});
/* Multi-select (bulk) delete */
$(function(){
    $('#alldel').ajaxForm({
        beforeSubmit: checkselectForm, // runs before submit; typically an emptiness check
        success: complete2, // callback invoked after the form is submitted
        dataType: 'json'
    });
});
// ajaxForm success callback: alert the result, then ALWAYS redirect to
// data.url after the alert is dismissed (even on failure).
function complete(data){
    var ok = data.status == 1;
    layer.alert(data.info, {icon: ok ? 6 : 5}, function(index){
        layer.close(index);
        window.location.href = data.url;
    });
    if (!ok) {
        return false;
    }
}
// ajaxForm success callback: alert the result; redirect to data.url only
// on success — failure just closes the alert.
function complete2(data){
    if (data.status != 1) {
        layer.alert(data.info, {icon: 5}, function(index){
            layer.close(index);
        });
        return;
    }
    layer.alert(data.info, {icon: 6}, function(index){
        layer.close(index);
        window.location.href = data.url;
    });
}
// Validate the admin form before ajaxForm submits it; returning false aborts.
function checkadminForm(){
    var username = $.trim($('input[name="admin_username"]').val());
    var chineseRe = /^[\u4e00-\u9fa5]+$/; // matches all-Chinese strings

    // The login name must not contain spaces.
    if (username.indexOf(" ") >= 0)
    {
        layer.alert('登录用户名包含了空格,请重新输入', {icon: 5}, function(index){
            layer.close(index);
            $('#admin_username').focus();
        });
        return false;
    }

    // Reject names made up entirely of Chinese characters.
    if (chineseRe.test(username)) {
        layer.alert('用户名必须是字母,数字,符号', {icon: 5}, function(index){
            layer.close(index);
            $('#admin_username').focus();
        });
        return false;
    }

    // Mainland-CN mobile number: 13x/15x/17x/18x followed by 8 digits.
    if (!$("#admin_tel").val().match(/^(((13[0-9]{1})|(15[0-9]{1})|(17[0-9]{1})|(18[0-9]{1}))+\d{8})$/)) {
        layer.alert('电话号码格式不正确', {icon: 5}, function(index){
            layer.close(index);
            $('#admin_tel').focus();
        });
        return false;
    }
}
// Validate the member form's phone number before ajaxForm submits it.
function checkmemberForm(){
    var telPattern = /^(((13[0-9]{1})|(15[0-9]{1})|(17[0-9]{1})|(18[0-9]{1}))+\d{8})$/;
    var tel = $("#member_list_tel").val();
    if (telPattern.test(tel)) {
        return;
    }
    layer.alert('电话号码格式不正确', {icon: 5}, function(index){
        layer.close(index);
        $('#member_list_tel').focus();
    });
    return false;
}
// Require at least one checked item before the bulk-delete form submits.
function checkselectForm(){
    var selected = $('input[id="navid"]:checked').map(function(){
        return $(this).val();
    }).get();
    if (selected.length == 0) {
        layer.alert('至少选择一个删除项', {icon: 5});
        return false;
    }
}
/*************************************************************************** CSS / layout tweaks ********************************************************/
/* Menu styling: relocate the secondary sidebar into the page and keep its
   "fixed" state in sync with the primary sidebar's ace setting. */
jQuery(function($) {
    // Move #sidebar2 (and its toggle button) into the header navigation area.
    $('#sidebar2').insertBefore('.page-content');
    $('.navbar-toggle[data-target="#sidebar2"]').insertAfter('#menu-toggler');
    // Mirror the ace "sidebar_fixed" setting onto #sidebar2 / #navbar; the
    // triggerHandler call below applies the current state immediately on load.
    $(document).on('settings.ace.two_menu', function(e, event_name, event_val) {
        if(event_name == 'sidebar_fixed') {
            if( $('#sidebar').hasClass('sidebar-fixed') ) {
                $('#sidebar2').addClass('sidebar-fixed');
                $('#navbar').addClass('h-navbar');
            }
            else {
                $('#sidebar2').removeClass('sidebar-fixed')
                $('#navbar').removeClass('h-navbar');
            }
        }
    }).triggerHandler('settings.ace.two_menu', ['sidebar_fixed' ,$('#sidebar').hasClass('sidebar-fixed')]);
})
/* Multi-select helpers */
// Clear the "check all" master box (called when a row is unchecked).
function unselectall(){
    var master = document.myform.chkAll;
    if (master.checked) {
        master.checked = false; // same effect as the old `checked & 0` trick
    }
}
// Toggle every enabled checkbox in `form` to match the chkAll master box.
// BUGFIX: the original tested `e.Name` (capital N), which is always undefined
// on form elements, so the master box itself was also (redundantly)
// re-assigned; the DOM property is `e.name`.
function CheckAll(form){
    for (var i = 0; i < form.elements.length; i++){
        var e = form.elements[i];
        if (e.name != 'chkAll' && e.disabled == false)
            e.checked = form.chkAll.checked;
    }
}
/* Permission configuration: cascading checkbox tree keyed by the `dataid`
   attribute. A child's dataid is its parent's dataid plus one "-segment"
   (e.g. parent "id1-2", child "id1-2-3"). */
$(function(){
    // Checking/unchecking a parent propagates its state to every descendant
    // (any input whose dataid starts with this node's dataid).
    $('input.checkbox-parent').on('change',function(){
        var dataid=$(this).attr("dataid");
        $('input[dataid^='+dataid+']').prop('checked',$(this).is(':checked'));
    });
    $('input.checkbox-child').on('change',function(){
        var dataid=$(this).attr("dataid");
        // Strip the last "-segment" to get the parent's dataid.
        dataid=dataid.substring(0,dataid.lastIndexOf("-"));
        var parent=$('input[dataid='+dataid+']');
        if($(this).is(':checked')){
            parent.prop('checked',true);
            // Walk up and check every ancestor until the top level.
            // NOTE(review): `lastIndexOf("-")!=2` assumes top-level ids place
            // their first "-" at index 2 — confirm against the id scheme.
            while(dataid.lastIndexOf("-")!=2){
                dataid=dataid.substring(0,dataid.lastIndexOf("-"));
                parent=$('input[dataid='+dataid+']');
                parent.prop('checked',true);
            }
        }else{
            // Unchecking: clear the parent only when no sibling is still checked.
            if($('input[dataid^='+dataid+'-]:checked').length==0){
                parent.prop('checked',false);
                // Continue clearing ancestors left without any checked child.
                while(dataid.lastIndexOf("-")!=2){
                    dataid=dataid.substring(0,dataid.lastIndexOf("-"));
                    parent=$('input[dataid='+dataid+']');
                    if($('input[dataid^='+dataid+'-]:checked').length==0){
                        parent.prop('checked',false);
                    }
                }
            }
        }
    });
});
// Modal initial state: hide the edit/add dialogs on load and wire every close
// button (#gb/#gbb/#gbbb) to slide the corresponding dialog away.
$(document).ready(function(){
    $("#myModaledit").hide();
    $("#gb, #gbb, #gbbb").click(function(){
        $("#myModaledit").hide(200);
    });
});
$(document).ready(function(){
    $("#myModal").hide();
    $("#gb, #gbb, #gbbb").click(function(){
        $("#myModal").hide(200);
    });
});
/*************************************************************************** AJAX loaders for the edit dialogs ********************************************************/
/* Member group edit: fetch the record by id and fill the edit modal. */
$(function(){
    $(".memberedit-btn").click(function(){
        var postUrl = this.href;
        var groupId = $(this).data('id');
        $.post(postUrl, {member_group_id: groupId}, function(data){
            if(data.status != 1){
                layer.alert(data.info, {icon: 5});
                return;
            }
            $(document).ready(function(){
                $("#myModaledit").show(300);
                $("#editmember_group_id").val(data.member_group_id);
                $("#editmember_group_name").val(data.member_group_name);
                $("#editmember_group_open").val(data.member_group_open);
                $("#editmember_group_toplimit").val(data.member_group_toplimit);
                $("#editmember_group_bomlimit").val(data.member_group_bomlimit);
                $("#editmember_group_order").val(data.member_group_order);
            });
        }, "json");
        return false; // suppress the anchor's default navigation
    });
});
/* Link-type dialog: open the add/edit modal pre-filled with id/name/order. */
function openWindow(typeId, typeName, typeOrder) {
    $(document).ready(function(){
        $("#myModal").show(300);
        $("#plug_linktype_id").val(typeId);
        $("#newplug_linktype_name").val(typeName);
        $("#newplug_linktype_order").val(typeOrder);
    });
}
/* Friend-link edit: fetch the record by id and fill the edit modal. */
$(function(){
    $(".linkedit-btn").click(function(){
        var postUrl = this.href;
        var linkId = $(this).data('id');
        $.post(postUrl, {plug_link_id: linkId}, function(data){
            if(data.status != 1){
                layer.alert(data.info, {icon: 5});
                return;
            }
            $(document).ready(function(){
                $("#myModaledit").show(300);
                $("#editplug_link_id").val(data.plug_link_id);
                $("#editplug_link_name").val(data.plug_link_name);
                $("#editplug_link_url").val(data.plug_link_url);
                $("#editplug_link_target").val(data.plug_link_target);
                $("#editplug_link_qq").val(data.plug_link_qq);
                $("#editplug_link_order").val(data.plug_link_order);
                $("#editplug_link_typeid").val(data.plug_link_typeid);
            });
        }, "json");
        return false; // suppress the anchor's default navigation
    });
});
/* Ad-slot edit: fetch the record by id and fill the edit modal. */
$(function(){
    $(".adtypeedit-btn").click(function(){
        var postUrl = this.href;
        var slotId = $(this).data('id');
        $.post(postUrl, {plug_adtype_id: slotId}, function(data){
            if(data.status != 1){
                layer.alert(data.info, {icon: 5});
                return;
            }
            $(document).ready(function(){
                $("#myModaledit").show(300);
                $("#adtype_id").val(data.plug_adtype_id);
                $("#adtype_name").val(data.plug_adtype_name);
                $("#adtype_order").val(data.plug_adtype_order);
            });
        }, "json");
        return false; // suppress the anchor's default navigation
    });
});
/* Route-rule edit: fetch the record by id and fill the edit modal. */
$(function(){
    $(".routeedit-btn").click(function(){
        var postUrl = this.href,
            routeId = $(this).data('id');
        $.post(postUrl, {id: routeId}, function(data){
            if(data.status == 1){
                $(document).ready(function(){
                    $("#myModaledit").show(300);
                    $("#editroute_id").val(data.id);
                    $("#editroute_full_url").val(data.full_url);
                    $("#editroute_url").val(data.url);
                    // BUGFIX: use .prop() for the checked state; with jQuery
                    // >= 1.6 .attr("checked", ...) manipulates the attribute,
                    // not the live property, so re-opening the dialog could
                    // show a stale checkbox state.
                    $("#editroute_status").prop("checked", data.r_status == 1);
                    $("#editroute_listorder").val(data.listorder);
                });
            }else{
                layer.alert(data.info, {icon: 5});
            }
        }, "json");
        return false; // suppress the anchor's default navigation
    });
});
/* News-source edit: fetch the record by id and fill the edit modal. */
$(function(){
    $(".sourceedit-btn").click(function(){
        var postUrl = this.href;
        var sourceId = $(this).data('id');
        $.post(postUrl, {source_id: sourceId}, function(data){
            if(data.status != 1){
                layer.alert(data.info, {icon: 5});
                return;
            }
            $(document).ready(function(){
                $("#myModaledit").show(300);
                $("#editsource_id").val(data.source_id);
                $("#editsource_name").val(data.source_name);
                $("#editsource_order").val(data.source_order);
            });
        }, "json");
        return false; // suppress the anchor's default navigation
    });
});
// News source: copy a preset source name into the form's #news_source field.
function souadd(val){
$('#news_source').val(val);
}
/* WeChat menu edit: fetch the record by id and fill the edit modal. */
$(function(){
    $(".menuedit-btn").click(function(){
        var postUrl = this.href;
        var menuId = $(this).data('id');
        $.post(postUrl, {we_menu_id: menuId}, function(data){
            if(data.status != 1){
                layer.alert(data.info, {icon: 5});
                return;
            }
            $(document).ready(function(){
                $("#myModaledit").show(300);
                $("#editwe_menu_id").val(data.we_menu_id);
                $("#editwe_menu_name").val(data.we_menu_name);
                $("#editwe_menu_leftid").val(data.we_menu_leftid);
                $("#editwe_menu_type").val(data.we_menu_type);
                $("#editwe_menu_typeval").val(data.we_menu_typeval);
            });
        }, "json");
        return false; // suppress the anchor's default navigation
    });
});
/*************************************************************************** Single/multi image handling ********************************************************/
/* Single-image upload: preview the newly chosen file in #img0. */
$("#file0").change(function(){
    var previewUrl = getObjectURL(this.files[0]);
    console.log("objUrl = " + previewUrl);
    if (!previewUrl) {
        return;
    }
    $("#img0").attr("src", previewUrl);
});
// Build a temporary object URL for a just-selected File, using whichever
// object-URL factory this browser exposes (legacy window.createObjectURL,
// standard window.URL, or prefixed window.webkitURL). Also resets
// #oldcheckpic to "nopic" to flag that the original picture was replaced.
function getObjectURL(file) {
    var url = null;
    var factory = null;
    if (window.createObjectURL != undefined) {        // basic / legacy
        factory = window;
    } else if (window.URL != undefined) {             // mozilla (firefox)
        factory = window.URL;
    } else if (window.webkitURL != undefined) {       // webkit or chrome
        factory = window.webkitURL;
    }
    if (factory != null) {
        $("#oldcheckpic").val("nopic");
        url = factory.createObjectURL(file);
    }
    return url;
}
// Revert the single-image widget to the pre-edit picture.
function backpic(picurl){
    var $preview = $("#img0");
    $preview.attr("src", picurl);               // restore the original preview
    $("input[name='file0']").val("");           // drop the newly chosen file
    $("input[name='oldcheckpic']").val(picurl); // remember the kept picture
}
/* News multi-image delete: hide the thumbnail row and drop its path from the
   comma-separated list kept in #pic_oldlist. */
function delall(id, url){
    $('#id' + id).hide();
    var $list = $('#pic_oldlist'); // holds the original full path list
    // Remove the first occurrence of "url," from the list.
    $list.val($list.val().replace(url + ',', ""));
}
/*************************************************************************** Database backup / restore ********************************************************/
/* Database backup, optimize and repair. */
(function($){
    // Per-table "optimize" links: fire the request, report the result.
    $("a[id^=optimize_]").click(function(){
        $.get(this.href,function(data) {
            if(data.status){
                layer.alert(data.info, {icon: 6});
            }else{
                layer.alert(data.info, {icon: 5});
            }
        });
        return false;
    });
    // Per-table "repair" links: same pattern as optimize.
    $("a[id^=repair_]").click(function(){
        $.get(this.href,function(data) {
            if(data.status){
                layer.alert(data.info, {icon: 6});
            }else{
                layer.alert(data.info, {icon: 5});
            }
        });
        return false;
    });
    // BUGFIX: the original `var` list ended with a bare `tables` and no
    // trailing comma, so ASI terminated the statement and `$optimize` /
    // `$repair` leaked as implicit globals. `that` was also never declared
    // anywhere, so the setTimeout callbacks below crashed with a
    // ReferenceError; it is now a shared slot remembering the button that
    // triggered the current request so it can be re-enabled.
    var $form = $("#export-form"), $export = $("#export"), tables,
        $optimize = $("#optimize"), $repair = $("#repair"), that;
    // Bulk optimize / repair of the checked tables.
    $optimize.add($repair).click(function(){
        that = this; // remember the clicked button for the re-enable timer
        $.post(this.href, $form.serialize(), function(data){
            if(data.status){
                layer.alert(data.info, {icon: 6}, function(index){
                    layer.close(index);
                    window.location.href=data.url;
                });
            } else {
                layer.alert(data.info, {icon: 5}, function(index){
                    layer.close(index);
                });
            }
            setTimeout(function(){
                $('#top-alert').find('button').click();
                $(that).removeClass('disabled').prop('disabled',false);
            },1500);
        }, "json");
        return false;
    });
    // "Back up now": ask the server for the table list, then dump the tables
    // one request at a time via backup().
    $export.click(function(){
        that = this;
        $export.parent().children().addClass("disabled");
        $export.html("正在发送备份请求...");
        $.post(
            $form.attr("action"),
            $form.serialize(),
            function(data){
                if(data.status){
                    tables = data.tables;
                    $export.html(data.info + "开始备份,请不要关闭本页面!");
                    backup(data.tab);
                    // Warn before leaving the page while a backup is running.
                    window.onbeforeunload = function(){ return "正在备份数据库,请不要关闭!" }
                } else {
                    layer.alert(data.info, {icon: 5});
                    $export.parent().children().removeClass("disabled");
                    $export.html("立即备份");
                    setTimeout(function(){
                        $('#top-alert').find('button').click();
                        $(that).removeClass('disabled').prop('disabled',false);
                    },1500);
                }
            },
            "json"
        );
        return false;
    });
    // Back up one table chunk; the server returns the next chunk descriptor
    // in data.tab until the whole dump is finished.
    function backup(tab, status){
        status && showmsg(tab.id, "开始备份...(0%)");
        $.get($form.attr("action"), tab, function(data){
            if(data.status){
                showmsg(tab.id, data.info);
                if(!$.isPlainObject(data.tab)){
                    // No follow-up chunk: the backup is complete.
                    $export.parent().children().removeClass("disabled");
                    $export.html("备份完成,点击重新备份");
                    window.onbeforeunload = function(){ return null }
                    return;
                }
                backup(data.tab, tab.id != data.tab.id);
            } else {
                updateAlert(data.info,'alert-error');
                $export.parent().children().removeClass("disabled");
                $export.html("立即备份");
                setTimeout(function(){
                    $('#top-alert').find('button').click();
                    $(that).removeClass('disabled').prop('disabled',false);
                },1500);
            }
        }, "json");
    }
    // Write a progress message into the table row being dumped.
    function showmsg(id, msg){
        $form.find("input[value=" + tables[id] + "]").closest("tr").find(".info").html(msg);
    }
})(jQuery);
/*************************************************************************** Misc ********************************************************/
/* Character counters: textarea.limited pairs with .charsLeft and
   textarea.limitedN with .charsLeftN for N = 1..5. */
$(function(){
    $('textarea.limited').maxlength({
        'feedback' : '.charsLeft',
    });
    for (var n = 1; n <= 5; n++) {
        $('textarea.limited' + n).maxlength({
            'feedback' : '.charsLeft' + n,
        });
    }
});
// Enable Bootstrap tooltips everywhere.
$(function () { $("[data-toggle='tooltip']").tooltip(); });
|
# Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function

# Package metadata for readme_renderer, exposed as the conventional dunder
# attributes and re-exported through ``__all__``.
__all__ = [
    "__title__",
    "__summary__",
    "__uri__",
    "__version__",
    "__author__",
    "__email__",
    "__license__",
    "__copyright__",
]

__title__ = "readme_renderer"
__summary__ = (
    'readme_renderer is a library for rendering "readme" descriptions for Warehouse'
)
__uri__ = "https://github.com/pypa/readme_renderer"
__version__ = "30.0"
__author__ = "The Python Packaging Authority"
__email__ = "admin@mail.pypi.org"
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2014 {}".format(__author__)
|
/*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
// Catalan ("ca") localisation of the CKEditor "specialchar" plugin: one
// human-readable label per special character / entity offered by the dialog.
// Numeric keys (372, 8219, ...) are raw Unicode code points.
CKEDITOR.plugins.setLang("specialchar",
    "ca",
    {
        euro: "Símbol d'euro",
        lsquo: "Signe de cometa simple esquerra",
        rsquo: "Signe de cometa simple dreta",
        ldquo: "Signe de cometa doble esquerra",
        rdquo: "Signe de cometa doble dreta",
        // NOTE(review): ndash/mdash labels look off — "Guió baix" usually
        // means underscore; em dash is normally "guió llarg". Confirm against
        // the upstream CKEditor language files before changing.
        ndash: "Guió",
        mdash: "Guió baix",
        iexcl: "Signe d'exclamació inversa",
        // NOTE(review): ¢ is the cent sign ("cèntim"), not a percent symbol —
        // likely a mistranslation; confirm upstream before changing.
        cent: "Símbol de percentatge",
        pound: "Símbol de lliura",
        curren: "Símbol de moneda",
        yen: "Símbol de Yen",
        brvbar: "Barra trencada",
        sect: "Símbol de secció",
        uml: "Dièresi",
        copy: "Símbol de Copyright",
        ordf: "Indicador ordinal femení",
        laquo: "Signe de cometes angulars esquerra",
        not: "Símbol de negació",
        reg: "Símbol registrat",
        macr: "Macron",
        deg: "Símbol de grau",
        sup2: "Superíndex dos",
        sup3: "Superíndex tres",
        acute: "Accent agut",
        micro: "Símbol de micro",
        para: "Símbol de calderó",
        middot: "Punt volat",
        cedil: "Ce trencada",
        sup1: "Superíndex u",
        ordm: "Indicador ordinal masculí",
        raquo: "Signe de cometes angulars dreta",
        frac14: "Fracció vulgar un quart",
        frac12: "Fracció vulgar una meitat",
        frac34: "Fracció vulgar tres quarts",
        iquest: "Símbol d'interrogació invertit",
        Agrave: "Lletra majúscula llatina A amb accent greu",
        Aacute: "Lletra majúscula llatina A amb accent agut",
        Acirc: "Lletra majúscula llatina A amb circumflex",
        Atilde: "Lletra majúscula llatina A amb titlla",
        Auml: "Lletra majúscula llatina A amb dièresi",
        Aring: "Lletra majúscula llatina A amb anell superior",
        AElig: "Lletra majúscula llatina Æ",
        Ccedil: "Lletra majúscula llatina C amb ce trencada",
        Egrave: "Lletra majúscula llatina E amb accent greu",
        Eacute: "Lletra majúscula llatina E amb accent agut",
        Ecirc: "Lletra majúscula llatina E amb circumflex",
        Euml: "Lletra majúscula llatina E amb dièresi",
        Igrave: "Lletra majúscula llatina I amb accent greu",
        Iacute: "Lletra majúscula llatina I amb accent agut",
        Icirc: "Lletra majúscula llatina I amb circumflex",
        Iuml: "Lletra majúscula llatina I amb dièresi",
        ETH: "Lletra majúscula llatina Eth",
        Ntilde: "Lletra majúscula llatina N amb titlla",
        Ograve: "Lletra majúscula llatina O amb accent greu",
        Oacute: "Lletra majúscula llatina O amb accent agut",
        Ocirc: "Lletra majúscula llatina O amb circumflex",
        Otilde: "Lletra majúscula llatina O amb titlla",
        Ouml: "Lletra majúscula llatina O amb dièresi",
        times: "Símbol de multiplicació",
        Oslash: "Lletra majúscula llatina O amb barra",
        Ugrave: "Lletra majúscula llatina U amb accent greu",
        Uacute: "Lletra majúscula llatina U amb accent agut",
        Ucirc: "Lletra majúscula llatina U amb circumflex",
        Uuml: "Lletra majúscula llatina U amb dièresi",
        Yacute: "Lletra majúscula llatina Y amb accent agut",
        THORN: "Lletra majúscula llatina Thorn",
        szlig: "Lletra minúscula llatina sharp s",
        agrave: "Lletra minúscula llatina a amb accent greu",
        aacute: "Lletra minúscula llatina a amb accent agut",
        acirc: "Lletra minúscula llatina a amb circumflex",
        atilde: "Lletra minúscula llatina a amb titlla",
        auml: "Lletra minúscula llatina a amb dièresi",
        aring: "Lletra minúscula llatina a amb anell superior",
        aelig: "Lletra minúscula llatina æ",
        ccedil: "Lletra minúscula llatina c amb ce trencada",
        egrave: "Lletra minúscula llatina e amb accent greu",
        eacute: "Lletra minúscula llatina e amb accent agut",
        ecirc: "Lletra minúscula llatina e amb circumflex",
        euml: "Lletra minúscula llatina e amb dièresi",
        igrave: "Lletra minúscula llatina i amb accent greu",
        iacute: "Lletra minúscula llatina i amb accent agut",
        icirc: "Lletra minúscula llatina i amb circumflex",
        iuml: "Lletra minúscula llatina i amb dièresi",
        eth: "Lletra minúscula llatina eth",
        ntilde: "Lletra minúscula llatina n amb titlla",
        ograve: "Lletra minúscula llatina o amb accent greu",
        oacute: "Lletra minúscula llatina o amb accent agut",
        ocirc: "Lletra minúscula llatina o amb circumflex",
        otilde: "Lletra minúscula llatina o amb titlla",
        ouml: "Lletra minúscula llatina o amb dièresi",
        divide: "Símbol de divisió",
        oslash: "Lletra minúscula llatina o amb barra",
        ugrave: "Lletra minúscula llatina u amb accent greu",
        uacute: "Lletra minúscula llatina u amb accent agut",
        ucirc: "Lletra minúscula llatina u amb circumflex",
        uuml: "Lletra minúscula llatina u amb dièresi",
        yacute: "Lletra minúscula llatina y amb accent agut",
        thorn: "Lletra minúscula llatina thorn",
        yuml: "Lletra minúscula llatina y amb dièresi",
        OElig: "Lligadura majúscula llatina OE",
        oelig: "Lligadura minúscula llatina oe",
        372: "Lletra majúscula llatina W amb circumflex",
        374: "Lletra majúscula llatina Y amb circumflex",
        373: "Lletra minúscula llatina w amb circumflex",
        375: "Lletra minúscula llatina y amb circumflex",
        sbquo: "Signe de cita simple baixa-9",
        8219: "Signe de cita simple alta-invertida-9",
        bdquo: "Signe de cita doble baixa-9",
        hellip: "Punts suspensius",
        trade: "Símbol de marca registrada",
        9658: "Punter negre apuntant cap a la dreta",
        bull: "Vinyeta",
        rarr: "Fletxa cap a la dreta",
        rArr: "Doble fletxa cap a la dreta",
        hArr: "Doble fletxa esquerra dreta",
        diams: "Vestit negre diamant",
        asymp: "Gairebé igual a"
    });
|
// Kivy Sphinx-theme page script: detects API pages (filenames starting with
// "api-"), inserts the "bodyshortcut" header bar, adds an API breaker with
// show/hide-description toggles (state persisted via $.cookie), trims and
// rewires the sidebar TOC, and annotates API entries with "Added in <version>"
// badges parsed from Sphinx "versionadded" blocks.
$(function() {
    var bodyshortcut = false;
    // Insert the shortcut bar after the first <h1> exactly once.
    function ensure_bodyshortcut() {
        if ( bodyshortcut == true )
            return;
        var bsc = $('<div class="bodyshortcut"> </div>');
        bsc.insertAfter($('div.body h1:first'));
        bodyshortcut = true;
    };
    // if it's an API page, show the module name.
    var pagename = location.pathname.split('/');
    var is_api = false;
    pagename = pagename[pagename.length - 1];
    if (pagename.search('api-') == 0) {
        // Strip the "api-" prefix and the ".html" suffix to get the module name.
        pagename = pagename.substr(4, pagename.length - 9);
        ensure_bodyshortcut();
        var modulename = $('<div class="left">Module: <a href="#">' + pagename + '</a></div>')
        modulename.appendTo($('div.bodyshortcut'));
        is_api = true;
    }
    // insert breaker only for the first data/class/function found.
    var apibreaker = false;
    $('div.body dl[class]').each(function (i1, elem) {
        // these are first level class: attribute and method are inside class.
        if (!$(elem).hasClass('data') &&
            !$(elem).hasClass('class') &&
            !$(elem).hasClass('exception') &&
            !$(elem).hasClass('function'))
            return;
        // dont accept dl inside dl
        if ($(elem).parents().filter('dl').length > 0)
            return;
        $(elem).addClass('api-level');
        if ( apibreaker == true )
            return;
        $('<div id="api"></div>')
            .attr('id', 'api')
            .html(
                $('<h2>API ' +
                  '<a id="api-toggle-desc" class="showed">Hide Description ⇑</a>' +
                  '</h2>')
            )
            .insertBefore(elem);
        apibreaker = true;
    });
    // Hover highlight on API entry headers.
    $('div.body dl[class] dt')
        .on("mouseenter", function() { $(this).addClass('hover'); })
        .on("mouseleave", function() { $(this).removeClass('hover'); });
    if ( apibreaker == true ) {
        ensure_bodyshortcut();
        var apilink = $('<div class="navlink right"><a id="api-link" href="#api">Jump to API</a> ⇓</div>');
        apilink.insertBefore($('div.bodyshortcut'));
    }
    // Toggle all API descriptions at once; state saved in a cookie.
    $('#api-toggle-desc').on("click", function() {
        if ($(this).hasClass('showed')) {
            $('div.body dl.api-level > dd p').hide();
            $('div.body dl.api-level > dd pre').hide();
            $('div.body dl.api-level > dd blockquote').hide();
            $('div.body dl.api-level > dd ul').hide();
            $(this).removeClass('showed');
            $(this).html('Show Descriptions ⇓');
            $.cookie('kivy.toggledesc', 'true');
        } else {
            $('div.body dl.api-level > dd p').show();
            $('div.body dl.api-level > dd pre').show();
            $('div.body dl.api-level > dd blockquote').show();
            $('div.body dl.api-level > dd ul').show();
            $(this).addClass('showed');
            $(this).html('Hide Descriptions ⇑');
            $.cookie('kivy.toggledesc', 'false');
        }
    });
    // Clicking an entry header toggles its own body.
    $('div.body dl.api-level dt').on("click", function() {
        $(this).next().children().toggle();
    });
    // Restore persisted toggle states.
    // NOTE(review): the restore path hides `> dd > dl > dd` while the click
    // handler hides `> dd p/pre/blockquote/ul` — the two selector sets are not
    // symmetric; confirm this asymmetry is intentional.
    if ( $.cookie('kivy.toggledesc') == 'true' ) {
        $('div.body dl.api-level > dd > dl > dd').hide();
        $('#api-toggle-desc').removeClass('showed');
        $('#api-toggle-desc').html('Show Descriptions ⇓');
    }
    if ( $.cookie('kivy.toggleall') == 'true' ) {
        $('div.body dl.api-level > dd').hide();
        $('#api-toggle').removeClass('showed');
        $('#api-toggle').html('Expand All ⇓');
    }
    //----------------------------------------------------------------------------
    // Reduce the TOC page
    //----------------------------------------------------------------------------
    // Flatten the second sidebar list one level and drop empty <ul>s.
    var ul = $('div.sphinxsidebarwrapper h3:eq(1) + ul > li > ul');
    $('div.sphinxsidebarwrapper h3:eq(1) + ul').detach();
    ul.insertAfter($('div.sphinxsidebarwrapper h3:eq(1)'));
    $("div.sphinxsidebarwrapper ul").each(function() {
        if ($(this).children().length < 1)
            $(this).remove()
    });
    //----------------------------------------------------------------------------
    // Menu navigation
    //----------------------------------------------------------------------------
    // Top-level sidebar links become accordion toggles on non-API pages.
    $('div.sphinxsidebarwrapper > ul > li > a').each(function(index, item) {
        $(item)
            .attr('href', '#')
            .addClass('mainlevel');
        if ( !is_api ) {
            $(item)
                .bind('mousedown', function() {
                    // Collapse every submenu except this item's own, then toggle it.
                    $('div.sphinxsidebar ul li ul').filter(function (index, child) {
                        if (child != $(item).parent().children('ul').get(0)) return child;
                    }).slideUp();
                    $(item).parent().children('ul').slideToggle();
                });
        }
    })
    $('div.sphinxsidebarwrapper li.current').parent().show();
    if ( !is_api ) {
        $('div.sphinxsidebarwrapper ul li').each(function(index, item) {
            if ($(item).children('ul').length > 0) {
                $(item).children('a').addClass('togglable');
            }
        });
    }
    // FIXME
    $('div.sphinxsidebar a[href$="api-kivy.html"]').parent().parent().addClass('api-index');
    $('div.sphinxsidebar a[href$="api-kivy.utils.html"]').parent().parent().addClass('api-index');
    $('li.current.toctree-l2').slice(0, -1).removeClass('current');
    // Rewrite API-index link labels to bare module names ("api-X.html" -> "X").
    $('ul.api-index a').each(function(index, item) {
        var url = $(item).attr('href').slice(0, -5);
        if (url == '') {
            $(item).attr('href', location.pathname);
            url = location.pathname.slice(0, -5);
        }
        url = url.substr(url.search('api-') + 4);
        $(item).empty().append(url);
    });
    // Hide API section if we are not in the API.
    // or hide all the others sections if we are in the API
    if ( is_api ) {
        $('div.sphinxsidebarwrapper > ul > li > ul').filter(
            function(index, item) {
                if (! $(item).hasClass('api-index'))
                    return item;
            }).parent().hide();
        $('.nav-api').addClass('current');
        $('body').addClass('is-api');
    } else {
        $('div.sphinxsidebarwrapper > ul > li > ul').filter(
            function(index, item) {
                if ($(item).hasClass('api-index'))
                    return item;
            }).parent().hide();
        $('.nav-guides').addClass('current');
    }
    if ( is_api ) {
        $('.toc').hide();
        // Resolve API version
        // Extracts "New in version X." text from a versionadded block (and
        // removes the block); falls back to default_version when absent.
        function read_version(item, default_version) {
            if ( item === undefined )
                return default_version;
            var version = item.find('p').text();
            if ( version == "" )
                return default_version;
            item.detach();
            version = version.replace('New in version ', '');
            if ( version.substr(-1) == '.' )
                version = version.substr(0, version.length - 1);
            return version;
        }
        //function read_version(item, version) { return version; }
        // get module version
        var module_version = read_version($('div.body > div.section > div.versionadded'), '1.0.0');
        var html_version = '<span class="versionadded">Added in <span>' + module_version + '</span></span>';
        $('div.bodyshortcut').append(html_version);
        // resolve class version, default to module if nothing has been found
        $('div.section > dl[class]').each(function (i1, el_class) {
            var rel_class = $(el_class);
            var class_version = read_version(
                rel_class.find('> dd > div.versionadded'), module_version);
            var html_version = '<span class="versionadded">Added in <span>' + class_version + '</span></span>';
            rel_class.find('> dt').append(html_version);
            // resolve method / attr version
            rel_class.find('> dd > dl[class]').each(function (i2, el_methattr) {
                var rel_methattr = $(el_methattr);
                var methattr_version = read_version(
                    rel_methattr.find('> dd > div.versionadded'), class_version);
                var html_version = '<span class="versionadded">Added in <span>' + methattr_version + '</span></span>';
                rel_methattr.find('> dt').append(html_version);
            });
        });
    } else {
        // Non-API page: hide an empty TOC and prefix the title with its section.
        if ($('.toc > ul > li> ul').length < 1)
            $('.toc').hide();
        var section_title = $('li.toctree-l1.current > a').text();
        $('div.body h1:eq(0)').prepend(section_title + ' » ');
    }
});
|
// LZW encoder/decoder with bookkeeping tables used to drive a vis.js
// step-by-step visualisation (setNodesDataSet / setEdgesDataSet).
//
// NOTE(review): `kw` is used both as an Array (numeric pushes of
// {id, token, salida} records) and as a string-keyed map
// (kw["wk"] = ascii, read back as this.kw[w]); searchDic only scans the
// numeric entries. Confirm this dual use is intentional before refactoring.
class LZW {
    constructor(){
        this.kw = [];                // dictionary entries (dual-use, see NOTE above)
        this.tokens = [];            // token table rows: {id, token}
        this.tk_id = 0;              // next token id to assign
        this.tabla = []              // per-step trace rows for the visualisation
        this.nivel = 1;              // current visualisation level (row depth)
        this.resultado = '';         // last encode (or decode) output string
        this.resultadoDecode = null; // decoded text, when available
    }
    // Encode `cadena`; `key` is the char code assigned to the first new
    // dictionary entry (subsequent entries use key+1, key+2, ...). Also fills
    // `tabla` with one trace row per step for the graph view.
    encode = (cadena , key) => {
        this.kw = [] ;
        var dato = (cadena + "").split(""); // split the input into single characters
        this.tokens = []
        this.tk_id = 0;
        var i = 0;
        var ascii = key;
        // Seed the token table with every distinct input character.
        while(i<dato.length){
            if(this.search(dato[i]) == false){
                this.tokens.push({id:String(dato[i]), token:parseInt(this.tk_id)});
                this.tk_id ++;
            }
            i++;
        }
        var salida = [];
        var k = dato[0];
        this.tabla.push({w:" ", k:k, wK:String(k), dic:" ", salida:" " ,nivel:this.nivel})
        this.nivel ++;
        var w = k; // current prefix (classic LZW "w" / "p")
        for(var i=1;i<dato.length;i++){
            k = dato[i] // next input character
            if(this.searchDic(String(w)+String(k))==false){ // w+k not in dictionary yet
                this.tabla.push({w:w, k:k, wK:String(w+k), dic:String(w+k)+" "+this.tk_id,salida:ascii,nivel:this.nivel})
                this.nivel ++;
                // Emit the code for w: a dictionary code for multi-char
                // prefixes, otherwise the raw char code.
                salida.push(w.length > 1 ? this.kw[w] : w.charCodeAt(0));
                this.tokens.push({id:String(w)+String(k), token:parseInt(this.tk_id)})
                this.kw[String(w)+String(k)] = ascii
                this.kw.push({id:String(w)+String(k), token:this.tk_id, salida:ascii})
                this.tk_id++;
                w = k;
                ascii++;
            }else{
                this.tabla.push({w:w, k:k, wK:String(w+k), dic:" ", salida: " ", nivel:this.nivel})
                this.nivel ++;
                w += k
            }
        }
        // Flush the final prefix.
        this.tabla.push({w:w, k:" ", wK:String(w), dic:" ", salida: " ", nivel:this.nivel})
        this.nivel ++;
        salida.push(w.length > 1 ? this.kw[w] : w.charCodeAt(0));
        // Pack the emitted codes into a string, one char per code.
        for (var i=0; i<salida.length; i++) {
            salida[i] = String.fromCharCode(salida[i]);
        }
        this.resultado = salida.join("")
        return salida.join("")
    }
    // Decode `this.resultado` back to text; `key` must match the key used by
    // encode (codes below `key` are literal characters).
    decode(key) {
        var texto = this.resultado
        var diccionario = {};
        var dato = (texto + "").split("");
        var temp = dato[0];
        var regresoFrase = temp;
        var salida = [temp];
        var ascii = key;
        var palabbra;
        for (var i=1; i<dato.length; i++) {
            var aux = dato[i].charCodeAt(0);
            if (aux < key) {
                palabbra = dato[i]; // literal character
            }
            else {
                // Known dictionary code, or the classic KwKwK special case.
                palabbra = diccionario[aux] ? diccionario[aux] : (regresoFrase + temp);
            }
            salida.push(palabbra);
            temp = palabbra.charAt(0);
            diccionario[ascii] = regresoFrase + temp;
            ascii++;
            regresoFrase = palabbra;
        }
        // NOTE(review): this overwrites `resultado` (the encoded text) with
        // the decoded output, so decode() is not repeatable — confirm intent.
        this.resultado = salida.join("")
        return this.resultado;
    }
    // Linear scan of the token table for an exact id match.
    search = (dato) => {
        var i = 0;
        while(i<this.tokens.length){
            if(this.tokens[i].id == dato){
                return true
            }
            i++;
        }
        return false
    }
    // Linear scan of the dictionary's numeric entries for an exact id match.
    searchDic = (dato) => {
        for(var i=0;i<this.kw.length;i++){
            if(this.kw[i].id == dato){
                return true;
            }
        }
        return false;
    }
    // Graph rendering (vis.js)
    // Builds the vis.js node set: a header row (level 0) plus one
    // w/k/wK/dic/salida quintet per trace row, then the result node(s).
    setNodesDataSet = () => {
        var dot = [];
        var i = 0;
        var nodoId = 0;
        dot.push({id:nodoId, label:"w", level:0});
        nodoId++;
        dot.push({id:nodoId, label:"k", level:0});
        nodoId++;
        dot.push({id:nodoId, label:"wK", level:0});
        nodoId++;
        dot.push({id:nodoId, label:"Agregar a Diccionario", level:0});
        nodoId++;
        dot.push({id:nodoId, label:"Salida", level:0});
        nodoId++;
        while(i<this.tabla.length){
            dot.push({id:nodoId, label:String(this.tabla[i].w), level:this.tabla[i].nivel});
            nodoId++;
            dot.push({id:nodoId, label:String(this.tabla[i].k), level:this.tabla[i].nivel});
            nodoId++;
            dot.push({id:nodoId, label:String(this.tabla[i].wK), level:this.tabla[i].nivel});
            nodoId++;
            dot.push({id:nodoId, label:String(this.tabla[i].dic), level:this.tabla[i].nivel});
            nodoId++;
            dot.push({id:nodoId, label:String(this.tabla[i].salida), level:this.tabla[i].nivel});
            nodoId++;
            i++;
        }
        dot.push({id:nodoId, label:this.resultado, level:this.nivel});
        nodoId++;
        if(this.resultadoDecode!=null){
            dot.push({id:nodoId, label:this.resultadoDecode, level:parseInt(this.nivel+1)});
        }
        return dot;
    }
    // Builds the vis.js edge set matching setNodesDataSet's layout: each node
    // links to the node below it (+5, same column next row) and to its
    // right-hand neighbour (+1), except on the last row.
    setEdgesDataSet = () => {
        var dot = [];
        var i = 0;
        var nodoId = 0;
        dot.push({from:parseInt(nodoId), to:parseInt(nodoId+1), arrows: "to"});
        dot.push({from:parseInt(nodoId), to:parseInt(nodoId+5), arrows: "to"});
        nodoId++;
        dot.push({from:parseInt(nodoId), to:parseInt(nodoId+5), arrows: "to"});
        dot.push({from:parseInt(nodoId), to:parseInt(nodoId+1), arrows: "to"});
        nodoId++;
        dot.push({from:parseInt(nodoId), to:parseInt(nodoId+5), arrows: "to"});
        dot.push({from:parseInt(nodoId), to:parseInt(nodoId+1), arrows: "to"});
        nodoId++;
        dot.push({from:parseInt(nodoId), to:parseInt(nodoId+5), arrows: "to"});
        dot.push({from:parseInt(nodoId), to:parseInt(nodoId+1), arrows: "to"});
        nodoId++;
        dot.push({from:parseInt(nodoId), to:parseInt(nodoId+5), arrows: "to"});
        nodoId++;
        while(i<this.tabla.length){
            if(this.tabla[i+1]!=null){
                dot.push({from:parseInt(nodoId), to:parseInt(nodoId+5), arrows: "to"});
                dot.push({from:parseInt(nodoId), to:parseInt(nodoId+1), arrows: "to"});
                nodoId++;
                dot.push({from:parseInt(nodoId), to:parseInt(nodoId+5), arrows: "to"});
                dot.push({from:parseInt(nodoId), to:parseInt(nodoId+1), arrows: "to"});
                nodoId++;
                dot.push({from:parseInt(nodoId), to:parseInt(nodoId+5), arrows: "to"});
                dot.push({from:parseInt(nodoId), to:parseInt(nodoId+1), arrows: "to"});
                nodoId++;
                dot.push({from:parseInt(nodoId), to:parseInt(nodoId+5), arrows: "to"});
                dot.push({from:parseInt(nodoId), to:parseInt(nodoId+1), arrows: "to"});
                nodoId++;
                dot.push({from:parseInt(nodoId), to:parseInt(nodoId+5), arrows: "to"});
                nodoId++;
            }else{
                // Last trace row: only horizontal links, then one edge down
                // to the result node.
                dot.push({from:parseInt(nodoId), to:parseInt(nodoId+1), arrows: "to"});
                nodoId++;
                dot.push({from:parseInt(nodoId), to:parseInt(nodoId+1), arrows: "to"});
                nodoId++;
                dot.push({from:parseInt(nodoId), to:parseInt(nodoId+1), arrows: "to"});
                nodoId++;
                dot.push({from:parseInt(nodoId), to:parseInt(nodoId+1), arrows: "to"});
                nodoId++;
                dot.push({from:parseInt(nodoId), to:parseInt(nodoId+5), arrows: "to"});
                nodoId++;
            }
            i++;
        }
        dot.push({from:parseInt(nodoId-1), to:parseInt(nodoId), arrows: "to"});
        nodoId++;
        if(this.resultadoDecode!=null){
            dot.push({from:parseInt(nodoId-1), to:parseInt(nodoId), arrows: "to"});
        }
        return dot;
    }
    // Returns the encoded result as a plain string.
    generateJSON = () => {
        return String(this.resultado)
    }
}
export default LZW;
// module.exports = LZW;
|
// unionBy(a, b, fn): every element of `a` (deduplicated by identity) followed
// by the elements of `b` whose fn-key does not already occur in `a`.
const unionBy = (a, b, fn) => {
  const keysOfA = new Set();
  for (const item of a) {
    keysOfA.add(fn(item));
  }
  const extras = b.filter(item => !keysOfA.has(fn(item)));
  return [...new Set([...a, ...extras])];
};
module.exports = unionBy;
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Internal utiltiy functions for implementing TransitionKernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
# Alias for the TF1-era eager module.
# NOTE(review): `tf.contrib` was removed in TensorFlow 2.x; this file targets
# the TF1 API.
tfe = tf.contrib.eager

# Public API of this module.
__all__ = [
    'choose',
    'is_list_like',
    'is_namedtuple_like',
    'make_name',
    'maybe_call_fn_and_grads',
    'safe_sum',
    'set_doc',
    'smart_for_loop',
]
def is_list_like(x):
  """Return `True` when `x` is a `list` or `tuple`, `False` otherwise."""
  return isinstance(x, (list, tuple))
def is_namedtuple_like(x):
  """Helper which returns `True` if input is `collections.namedtuple`-like.

  "Namedtuple-like" means `x` exposes a `_fields` attribute and every listed
  field is itself gettable; duck typing keeps user-defined structures working.
  """
  fields = getattr(x, '_fields', None)
  if fields is None:
    return False
  try:
    for field_name in fields:
      getattr(x, field_name)
  except AttributeError:
    return False
  return True
def make_name(super_name, default_super_name, sub_name):
  """Build a `str` scope name; useful for `tf.name_scope`.

  Falls back to `default_super_name` when `super_name` is `None`, and appends
  `'_' + sub_name` when a sub name is given.
  """
  base = default_super_name if super_name is None else super_name
  return base if sub_name is None else base + '_' + sub_name
def _choose_base_case(is_accepted,
                      accepted,
                      rejected,
                      name=None):
  """Helper to `choose` which expand_dims `is_accepted` and applies tf.where.

  Leaf-level implementation used by `choose`: broadcasts the boolean
  `is_accepted` mask up to the rank of the candidate tensors, then selects
  elementwise between `accepted` and `rejected`. Lists/tuples are handled
  pairwise.

  NOTE(review): written against the TF1 graph API (`tf.name_scope` with a
  `values=` argument); it would need porting for TF2.
  """
  def _expand_is_accepted_like(x):
    """Helper to expand `is_accepted` like the shape of some input arg."""
    with tf.name_scope('expand_is_accepted_like'):
      # Append singleton dims so is_accepted has the same rank as `x`...
      expand_shape = tf.concat([
          tf.shape(is_accepted),
          tf.ones([tf.rank(x) - tf.rank(is_accepted)],
                  dtype=tf.int32),
      ], axis=0)
      # ...then tile those singleton dims out to x's trailing shape.
      multiples = tf.concat([
          tf.ones([tf.rank(is_accepted)], dtype=tf.int32),
          tf.shape(x)[tf.rank(is_accepted):],
      ], axis=0)
      m = tf.tile(tf.reshape(is_accepted, expand_shape),
                  multiples)
      # Keep the best statically-known shape information.
      m.set_shape(m.shape.merge_with(x.shape))
      return m
  def _where(accepted, rejected):
    # Elementwise select between one accepted/rejected tensor pair.
    accepted = tf.convert_to_tensor(accepted, name='accepted')
    rejected = tf.convert_to_tensor(rejected, name='rejected')
    r = tf.where(_expand_is_accepted_like(accepted), accepted, rejected)
    r.set_shape(r.shape.merge_with(accepted.shape.merge_with(rejected.shape)))
    return r
  with tf.name_scope(name, 'choose', values=[
      is_accepted, accepted, rejected]):
    if not is_list_like(accepted):
      return _where(accepted, rejected)
    # Pairwise over list members; namedtuple members recurse via `choose`
    # so nested structures are supported.
    return [(choose(is_accepted, a, r, name=name) if is_namedtuple_like(a)
             else _where(a, r))
            for a, r in zip(accepted, rejected)]
def choose(is_accepted, accepted, rejected, name=None):
  """Helper which expand_dims `is_accepted` then applies tf.where.

  Recurses through namedtuple fields; anything that is not namedtuple-like is
  handed to `_choose_base_case` for the actual elementwise selection.
  """
  if not is_namedtuple_like(accepted):
    return _choose_base_case(is_accepted, accepted, rejected, name=name)
  if not isinstance(accepted, type(rejected)):
    raise TypeError('Type of `accepted` ({}) must be identical to '
                    'type of `rejected` ({})'.format(
                        type(accepted).__name__,
                        type(rejected).__name__))
  # Select field-by-field, then rebuild the same namedtuple type.
  chosen_fields = {
      field: choose(is_accepted,
                    getattr(accepted, field),
                    getattr(rejected, field),
                    name=name)
      for field in accepted._fields
  }
  return type(accepted)(**chosen_fields)
def safe_sum(x, alt_value=-np.inf, name=None):
  """Elementwise adds list members, replacing non-finite results with alt_value.

  Args:
    x: Python `list` of `Tensors` to elementwise add.
    alt_value: Python scalar used to replace any elementwise sums which would
      otherwise be non-finite.
    name: Python `str` name prefixed to Ops created by this function.
      Default value: `None` (i.e., "safe_sum").

  Returns:
    safe_sum: `Tensor` representing the elementwise sum of list of `Tensor`s
      `x` or `alt_value` where sums are non-finite.

  Raises:
    TypeError: if `x` is not list-like.
    ValueError: if `x` is empty.
  """
  with tf.name_scope(name, 'safe_sum', [x, alt_value]):
    if not is_list_like(x):
      raise TypeError('Expected list input.')
    if not x:
      raise ValueError('Input should not be empty.')
    n = np.int64(len(x))
    in_shape = x[0].shape
    # Stack the summands along a new trailing axis so all of them can be
    # examined elementwise at once.
    x = tf.stack(x, axis=-1)
    # The sum is NaN if any element is NaN or we see both +Inf and -Inf. Thus
    # we will replace such rows with the `alt_value`. Typically the `alt_value`
    # is chosen so the `MetropolisHastings` `TransitionKernel` always rejects
    # the proposal.
    # Regarding the following float-comparisons, recall comparing with NaN is
    # always False, i.e., we're implicitly capturing NaN and explicitly
    # capturing +/- Inf.
    is_sum_determinate = (
        tf.reduce_all(tf.is_finite(x) | (x >= 0.), axis=-1) &
        tf.reduce_all(tf.is_finite(x) | (x <= 0.), axis=-1))
    # Broadcast the per-element verdict back over the stacking axis so it can
    # mask every summand of an indeterminate row.
    is_sum_determinate = tf.tile(
        is_sum_determinate[..., tf.newaxis],
        multiples=tf.concat([tf.ones(tf.rank(x) - 1, dtype=tf.int64), [n]],
                            axis=0))
    alt_value = np.array(alt_value, x.dtype.as_numpy_dtype)
    x = tf.where(is_sum_determinate, x, tf.fill(tf.shape(x), value=alt_value))
    x = tf.reduce_sum(x, axis=-1)
    x.set_shape(x.shape.merge_with(in_shape))
    return x
def set_doc(value):
  """Decorator to programmatically set a function docstring."""
  def _apply(func):
    # Overwrite the wrapped function's docstring and hand it back unchanged.
    func.__doc__ = value
    return func
  return _apply
def _value_and_gradients(fn, fn_arg_list, result=None, grads=None, name=None):
  """Helper to `maybe_call_fn_and_grads`.

  Computes `fn(*fn_arg_list)` (unless a precomputed `result` is supplied) and
  the gradients of the result with respect to each argument, handling both
  graph mode and eager mode.
  """
  with tf.name_scope(name, 'value_and_gradients', [fn_arg_list, result, grads]):
    def _convert_to_tensor(x, name):
      # Convert a value -- or each element of a list -- to a Tensor, passing
      # `None` through untouched.
      ctt = lambda x_: x_ if x_ is None else tf.convert_to_tensor(x_, name=name)
      return [ctt(x_) for x_ in x] if is_list_like(x) else ctt(x)
    fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
                   else [fn_arg_list])
    fn_arg_list = _convert_to_tensor(fn_arg_list, 'fn_arg')
    if result is None:
      result = fn(*fn_arg_list)
      if grads is None and tf.executing_eagerly():
        # Ensure we disable bijector cacheing in eager mode.
        # TODO(b/72831017): Remove this once bijector cacheing is fixed for
        # eager mode.
        fn_arg_list = [0 + x for x in fn_arg_list]
    result = _convert_to_tensor(result, 'fn_result')
    if grads is not None:
      # Caller supplied the gradients; nothing left to compute.
      grads = _convert_to_tensor(grads, 'fn_grad')
      return result, grads
    if tf.executing_eagerly():
      if is_list_like(result) and len(result) == len(fn_arg_list):
        # Compute the block diagonal of Jacobian.
        # TODO(b/79158574): Guard this calculation by an arg which explicitly
        # requests block diagonal Jacobian calculation.
        def make_fn_slice(i):
          """Needed to prevent `cell-var-from-loop` pylint warning."""
          return lambda *args: fn(*args)[i]
        grads = [
            tfe.gradients_function(make_fn_slice(i))(*fn_arg_list)[i]
            for i in range(len(result))
        ]
      else:
        grads = tfe.gradients_function(fn)(*fn_arg_list)
    else:
      if is_list_like(result) and len(result) == len(fn_arg_list):
        # Compute the block diagonal of Jacobian.
        # TODO(b/79158574): Guard this calculation by an arg which explicitly
        # requests block diagonal Jacobian calculation.
        grads = [tf.gradients(result[i], fn_arg_list[i])[0]
                 for i in range(len(result))]
      else:
        grads = tf.gradients(result, fn_arg_list)
    return result, grads
def maybe_call_fn_and_grads(fn,
                            fn_arg_list,
                            result=None,
                            grads=None,
                            check_non_none_grads=True,
                            name=None):
  """Calls `fn` and computes the gradient of the result wrt `args_list`.

  Thin validation wrapper around `_value_and_gradients`: it additionally
  checks that the result has a floating dtype, that there is exactly one
  gradient per argument, and (optionally) that no gradient is `None`.
  """
  with tf.name_scope(name, 'maybe_call_fn_and_grads',
                     [fn_arg_list, result, grads]):
    fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
                   else [fn_arg_list])
    result, grads = _value_and_gradients(fn, fn_arg_list, result, grads)
    # Gradients only make sense for floating-point results.
    if not all(r.dtype.is_floating
               for r in (result if is_list_like(result) else [result])):  # pylint: disable=superfluous-parens
      raise TypeError('Function result must be a `Tensor` with `float` '
                      '`dtype`.')
    if len(fn_arg_list) != len(grads):
      raise ValueError('Function args must be in one-to-one correspondence '
                       'with grads.')
    # A `None` gradient usually means `fn` does not depend on some argument.
    if check_non_none_grads and any(g is None for g in grads):
      raise ValueError('Encountered `None` gradient.\n'
                       '  fn_arg_list: {}\n'
                       '  grads: {}'.format(fn_arg_list, grads))
    return result, grads
def smart_for_loop(loop_num_iter, body_fn, initial_loop_vars,
                   parallel_iterations=10, name=None):
  """Construct a for loop, preferring a python loop if `n` is statically known.

  Given `loop_num_iter` and `body_fn`, return an op corresponding to executing
  `body_fn` `loop_num_iter` times, feeding previous outputs of `body_fn` into
  the next iteration.

  If `loop_num_iter` is statically known, the op is constructed via python for
  loop, and otherwise a `tf.while_loop` is used.

  Args:
    loop_num_iter: `Integer` `Tensor` representing the number of loop
      iterations.
    body_fn: Callable to be executed `loop_num_iter` times.
    initial_loop_vars: Listlike object of `Tensors` to be passed in to
      `body_fn`'s first execution.
    parallel_iterations: The number of iterations allowed to run in parallel.
      It must be a positive integer. See `tf.while_loop` for more details.
      Default value: `10`.
    name: Python `str` name prefixed to Ops created by this function.
      Default value: `None` (i.e., "smart_for_loop").

  Returns:
    result: `Tensor` representing applying `body_fn` iteratively `n` times.
  """
  with tf.name_scope(
      name, 'smart_for_loop', [loop_num_iter, initial_loop_vars]):
    # Try to resolve the iteration count to a Python int at graph-build time.
    loop_num_iter_ = tf.contrib.util.constant_value(tf.convert_to_tensor(
        loop_num_iter, dtype=tf.int64, name='loop_num_iter'))
    if loop_num_iter_ is None or tf.contrib.eager.executing_eagerly():
      # Unknown count (or eager mode): fall back to a dynamic while_loop,
      # threading an explicit counter as the first loop variable and dropping
      # it from the returned values.
      return tf.while_loop(
          cond=lambda i, *args: i < loop_num_iter,
          body=lambda i, *args: [i + 1] + list(body_fn(*args)),
          loop_vars=[np.int64(0)] + initial_loop_vars,
          parallel_iterations=parallel_iterations
      )[1:]
    # Statically known count: unroll as a plain Python loop.
    result = initial_loop_vars
    for _ in range(loop_num_iter_):
      result = body_fn(*result)
    return result
|
import numpy as np
from onadata.apps.api.tools import DECIMAL_PRECISION
from onadata.libs.data.query import get_field_records, get_numeric_fields
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
return a, outaxis
def get_mean(values):
    """Return the arithmetic mean of *values*."""
    values_arr = np.asarray(values)
    return values_arr.mean()
def get_median(values, axis=None):
    """Return the median of *values* along *axis* (flattened when None)."""
    return np.median(values, axis=axis)
def get_mode(values, axis=0):
    """
    Return the modal value(s) of *values* along *axis* and their counts.

    Ties are resolved in favour of the smaller value (candidates are swept in
    ascending order and only a strictly greater count replaces the leader).

    Adapted from
    https://github.com/scipy/scipy/blob/master/scipy/stats/stats.py#L568
    """
    # Normalise input: axis=None means operate on the flattened array.
    if axis is None:
        arr = np.ravel(values)
        axis = 0
    else:
        arr = np.asarray(values)
    unique_scores = np.unique(np.ravel(arr))  # every distinct value, sorted
    counts_shape = list(arr.shape)
    counts_shape[axis] = 1
    best_so_far = np.zeros(counts_shape)
    best_counts = np.zeros(counts_shape)
    # Sweep candidates, keeping whichever has the highest occurrence count.
    for score in unique_scores:
        occurrence = np.expand_dims(np.sum(arr == score, axis), axis)
        mostfrequent = np.where(occurrence > best_counts, score, best_so_far)
        best_counts = np.maximum(occurrence, best_counts)
        best_so_far = mostfrequent
    return mostfrequent, best_counts
def get_median_for_field(field, xform):
    """Median of the records captured for *field* in *xform*."""
    records = get_field_records(field, xform)
    return np.median(records)
def get_median_for_numeric_fields_in_form(xform, field=None):
    """Map each numeric field of *xform* (or just *field*) to its median."""
    fields = [field] if field else get_numeric_fields(xform)
    return {name: get_median_for_field(name, xform) for name in fields}
def get_mean_for_field(field, xform):
    """Mean of the records captured for *field* in *xform*."""
    records = get_field_records(field, xform)
    return np.mean(records)
def get_mean_for_numeric_fields_in_form(xform, field=None):
    """Map each numeric field of *xform* (or just *field*) to its mean.

    Means are rounded to ``DECIMAL_PRECISION`` decimal places.

    BUG FIX: *field* previously had no default, unlike every sibling helper
    (``get_median_for_numeric_fields_in_form``, ``get_mode_for_...``,
    ``get_min_max_range``, ``get_all_stats``), so this function could not be
    called for "all fields" without passing an explicit ``None``. Adding the
    default is backward-compatible.
    """
    data = {}
    for field_name in [field] if field else get_numeric_fields(xform):
        mean = get_mean_for_field(field_name, xform)
        data[field_name] = np.round(mean, DECIMAL_PRECISION)
    return data
def get_mode_for_field(field, xform):
    """Modal value of the records captured for *field* in *xform*."""
    records = np.array(get_field_records(field, xform))
    mode_value, _count = get_mode(records)
    return mode_value
def get_mode_for_numeric_fields_in_form(xform, field=None):
    """Map each numeric field of *xform* (or just *field*) to its mode.

    Modes are rounded to ``DECIMAL_PRECISION`` decimal places.
    """
    fields = [field] if field else get_numeric_fields(xform)
    return {
        name: np.round(get_mode_for_field(name, xform), DECIMAL_PRECISION)
        for name in fields
    }
def get_min_max_range_for_field(field, xform):
    """Return ``(min, max, range)`` of the records for *field* in *xform*."""
    records = np.array(get_field_records(field, xform))
    lowest = records.min()
    highest = records.max()
    return lowest, highest, highest - lowest
def get_min_max_range(xform, field=None):
    """Map each numeric field of *xform* (or just *field*) to min/max/range."""
    stats = {}
    for name in [field] if field else get_numeric_fields(xform):
        lowest, highest, spread = get_min_max_range_for_field(name, xform)
        stats[name] = {'max': highest, 'min': lowest, 'range': spread}
    return stats
def get_all_stats(xform, field=None):
    """Summarise every numeric field of *xform* (or just *field*).

    Each entry maps the field name to its mean, median, mode, max, min and
    range; mean and mode are rounded to ``DECIMAL_PRECISION`` decimals.
    """
    summary = {}
    for name in [field] if field else get_numeric_fields(xform):
        lowest, highest, spread = get_min_max_range_for_field(name, xform)
        summary[name] = {
            'mean': np.round(get_mean_for_field(name, xform), DECIMAL_PRECISION),
            'median': get_median_for_field(name, xform),
            'mode': np.round(get_mode_for_field(name, xform), DECIMAL_PRECISION),
            'max': highest,
            'min': lowest,
            'range': spread,
        }
    return summary
|
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
# Click command: `parsec show_data_table <data_table_id>`.
# Decorator stack (outermost first): registers the command, declares the
# positional argument, injects the Galaxy connection context, converts raised
# exceptions into CLI-friendly errors, and serialises the return value as JSON.
# NOTE: the docstring below is the user-visible `--help` text, so it is left
# untouched.
@click.command('show_data_table')
@click.argument("data_table_id", type=str)
@pass_context
@custom_exception
@json_output
def cli(ctx, data_table_id):
    """Get details of a given data table.
Output:
    A description of the given data table and its content.
    For example::
    {'columns': ['value', 'dbkey', 'name', 'path'],
     'fields': [['test id',
     'test',
     'test name',
     '/opt/galaxy-dist/tool-data/test/seq/test id.fa']],
     'model_class': 'TabularToolDataTable',
     'name': 'all_fasta'}
    """
    # Delegate straight to the bioblend ToolData client on the context.
    return ctx.gi.tool_data.show_data_table(data_table_id)
|
/* eslint-disable react/jsx-one-expression-per-line */
import React from 'react';
import useToggle from 'react-use/lib/useToggle';
import Auth from '@arcblock/did-react/lib/Auth';
import Button from '@arcblock/ux/lib/Button';
import api from '../../libs/api';
export default function TransferTokenOut() {
const [isOpen, setOpen] = useToggle(false);
return (
<React.Fragment>
<Button
color="secondary"
variant="contained"
size="large"
className="action"
onClick={() => setOpen(true)}>
Send 1 Asset to Application
</Button>
{isOpen && (
<Auth
responsive
action="transfer_asset_out"
checkFn={api.get}
socketUrl={api.socketUrl}
onClose={() => setOpen()}
onSuccess={() => window.location.reload()}
messages={{
title: 'Transfer Required',
scan: 'Scan QR code to complete asset transfer',
confirm: 'Confirm on your ABT Wallet',
success: 'Asset transfer sent!',
}}
/>
)}
</React.Fragment>
);
}
|
/* eslint-disable */
export const indent2em = tinymce.PluginManager.add('indent2em', function(editor, url) {
    // Toolbar/menu label for the "first-line indent" toggle.
    var pluginName='首行缩进';
    var global$1 = tinymce.util.Tools.resolve('tinymce.util.Tools');
    // Indent amount, configurable through the `indent2em_val` editor option.
    var indent2em_val = editor.getParam('indent2em_val', '2em');
    // Toggle the first-line indent on every selected block. The first block
    // decides the direction: if it already carries the configured indent the
    // indent is removed everywhere, otherwise it is applied everywhere.
    var doAct = function () {
        var dom = editor.dom;
        var blocks = editor.selection.getSelectedBlocks();
        var act = '';
        global$1.each(blocks, function (block) {
            if(act===''){
                act = dom.getStyle(block,'text-indent')==indent2em_val ? 'remove' : 'add';
            }
            if( act==='add' ){
                dom.setStyle(block, 'text-indent', indent2em_val);
            }else{
                // BUG FIX: removal used to strip the style with a regex that
                // hard-coded "2em" (/text-indent:[\s]*2em;/ig), so a custom
                // `indent2em_val` could never be toggled off. Setting the
                // style to '' makes TinyMCE drop the property regardless of
                // the configured value.
                dom.setStyle(block, 'text-indent', '');
            }
        });
    };
    editor.ui.registry.getAll().icons.indent2em || editor.ui.registry.addIcon('indent2em','<svg viewBox="0 0 1024 1024" xmlns="http://www.w3.org/2000/svg" width="24" height="24"><path d="M170.666667 563.2v-102.4H887.466667v102.4zM170.666667 836.266667v-102.4H887.466667v102.4zM512 290.133333v-102.4H887.466667v102.4zM238.933333 341.333333V136.533333l204.8 102.4z" fill="#2c2c2c" p-id="5210"></path></svg>');
    // Keep the toolbar button's active state in sync with any block that
    // carries a text-indent style.
    var stateSelectorAdapter = function (editor, selector) {
        return function (buttonApi) {
            return editor.selection.selectorChangedWithUnbind(selector.join(','), buttonApi.setActive).unbind;
        };
    };
    editor.ui.registry.addToggleButton('indent2em', {
        icon: 'indent2em',
        tooltip: pluginName,
        onAction: function () {
            doAct();
        },
        onSetup: stateSelectorAdapter(editor, [
            '*[style*="text-indent"]',
            '*[data-mce-style*="text-indent"]',
        ])
    });
    editor.ui.registry.addMenuItem('indent2em', {
        text: pluginName,
        onAction: function() {
            doAct();
        }
    });
    editor.addCommand('indent2em', doAct );
    return {
        getMetadata: function () {
            return {
                name: pluginName,
                url: "http://tinymce.ax-z.cn/more-plugins/indent2em.php",
            };
        }
    };
});
|
import { email } from 'src/core/service/validation.service';
import template from './sw-users-permissions-user-detail.html.twig';
import './sw-users-permissions-user-detail.scss';
const { Component, Mixin } = Shopware;
const { Criteria } = Shopware.Data;
const { mapPropertyErrors } = Component.getComponentHelper();
const { warn } = Shopware.Utils.debug;
// Administration detail page for a single user account: profile fields,
// locale, password changes, ACL roles and API integration access keys.
Component.register('sw-users-permissions-user-detail', {
    template,
    // Injected services: user CRUD/lookup, auth, uniqueness validation,
    // access-key generation, repository factory and ACL checks.
    inject: [
        'userService',
        'loginService',
        'userValidationService',
        'integrationService',
        'repositoryFactory',
        'acl',
    ],
    mixins: [
        Mixin.getByName('notification'),
        Mixin.getByName('salutation'),
    ],
    // Keyboard shortcuts: Ctrl/Cmd+S saves, Escape cancels.
    shortcuts: {
        'SYSTEMKEY+S': 'onSave',
        ESCAPE: 'onCancel',
    },
    data() {
        return {
            isLoading: false,
            userId: '',
            user: null,
            currentUser: null,
            languages: [],
            integrations: [],
            currentIntegration: null,
            mediaItem: null,
            newPassword: '',
            newPasswordConfirm: '',
            isEmailUsed: false,
            isUsernameUsed: false,
            isIntegrationsLoading: false,
            isSaveSuccessful: false,
            isModalLoading: false,
            showSecretAccessKey: false,
            showDeleteModal: null,
            skeletonItemAmount: 3,
            confirmPasswordModal: false,
        };
    },
    metaInfo() {
        return {
            title: this.$createTitle(this.identifier),
        };
    },
    computed: {
        ...mapPropertyErrors('user', [
            'firstName',
            'lastName',
            'email',
            'username',
            'localeId',
        ]),
        identifier() {
            return this.fullName;
        },
        // Salutation-formatted display name; falls back to the "new user"
        // label while the entity has no name yet.
        fullName() {
            return this.salutation(this.user, this.$tc('sw-users-permissions.users.user-detail.labelNewUser'));
        },
        userRepository() {
            return this.repositoryFactory.create('user');
        },
        // Loads the user together with access keys, locale and ACL roles.
        userCriteria() {
            const criteria = new Criteria();
            criteria.addAssociation('accessKeys');
            criteria.addAssociation('locale');
            criteria.addAssociation('aclRoles');
            return criteria;
        },
        aclRoleCriteria() {
            const criteria = new Criteria();
            // Roles created by apps should not be assignable in the admin
            criteria.addFilter(Criteria.equals('app.id', null));
            criteria.addFilter(Criteria.equals('deletedAt', null));
            return criteria;
        },
        languageRepository() {
            return this.repositoryFactory.create('language');
        },
        // Languages sorted by locale name/territory; limit 500 to fetch all
        // in one page.
        languageCriteria() {
            const criteria = new Criteria();
            criteria.addAssociation('locale');
            criteria.addSorting(Criteria.sort('locale.name', 'ASC'));
            criteria.addSorting(Criteria.sort('locale.territory', 'ASC'));
            criteria.limit = 500;
            return criteria;
        },
        localeRepository() {
            return this.repositoryFactory.create('locale');
        },
        avatarMedia() {
            return this.mediaItem;
        },
        isError() {
            return this.isEmailUsed || this.isUsernameUsed || !this.hasLanguage;
        },
        hasLanguage() {
            return this.user && this.user.localeId;
        },
        // Password confirmation button stays disabled until both password
        // fields match and are non-empty.
        disableConfirm() {
            return this.newPassword !== this.newPasswordConfirm || this.newPassword === '' || this.newPassword === null;
        },
        isCurrentUser() {
            if (!this.user || !this.currentUser) {
                return false;
            }
            return this.userId === this.currentUser.id;
        },
        mediaRepository() {
            return this.repositoryFactory.create('media');
        },
        integrationColumns() {
            return [{
                property: 'accessKey',
                label: this.$tc('sw-users-permissions.users.user-detail.labelAccessKey'),
            }];
        },
        // Toggles between masked and plain-text rendering of the secret key.
        secretAccessKeyFieldType() {
            return this.showSecretAccessKey ? 'text' : 'password';
        },
        languageId() {
            return Shopware.State.get('session').languageId;
        },
        tooltipSave() {
            const systemKey = this.$device.getSystemKey();
            return {
                message: `${systemKey} + S`,
                appearance: 'light',
            };
        },
        tooltipCancel() {
            return {
                message: 'ESC',
                appearance: 'light',
            };
        },
    },
    watch: {
        // Re-initialise all data when the session language changes.
        languageId() {
            this.createdComponent();
        },
    },
    created() {
        this.createdComponent();
    },
    methods: {
        // Load languages, the routed user and the logged-in user in parallel.
        createdComponent() {
            this.isLoading = true;
            if (!this.languageId) {
                this.isLoading = false;
                return;
            }
            const languagePromise = new Promise((resolve) => {
                Shopware.State.commit('context/setApiLanguageId', this.languageId);
                resolve(this.languageId);
            });
            const promises = [
                languagePromise,
                this.loadLanguages(),
                this.loadUser(),
                this.loadCurrentUser(),
            ];
            Promise.all(promises).then(() => {
                this.isLoading = false;
            });
        },
        loadLanguages() {
            return this.languageRepository.search(this.languageCriteria).then((result) => {
                this.languages = [];
                result.forEach((lang) => {
                    // Label shown in the language select, e.g. "English (US)".
                    lang.customLabel = `${lang.locale.translated.name} (${lang.locale.translated.territory})`;
                    this.languages.push(lang);
                });
                return this.languages;
            });
        },
        // Fetch the user for the current route id, including access keys,
        // locale and ACL roles, then prepare the access-key repository.
        loadUser() {
            this.userId = this.$route.params.id;
            return this.userRepository.get(this.userId, Shopware.Context.api, this.userCriteria).then((user) => {
                this.user = user;
                if (this.user.avatarId) {
                    this.mediaItem = this.user.avatarMedia;
                }
                this.keyRepository = this.repositoryFactory.create(user.accessKeys.entity, this.user.accessKeys.source);
                this.loadKeys();
            });
        },
        loadCurrentUser() {
            return this.userService.getUser().then((response) => {
                this.currentUser = response.data;
            });
        },
        loadKeys() {
            this.integrations = this.user.accessKeys;
        },
        // Generate a fresh access-key/secret pair and open it in the detail
        // modal; the secret is only visible at creation time.
        addAccessKey() {
            const newKey = this.keyRepository.create();
            this.isModalLoading = true;
            // NOTE(review): `quantityStart` is not an obvious access-key
            // property — confirm the entity definition actually requires it.
            newKey.quantityStart = 1;
            this.integrationService.generateKey({}, {}, true).then((response) => {
                newKey.accessKey = response.accessKey;
                newKey.secretAccessKey = response.secretAccessKey;
                this.currentIntegration = newKey;
                this.isModalLoading = false;
                this.showSecretAccessKey = true;
            });
        },
        // Validate e-mail format locally, then verify uniqueness server-side;
        // rejects (after notifying) on a malformed address.
        checkEmail() {
            if (!this.user.email) {
                return Promise.resolve();
            }
            if (!email(this.user.email)) {
                this.createNotificationError({
                    title: this.$tc('global.default.error'),
                    message: this.$tc(
                        'sw-users-permissions.users.user-detail.notification.invalidEmailErrorMessage',
                    ),
                });
                return Promise.reject();
            }
            return this.userValidationService.checkUserEmail({
                email: this.user.email,
                id: this.user.id,
            }).then(({ emailIsUnique }) => {
                this.isEmailUsed = !emailIsUnique;
            });
        },
        checkUsername() {
            return this.userValidationService.checkUserUsername({
                username: this.user.username,
                id: this.user.id,
            }).then(({ usernameIsUnique }) => {
                this.isUsernameUsed = !usernameIsUnique;
            });
        },
        // Resolve the picked media entity and attach it as the user avatar.
        setMediaItem({ targetId }) {
            this.mediaRepository.get(targetId).then((media) => {
                this.mediaItem = media;
                this.user.avatarMedia = media;
                this.user.avatarId = targetId;
            });
        },
        onUnlinkLogo() {
            this.mediaItem = null;
            this.user.avatarMedia = null;
            this.user.avatarId = null;
        },
        // NOTE(review): `clearSelection` is not defined in this component —
        // presumably provided by a mixin or overridden elsewhere; verify.
        onSearch(value) {
            this.term = value;
            this.clearSelection();
        },
        saveFinish() {
            this.isSaveSuccessful = false;
        },
        // Saving always goes through the password confirmation modal first;
        // the modal then invokes `saveUser` with an authenticated context.
        onSave() {
            this.confirmPasswordModal = true;
        },
        saveUser(context) {
            this.isSaveSuccessful = false;
            this.isLoading = true;
            let promises = [];
            if (this.currentUser.id === this.user.id) {
                promises = [Shopware.Service('localeHelper').setLocaleWithId(this.user.localeId)];
            }
            // NOTE(review): `this.checkEmail()` is invoked immediately and its
            // *promise* (not a callback) is passed to `.then`; `.then` ignores
            // non-function arguments, so the e-mail check and save chain run
            // concurrently with the locale promise rather than after it —
            // confirm this ordering is intended.
            return Promise.all(promises).then(
                this.checkEmail()
                    .then(() => {
                        if (!this.isEmailUsed) {
                            this.isLoading = true;
                            const titleSaveError = this.$tc('global.default.error');
                            const messageSaveError = this.$tc(
                                'sw-users-permissions.users.user-detail.notification.saveError.message',
                                0,
                                { name: this.fullName },
                            );
                            return this.userRepository.save(this.user, context).then(() => {
                                return this.updateCurrentUser();
                            }).then(() => {
                                // Reload everything so the page reflects the
                                // persisted state.
                                this.createdComponent();
                                this.confirmPasswordModal = false;
                                this.isSaveSuccessful = true;
                            }).catch((exception) => {
                                this.createNotificationError({
                                    title: titleSaveError,
                                    message: messageSaveError,
                                });
                                warn(this._name, exception.message, exception.response);
                                this.isLoading = false;
                                throw exception;
                            })
                                .finally(() => {
                                    this.isLoading = false;
                                });
                        }
                        this.createNotificationError({
                            message: this.$tc(
                                'sw-users-permissions.users.user-detail.notification.duplicateEmailErrorMessage',
                            ),
                        });
                        return Promise.resolve();
                    })
                    .catch(() => Promise.reject())
                    .finally(() => {
                        this.isLoading = false;
                    }),
            );
        },
        // Refresh the session user in the store; the password is stripped so
        // it never lands in client state.
        updateCurrentUser() {
            return this.userService.getUser().then((response) => {
                const data = response.data;
                delete data.password;
                return Shopware.State.commit('setCurrentUser', data);
            });
        },
        onCancel() {
            this.$router.push({ name: 'sw.users.permissions.index' });
        },
        // An empty string clears the pending password change; any other value
        // is staged on the user entity for the next save.
        setPassword(password) {
            if (typeof password === 'string' && password.length <= 0) {
                this.$delete(this.user, 'password');
                return;
            }
            this.$set(this.user, 'password', password);
        },
        // No id means "create a new key"; otherwise open the existing one.
        onShowDetailModal(id) {
            if (!id) {
                this.addAccessKey();
                return;
            }
            this.currentIntegration = this.user.accessKeys.get(id);
        },
        onCloseDetailModal() {
            this.currentIntegration = null;
            this.showSecretAccessKey = false;
            this.isModalLoading = false;
        },
        onSaveIntegration() {
            if (!this.currentIntegration) {
                return;
            }
            if (!this.user.accessKeys.has(this.currentIntegration.id)) {
                this.user.accessKeys.add(this.currentIntegration);
            }
            this.onCloseDetailModal();
        },
        onCloseDeleteModal() {
            this.showDeleteModal = null;
        },
        onConfirmDelete(id) {
            if (!id) {
                return;
            }
            this.onCloseDeleteModal();
            this.user.accessKeys.remove(id);
        },
        onCloseConfirmPasswordModal() {
            this.confirmPasswordModal = false;
        },
    },
});
|
// Traditional Chinese (Hong Kong) locale strings for the monitoring UI.
// Keys are either the English source strings or camelCase message ids; the
// values are the zh-HK translations. Do not reorder or rename keys — lookup
// is by key.
export default {
    languageName: "繁體中文 (香港)",
    Settings: "設定",
    // NOTE(review): "錶板" looks truncated — "儀表板" is the usual rendering
    // of "Dashboard"; confirm with the translation owner before changing.
    Dashboard: "錶板",
    "New Update": "有更新",
    Language: "語言",
    Appearance: "外觀",
    Theme: "主題",
    General: "一般",
    Version: "版本",
    "Check Update On GitHub": "到 Github 查看更新",
    List: "列表",
    Add: "新增",
    "Add New Monitor": "新增監測器",
    "Quick Stats": "綜合數據",
    Up: "上線",
    Down: "離線",
    Pending: "待定",
    Unknown: "不明",
    Pause: "暫停",
    Name: "名稱",
    Status: "狀態",
    DateTime: "日期時間",
    Message: "內容",
    "No important events": "沒有重要事件",
    Resume: "恢復",
    Edit: "編輯",
    Delete: "刪除",
    Current: "目前",
    Uptime: "上線率",
    "Cert Exp.": "証書期限",
    days: "日",
    day: "日",
    "-day": "日",
    hour: "小時",
    "-hour": "小時",
    // {0} is interpolated with the interval in seconds.
    checkEverySecond: "每 {0} 秒檢查一次",
    "Avg.": "平均",
    Response: "反應時間",
    Ping: "反應時間",
    "Monitor Type": "監測器類型",
    Keyword: "關鍵字",
    "Friendly Name": "名稱",
    URL: "網址 URL",
    Hostname: "Hostname",
    Port: "Port",
    "Heartbeat Interval": "檢查間距",
    Retries: "重試數次確定為離線",
    retriesDescription: "重試多少次後才判定為離線及傳送通知。如數值為 0 會即判定為離線及傳送通知。",
    Advanced: "進階",
    ignoreTLSError: "忽略 TLS/SSL 錯誤",
    "Upside Down Mode": "反轉模式",
    upsideDownModeDescription: "反轉狀態,如網址是可正常瀏覽,會被判定為 '離線/DOWN'",
    "Max. Redirects": "跟隨重新導向 (Redirect) 的次數",
    maxRedirectDescription: "設為 0 即不跟蹤",
    "Accepted Status Codes": "接受為上線的 HTTP 狀態碼",
    acceptedStatusCodesDescription: "可多選",
    Save: "儲存",
    Notifications: "通知",
    "Not available, please setup.": "無法使用,需要設定",
    "Setup Notification": "設定通知",
    Light: "明亮",
    Dark: "暗黑",
    Auto: "自動",
    "Theme - Heartbeat Bar": "監測器列表 狀態條外觀",
    Normal: "一般",
    Bottom: "下方",
    None: "沒有",
    Timezone: "時區",
    "Search Engine Visibility": "是否允許搜尋器索引",
    "Allow indexing": "允許索引",
    "Discourage search engines from indexing site": "不建議搜尋器索引",
    "Change Password": "變更密碼",
    "Current Password": "目前密碼",
    "New Password": "新密碼",
    "Repeat New Password": "確認新密碼",
    passwordNotMatchMsg: "密碼不一致",
    "Update Password": "更新密碼",
    "Disable Auth": "取消登入認証",
    "Enable Auth": "開啟登入認証",
    Logout: "登出",
    notificationDescription: "新增後,你需要在監測器裡啟用。",
    Leave: "離開",
    "I understand, please disable": "我明白,請取消登入認証",
    Confirm: "確認",
    Yes: "是",
    No: "否",
    Username: "帳號",
    Password: "密碼",
    "Remember me": "記住我",
    Login: "登入",
    "No Monitors, please": "沒有監測器,請",
    "add one": "新增",
    "Notification Type": "通知類型",
    "Email": "電郵",
    "Test": "測試",
}
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from ..foundations import hparams
from ..lottery.desc import LotteryDesc
from ..models import base
from ..pruning import sparse_global
class Model(base.Model):
    """A small CNN for 28x28 single-channel (Fashion-MNIST style) images.

    Two conv -> batchnorm -> ReLU -> maxpool stages followed by a single
    fully-connected classifier layer.
    """

    def __init__(self, plan, initializer, outputs=10):
        """Build the network.

        Args:
            plan: Unused; kept for interface compatibility with other models.
            initializer: Callable applied to every module via ``self.apply``
                to initialise weights.
            outputs: Number of classifier outputs (default 10).
        """
        super(Model, self).__init__()
        self.criterion = nn.CrossEntropyLoss()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        # BUG FIX: the classifier previously hard-coded 10 output units,
        # silently ignoring the `outputs` argument. Honour it; the default
        # of 10 preserves the original behaviour for existing callers.
        self.fc = nn.Linear(7 * 7 * 32, outputs)
        self.apply(initializer)

    def forward(self, x):
        """Return classifier logits for a batch of (N, 1, 28, 28) inputs."""
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.reshape(out.size(0), -1)
        out = self.fc(out)
        return out

    @property
    def output_layer_names(self):
        # Parameters belonging to the output layer (used e.g. to exempt the
        # classifier from pruning).
        return ['fc.weight', 'fc.bias']

    @staticmethod
    def is_valid_model_name(model_name):
        return (model_name.startswith('fashion_cnn'))

    @staticmethod
    def get_model_from_name(model_name, initializer, outputs=10):
        """Instantiate the model for a registry name like 'fashion_cnn'."""
        if not Model.is_valid_model_name(model_name):
            raise ValueError('Invalid model name: {}'.format(model_name))
        outputs = outputs or 10
        plan = None
        return Model(plan, initializer, outputs)

    @property
    def loss_criterion(self):
        return self.criterion

    @staticmethod
    def default_hparams():
        """Default model/dataset/training/pruning hyperparameters."""
        model_hparams = hparams.ModelHparams(
            model_name='fashion_cnn',
            model_init='kaiming_normal',
            batchnorm_init='uniform',
        )
        # NOTE(review): 'fasion_mnist' looks like a typo for 'fashion_mnist',
        # but it may deliberately match the dataset registry key — confirm
        # against the dataset registry before changing.
        dataset_hparams = hparams.DatasetHparams(
            dataset_name='fasion_mnist',
            batch_size=128
        )
        training_hparams = hparams.TrainingHparams(
            optimizer_name='adam',
            lr=0.01,
            training_steps='40ep',
            momentum=0.5
        )
        pruning_hparams = sparse_global.PruningHparams(
            pruning_strategy='sparse_global',
            pruning_fraction=0.2,
            pruning_layers_to_ignore='fc.weight'
        )
        return LotteryDesc(model_hparams, dataset_hparams, training_hparams, pruning_hparams)
|
/*! WebUploader 0.1.2 */
/**
* @fileOverview 让内部各个部件的Code可以用[amd](https://github.com/amdjs/amdjs-api/wiki/AMD)模块定义方式组织起来。
*
* AMD API 内部的简单不完全实现,请忽略。只有当WebUploader被合并成一个文件的时候才会引入。
*/
(function( root, factory ) {
var modules = {},
// 内部require, 简单不完全实现。
// https://github.com/amdjs/amdjs-api/wiki/require
_require = function( deps, callback ) {
var args, len, i;
// 如果deps不是数组,则直接返回指定module
if ( typeof deps === 'string' ) {
return getModule( deps );
} else {
args = [];
for( len = deps.length, i = 0; i < len; i++ ) {
args.push( getModule( deps[ i ] ) );
}
return callback.apply( null, args );
}
},
// 内部define,暂时不支持不指定id.
_define = function( id, deps, factory ) {
if ( arguments.length === 2 ) {
factory = deps;
deps = null;
}
_require( deps || [], function() {
setModule( id, factory, arguments );
});
},
// 设置module, 兼容CommonJs写法。
setModule = function( id, factory, args ) {
var module = {
exports: factory
},
returned;
if ( typeof factory === 'function' ) {
args.length || (args = [ _require, module.exports, module ]);
returned = factory.apply( null, args );
returned !== undefined && (module.exports = returned);
}
modules[ id ] = module.exports;
},
// 根据id获取module
getModule = function( id ) {
var module = modules[ id ] || root[ id ];
if ( !module ) {
throw new Error( '`' + id + '` is undefined' );
}
return module;
},
// 将所有modules,将路径ids装换成对象。
exportsTo = function( obj ) {
var key, host, parts, part, last, ucFirst;
// make the first character upper case.
ucFirst = function( str ) {
return str && (str.charAt( 0 ).toUpperCase() + str.substr( 1 ));
};
for ( key in modules ) {
host = obj;
if ( !modules.hasOwnProperty( key ) ) {
continue;
}
parts = key.split('/');
last = ucFirst( parts.pop() );
while( (part = ucFirst( parts.shift() )) ) {
host[ part ] = host[ part ] || {};
host = host[ part ];
}
host[ last ] = modules[ key ];
}
},
exports = factory( root, _define, _require ),
origin;
// exports every module.
exportsTo( exports );
if ( typeof module === 'object' && typeof module.exports === 'object' ) {
// For CommonJS and CommonJS-like environments where a proper window is present,
module.exports = exports;
} else if ( typeof define === 'function' && define.amd ) {
// Allow using this built library as an AMD module
// in another project. That other project will only
// see this AMD call, not the internal modules in
// the closure below.
define([], exports );
} else {
// Browser globals case. Just assign the
// result to a property on the global.
origin = root.WebUploader;
root.WebUploader = exports;
root.WebUploader.noConflict = function() {
root.WebUploader = origin;
};
}
})( this, function( window, define, require ) {
/**
* @fileOverview jQuery or Zepto
*/
define('dollar-third',[],function() {
return window.jQuery || window.Zepto;
});
/**
* @fileOverview Dom operating相关
*/
define('dollar',[
'dollar-third'
], function( _ ) {
return _;
});
/**
* @fileOverview 使用jQuery的Promise
*/
define('promise-third',[
'dollar'
], function( $ ) {
return {
Deferred: $.Deferred,
when: $.when,
isPromise: function( anything ) {
return anything && typeof anything.then === 'function';
}
};
});
/**
* @fileOverview Promise/A+
*/
define('promise',[
'promise-third'
], function( _ ) {
return _;
});
/**
* @fileOverview 基础类方法。
*/
/**
* Web Uploader内部类的detailed说明,以下提及的功能类,都可以在`WebUploader`这个变量中访问到。
*
* As you know, Web Uploader的每个文件都是用过[AMD](https://github.com/amdjs/amdjs-api/wiki/AMD)规范中的`define`组织起来的, 每个Module都会有个module id.
* Defaultmodule id该文件的路径,而此路径将会转化成名字空间存放在WebUploader中。如:
*
* * module `base`:WebUploader.Base
* * module `file`: WebUploader.File
* * module `lib/dnd`: WebUploader.Lib.Dnd
* * module `runtime/html5/dnd`: WebUploader.Runtime.Html5.Dnd
*
*
* 以下文档将可能省略`WebUploader`前缀。
* @module WebUploader
* @title WebUploader API文档
*/
define('base',[
'dollar',
'promise'
], function( $, promise ) {
var noop = function() {},
call = Function.call;
// http://jsperf.com/uncurrythis
// 反科里化
function uncurryThis( fn ) {
return function() {
return call.apply( fn, arguments );
};
}
function bindFn( fn, context ) {
return function() {
return fn.apply( context, arguments );
};
}
function createObject( proto ) {
var f;
if ( Object.create ) {
return Object.create( proto );
} else {
f = function() {};
f.prototype = proto;
return new f();
}
}
/**
* 基础类,提供一些简单常用的方法。
* @class Base
*/
return {
/**
* @property {String} version 当前版本号。
*/
version: '0.1.2',
/**
* @property {jQuery|Zepto} $ 引用依赖的jQuery或者Zepto对象。
*/
$: $,
Deferred: promise.Deferred,
isPromise: promise.isPromise,
when: promise.when,
/**
* @description 简单的浏览器检查结果。
*
* * `webkit` webkit版本号,如果浏览器为非webkit内核,此属性为`undefined`。
* * `chrome` chrome浏览器版本号,如果浏览器为chrome,此属性为`undefined`。
* * `ie` ie浏览器版本号,如果浏览器为非ie,此属性为`undefined`。**暂不支持ie10+**
* * `firefox` firefox浏览器版本号,如果浏览器为非firefox,此属性为`undefined`。
* * `safari` safari浏览器版本号,如果浏览器为非safari,此属性为`undefined`。
* * `opera` opera浏览器版本号,如果浏览器为非opera,此属性为`undefined`。
*
* @property {Object} [browser]
*/
browser: (function( ua ) {
var ret = {},
webkit = ua.match( /WebKit\/([\d.]+)/ ),
chrome = ua.match( /Chrome\/([\d.]+)/ ) ||
ua.match( /CriOS\/([\d.]+)/ ),
ie = ua.match( /MSIE\s([\d\.]+)/ ) ||
ua.match(/(?:trident)(?:.*rv:([\w.]+))?/i),
firefox = ua.match( /Firefox\/([\d.]+)/ ),
safari = ua.match( /Safari\/([\d.]+)/ ),
opera = ua.match( /OPR\/([\d.]+)/ );
webkit && (ret.webkit = parseFloat( webkit[ 1 ] ));
chrome && (ret.chrome = parseFloat( chrome[ 1 ] ));
ie && (ret.ie = parseFloat( ie[ 1 ] ));
firefox && (ret.firefox = parseFloat( firefox[ 1 ] ));
safari && (ret.safari = parseFloat( safari[ 1 ] ));
opera && (ret.opera = parseFloat( opera[ 1 ] ));
return ret;
})( navigator.userAgent ),
/**
* @description operating系统检查结果。
*
* * `android` 如果在android浏览器环境下,此值为对应的android版本号,否则为`undefined`。
* * `ios` 如果在ios浏览器环境下,此值为对应的ios版本号,否则为`undefined`。
* @property {Object} [os]
*/
os: (function( ua ) {
    // Detect the mobile OS (android / ios) and its version from the
    // user-agent string, evaluated once at load time.
    var ret = {},

        // osx = !!ua.match( /\(Macintosh\; Intel / ),
        android = ua.match( /(?:Android);?[\s\/]+([\d.]+)?/ ),
        ios = ua.match( /(?:iPad|iPod|iPhone).*OS\s([\d_]+)/ );

    // osx && (ret.osx = true);
    android && (ret.android = parseFloat( android[ 1 ] ));
    // iOS reports versions with underscores ("7_1_2"); normalise to dots
    // before parseFloat.
    ios && (ret.ios = parseFloat( ios[ 1 ].replace( /_/g, '.' ) ));
    return ret;
})( navigator.userAgent ),
/**
* 实现类与类之间的继承。
* @method inherits
* @grammar Base.inherits( super ) => child
* @grammar Base.inherits( super, protos ) => child
* @grammar Base.inherits( super, protos, statics ) => child
* @param {Class} super 父类
* @param {Object | Function} [protos] 子类或者对象。如果对象中包含constructor,子类将是用此属性值。
* @param {Function} [protos.constructor] 子类构造器,不指定的话将创建个临时的直接执行父类构造器的方法。
* @param {Object} [statics] 静态属性或方法。
* @return {Class} 返回子类。
* @example
* function Person() {
* console.log( 'Super' );
* }
* Person.prototype.hello = function() {
* console.log( 'hello' );
* };
*
* var Manager = Base.inherits( Person, {
* world: function() {
* console.log( 'World' );
* }
* });
*
* // 因为没有指定构造器,父类的构造器将会执行。
* var instance = new Manager(); // => Super
*
* // 继承子父类的方法
* instance.hello(); // => hello
* instance.world(); // => World
*
* // 子类的__super__属性指向父类
* console.log( Manager.__super__ === Person ); // => true
*/
inherits: function( Super, protos, staticProtos ) {
    // Class-to-class inheritance; see the doc block above for grammar
    // and a usage example.
    var child;

    if ( typeof protos === 'function' ) {
        child = protos;
        protos = null;
    } else if ( protos && protos.hasOwnProperty('constructor') ) {
        // A `constructor` entry in the proto object becomes the subclass.
        child = protos.constructor;
    } else {
        // No constructor supplied: synthesise one that just delegates
        // to the parent constructor.
        child = function() {
            return Super.apply( this, arguments );
        };
    }

    // Copy static members: Super's own statics first, then explicit ones.
    $.extend( true, child, Super, staticProtos || {} );

    /* jshint camelcase: false */

    // Point the subclass' __super__ at the parent prototype.
    child.__super__ = Super.prototype;

    // Build the prototype chain, then mix in the instance members.
    // Currently implemented via Object.create (see createObject).
    child.prototype = createObject( Super.prototype );
    protos && $.extend( true, child.prototype, protos );

    return child;
},
/**
* 一个不做任何事情的方法。可以用来赋值给Default的callback.
* @method noop
*/
noop: noop,
/**
* 返回一个新的方法,此方法将已指定的`context`来执行。
* @grammar Base.bindFn( fn, context ) => Function
* @method bindFn
* @example
* var doSomething = function() {
* console.log( this.name );
* },
* obj = {
* name: 'Object Name'
* },
* aliasFn = Base.bindFn( doSomething, obj );
*
* aliasFn(); // => Object Name
*
*/
bindFn: bindFn,
/**
* 引用console.log如果存在的话,否则引用一个[空函数noop](#WebUploader:Base.noop)。
* @grammar Base.log( args... ) => undefined
* @method log
*/
log: (function() {
if ( window.console ) {
return bindFn( console.log, console );
}
return noop;
})(),
nextTick: (function() {
return function( cb ) {
setTimeout( cb, 1 );
};
// @bug 当浏览器不在当前窗口时就停了。
// var next = window.requestAnimationFrame ||
// window.webkitRequestAnimationFrame ||
// window.mozRequestAnimationFrame ||
// function( cb ) {
// window.setTimeout( cb, 1000 / 60 );
// };
// // fix: Uncaught TypeError: Illegal invocation
// return bindFn( next, window );
})(),
/**
* 被[uncurrythis](http://www.2ality.com/2011/11/uncurrying-this.html)的数组slice方法。
* 将用来将非数组对象转化成数组对象。
* @grammar Base.slice( target, start[, end] ) => Array
* @method slice
* @example
* function doSomthing() {
* var args = Base.slice( arguments, 1 );
* console.log( args );
* }
*
* doSomthing( 'ignored', 'arg2', 'arg3' ); // => Array ["arg2", "arg3"]
*/
slice: uncurryThis( [].slice ),
/**
* 生成唯一的ID
* @method guid
* @grammar Base.guid() => String
* @grammar Base.guid( prefx ) => String
*/
guid: (function() {
var counter = 0;
return function( prefix ) {
var guid = (+new Date()).toString( 32 ),
i = 0;
for ( ; i < 5; i++ ) {
guid += Math.floor( Math.random() * 65535 ).toString( 32 );
}
return (prefix || 'wu_') + guid + (counter++).toString( 32 );
};
})(),
/**
* 格式化文件大小, 输出成带单位的字符串
* @method formatSize
* @grammar Base.formatSize( size ) => String
* @grammar Base.formatSize( size, pointLength ) => String
* @grammar Base.formatSize( size, pointLength, units ) => String
* @param {Number} size 文件大小
* @param {Number} [pointLength=2] 精确到的小数点数。
* @param {Array} [units=[ 'B', 'K', 'M', 'G', 'TB' ]] 单位数组。从字节,到千字节,一直往上指定。如果单位数组里面只指定了到了K(千字节),同时文件大小greater thanM, 此方法的输出将还是显示成多少K.
* @example
* console.log( Base.formatSize( 100 ) ); // => 100B
* console.log( Base.formatSize( 1024 ) ); // => 1.00K
* console.log( Base.formatSize( 1024, 0 ) ); // => 1K
* console.log( Base.formatSize( 1024 * 1024 ) ); // => 1.00M
* console.log( Base.formatSize( 1024 * 1024 * 1024 ) ); // => 1.00G
* console.log( Base.formatSize( 1024 * 1024 * 1024, 0, ['B', 'KB', 'MB'] ) ); // => 1024MB
*/
formatSize: function( size, pointLength, units ) {
var unit;
units = units || [ 'B', 'K', 'M', 'G', 'TB' ];
while ( (unit = units.shift()) && size > 1024 ) {
size = size / 1024;
}
return (unit === 'B' ? size : size.toFixed( pointLength || 2 )) +
unit;
}
};
});
/**
* 事件处理类,可以独立使用,也可以扩展给对象使用。
* @fileOverview Mediator
*/
define('mediator',[
'base'
], function( Base ) {
var $ = Base.$,
slice = [].slice,
separator = /\s+/,
protos;
// 根据条件过滤出事件handlers.
// Filter `arr` down to the handlers matching every supplied criterion;
// an omitted criterion matches anything. Deleted (sparse) slots left by
// off() are dropped by the initial truthiness test.
function findHandlers( arr, name, callback, context ) {
    return Array.prototype.filter.call( arr, function( handler ) {
        if ( !handler ) {
            return false;
        }

        if ( name && handler.e !== name ) {
            return false;
        }

        // Match the callback directly, or via the `_cb` back-reference
        // that once() stores on its self-unbinding wrapper.
        if ( callback && handler.cb !== callback &&
                handler.cb._cb !== callback ) {
            return false;
        }

        return !context || handler.ctx === context;
    });
}
// Invoke `iterator( name, callback )` once per event name. Only a
// space-separated name list is supported (no object form).
function eachEvent( events, callback, iterator ) {
    var names = (events || '').split( separator ),
        i;

    for ( i = 0; i < names.length; i++ ) {
        iterator( names[ i ], callback );
    }
}
// Invoke each handler in order with its bound context (`ctx2`); a handler
// returning `false` halts the chain. Returns `true` when no handler
// cancelled, `false` otherwise.
function triggerHanders( events, args ) {
    var idx = 0,
        total = events.length;

    for ( ; idx < total; idx++ ) {
        if ( events[ idx ].cb.apply( events[ idx ].ctx2, args ) === false ) {
            return false;
        }
    }

    return true;
}
protos = {
/**
* 绑定事件。
*
* `callback`方法在执行时,arguments将会来源于trigger的时候携带的参数。如
* ```javascript
* var obj = {};
*
* // 使得obj有事件行为
* Mediator.installTo( obj );
*
* obj.on( 'testa', function( arg1, arg2 ) {
* console.log( arg1, arg2 ); // => 'arg1', 'arg2'
* });
*
* obj.trigger( 'testa', 'arg1', 'arg2' );
* ```
*
* 如果`callback`中,某一个方法`return false`了,则后续的其他`callback`都不会被执行到。
* 切会影响到`trigger`方法的返回值,为`false`。
*
* `on`还可以用来添加一个特殊事件`all`, 这样所有的事件触发都会响应到。同时此类`callback`中的arguments有一个不同处,
* 就是第一个参数为`type`,记录当前是什么事件在触发。此类`callback`的优先级比较低,会在正常`callback`执行完后触发。
* ```javascript
* obj.on( 'all', function( type, arg1, arg2 ) {
* console.log( type, arg1, arg2 ); // => 'testa', 'arg1', 'arg2'
* });
* ```
*
* @method on
* @grammar on( name, callback[, context] ) => self
* @param {String} name 事件名,支持多个事件用空格隔开
* @param {Function} callback 事件处理器
* @param {Object} [context] 事件处理器的上下文。
* @return {self} 返回自身,方便链式
* @chainable
* @class Mediator
*/
on: function( name, callback, context ) {
    // Bind `callback` (optionally with `context`) to each of the
    // space-separated event names in `name`; see doc block above.
    var me = this,
        set;

    if ( !callback ) {
        return this;
    }

    // Lazily create the handler list on first subscription.
    set = this._events || (this._events = []);

    eachEvent( name, callback, function( name, callback ) {
        var handler = { e: name };

        handler.cb = callback;
        handler.ctx = context;
        // ctx2 is what the handler is actually invoked with: the given
        // context, or the emitter itself when none was supplied.
        handler.ctx2 = context || me;
        // The array index doubles as a stable id that off() uses to
        // delete the slot without shifting the others.
        handler.id = set.length;
        set.push( handler );
    });

    return this;
},
/**
* 绑定事件,且当handler执行完后,自动解除绑定。
* @method once
* @grammar once( name, callback[, context] ) => self
* @param {String} name 事件名
* @param {Function} callback 事件处理器
* @param {Object} [context] 事件处理器的上下文。
* @return {self} 返回自身,方便链式
* @chainable
*/
once: function( name, callback, context ) {
    // Like on(), but the handler unbinds itself after its first call.
    var me = this;

    if ( !callback ) {
        return me;
    }

    eachEvent( name, callback, function( name, callback ) {
        var once = function() {
                // Unbind before invoking so a handler that re-triggers
                // the same event cannot run twice.
                me.off( name, once );
                return callback.apply( context || me, arguments );
            };

        // Back-reference to the original so off( name, callback ) can
        // still locate this wrapper (see findHandlers' _cb check).
        once._cb = callback;
        me.on( name, once, context );
    });

    return me;
},
/**
* 解除事件绑定
* @method off
* @grammar off( [name[, callback[, context] ] ] ) => self
* @param {String} [name] 事件名
* @param {Function} [callback] 事件处理器
* @param {Object} [context] 事件处理器的上下文。
* @return {self} 返回自身,方便链式
* @chainable
*/
off: function( name, cb, ctx ) {
    // Unbind the handlers matching the given name/callback/context;
    // with no arguments at all, drop every handler.
    var events = this._events;

    if ( !events ) {
        return this;
    }

    // off() with no filter resets the whole handler list.
    if ( !name && !cb && !ctx ) {
        this._events = [];
        return this;
    }

    eachEvent( name, cb, function( name, cb ) {
        $.each( findHandlers( events, name, cb, ctx ), function() {
            // delete (not splice) so the ids of the remaining handlers
            // stay valid; findHandlers skips the resulting holes.
            delete events[ this.id ];
        });
    });

    return this;
},
/**
* 触发事件
* @method trigger
* @grammar trigger( name[, args...] ) => self
* @param {String} type 事件名
* @param {*} [...] 任意参数
* @return {Boolean} 如果handler中return false了,则返回false, 否则返回true
*/
trigger: function( type ) {
    // Fire `type` with any extra arguments. Returns false as soon as a
    // handler returns false. 'all' handlers run after the named ones and
    // receive the event name as their first argument (the raw arguments
    // object is forwarded to them).
    var args, events, allEvents;

    if ( !this._events || !type ) {
        return this;
    }

    args = slice.call( arguments, 1 );
    events = findHandlers( this._events, type );
    allEvents = findHandlers( this._events, 'all' );

    return triggerHanders( events, args ) &&
            triggerHanders( allEvents, arguments );
}
};
/**
* 中介者,它本身是个单例,但可以通过[installTo](#WebUploader:Mediator:installTo)方法,使任何对象具备事件行为。
* 主要目的是负责模块与模块之间的合作,降低耦合度。
*
* @class Mediator
*/
return $.extend({
/**
* 可以通过这个接口,使任何对象具备事件功能。
* @method installTo
* @param {Object} obj 需要具备事件行为的对象。
* @return {Object} 返回obj.
*/
installTo: function( obj ) {
return $.extend( obj, protos );
}
}, protos );
});
/**
* @fileOverview Uploader上传类
*/
define('uploader',[
'base',
'mediator'
], function( Base, Mediator ) {
var $ = Base.$;
/**
* 上传入口类。
* @class Uploader
* @constructor
* @grammar new Uploader( opts ) => Uploader
* @example
* var uploader = WebUploader.Uploader({
* swf: 'path_of_swf/Uploader.swf',
*
* // 开起分片上传。
* chunked: true
* });
*/
function Uploader( opts ) {
this.options = $.extend( true, {}, Uploader.options, opts );
this._init( this.options );
}
// default Options
// widgets中有相应扩展
Uploader.options = {};
Mediator.installTo( Uploader.prototype );
// 批量添加纯命令式方法。
$.each({
upload: 'start-upload',
stop: 'stop-upload',
getFile: 'get-file',
getFiles: 'get-files',
addFile: 'add-file',
addFiles: 'add-file',
sort: 'sort-files',
removeFile: 'remove-file',
skipFile: 'skip-file',
retry: 'retry',
isInProgress: 'is-in-progress',
makeThumb: 'make-thumb',
getDimension: 'get-dimension',
addButton: 'add-btn',
getRuntimeType: 'get-runtime-type',
refresh: 'refresh',
disable: 'disable',
enable: 'enable',
reset: 'reset'
}, function( fn, command ) {
Uploader.prototype[ fn ] = function() {
return this.request( command, arguments );
};
});
$.extend( Uploader.prototype, {
state: 'pending',
_init: function( opts ) {
var me = this;
me.request( 'init', opts, function() {
me.state = 'ready';
me.trigger('ready');
});
},
/**
* 获取或者设置UploaderConfiguration项。
* @method option
* @grammar option( key ) => *
* @grammar option( key, val ) => self
* @example
*
* // 初始状态图片上传前不会压缩
* var uploader = new WebUploader.Uploader({
* resize: null;
* });
*
* // 修改后图片上传前,尝试将图片压缩到1600 * 1600
* uploader.options( 'resize', {
* width: 1600,
* height: 1600
* });
*/
option: function( key, val ) {
    // Getter/setter for a configuration value; see doc block above.
    var opts = this.options;

    // setter
    if ( arguments.length > 1 ) {

        if ( $.isPlainObject( val ) &&
                $.isPlainObject( opts[ key ] ) ) {
            // Plain-object values are merged into the existing option
            // rather than replacing it wholesale.
            $.extend( opts[ key ], val );
        } else {
            opts[ key ] = val;
        }

    } else {    // getter: one key, or the whole options object.
        return key ? opts[ key ] : opts;
    }
},
/**
* 获取文件统计信息。返回一个包含一下信息的对象。
* * `successNum` 上传成功的文件数
* * `uploadFailNum` 上传失败的文件数
* * `cancelNum` 被删除的文件数
* * `invalidNum` 无效的文件数
* * `queueNum` 还在队列中的文件数
* @method getStats
* @grammar getStats() => Object
*/
getStats: function() {
    // Fetch queue statistics through the widget bus and map them onto
    // the documented public field names.
    // return this._mgr.getStats.apply( this._mgr, arguments );
    var stats = this.request('get-stats');

    return {
        successNum: stats.numOfSuccess,

        // who care?
        // queueFailNum: 0,
        cancelNum: stats.numOfCancel,
        invalidNum: stats.numOfInvalid,
        uploadFailNum: stats.numOfUploadFailed,
        queueNum: stats.numOfQueue
    };
},
// 需要重写此方法来支持opts.onEvent和instance.onEvent的处理器
trigger: function( type/*, args...*/ ) {
    // Fire `type`, consulting in order: handlers registered via on(),
    // the opts.onXxx callback, the instance's onXxx method, and finally
    // a broadcast on the Mediator singleton. Any of them returning false
    // short-circuits the chain and makes trigger() return false.
    var args = [].slice.call( arguments, 1 ),
        opts = this.options,
        // 'fileQueued' -> 'onFileQueued'
        name = 'on' + type.substring( 0, 1 ).toUpperCase() +
            type.substring( 1 );

    if (
            // Handlers registered through on().
            Mediator.trigger.apply( this, arguments ) === false ||

            // The opts.onEvent callback.
            $.isFunction( opts[ name ] ) &&
            opts[ name ].apply( this, args ) === false ||

            // The this.onEvent method.
            $.isFunction( this[ name ] ) &&
            this[ name ].apply( this, args ) === false ||

            // Broadcast every uploader's events on the Mediator
            // singleton. NOTE(review): the uploader instance itself is
            // passed as the first (event-key) argument here — confirm
            // this is intended before relying on it.
            Mediator.trigger.apply( Mediator,
            [ this, type ].concat( args ) ) === false ) {

        return false;
    }

    return true;
},
// widgets/widget.js将补充此方法的detailed文档。
request: Base.noop
});
/**
* 创建Uploader实例,等同于new Uploader( opts );
* @method create
* @class Base
* @static
* @grammar Base.create( opts ) => Uploader
*/
Base.create = Uploader.create = function( opts ) {
return new Uploader( opts );
};
// 暴露Uploader,可以通过它来扩展业务逻辑。
Base.Uploader = Uploader;
return Uploader;
});
/**
* @fileOverview Runtime管理器,负责Runtime的选择, 连接
*/
define('runtime/runtime',[
'base',
'mediator'
], function( Base, Mediator ) {
var $ = Base.$,
factories = {},
// 获取对象的第一个key
getFirstKey = function( obj ) {
for ( var key in obj ) {
if ( obj.hasOwnProperty( key ) ) {
return key;
}
}
return null;
};
// 接口类。
function Runtime( options ) {
this.options = $.extend({
container: document.body
}, options );
this.uid = Base.guid('rt_');
}
$.extend( Runtime.prototype, {
getContainer: function() {
    // Lazily create (once per runtime) and return the hidden 1x1 shim
    // container the runtime renders into, appended to options.container
    // (default: document.body).
    var opts = this.options,
        parent, container;

    if ( this._container ) {
        return this._container;
    }

    parent = $( opts.container || document.body );
    container = $( document.createElement('div') );

    container.attr( 'id', 'rt_' + this.uid );
    container.css({
        position: 'absolute',
        top: '0px',
        left: '0px',
        width: '1px',
        height: '1px',
        overflow: 'hidden'
    });

    parent.append( container );
    parent.addClass('webuploader-container');
    this._container = container;
    return container;
},
init: Base.noop,
exec: Base.noop,
destroy: function() {
if ( this._container ) {
this._container.parentNode.removeChild( this.__container );
}
this.off();
}
});
Runtime.orders = 'html5,flash';
/**
* 添加Runtime实现。
* @param {String} type 类型
* @param {Runtime} factory 具体Runtime实现。
*/
Runtime.addRuntime = function( type, factory ) {
factories[ type ] = factory;
};
Runtime.hasRuntime = function( type ) {
return !!(type ? factories[ type ] : getFirstKey( factories ));
};
Runtime.create = function( opts, orders ) {
    // Instantiate the first available runtime implementation, trying the
    // comma-separated `orders` preference list (default Runtime.orders,
    // i.e. 'html5,flash'), then falling back to any registered factory.
    var type, runtime;

    orders = orders || Runtime.orders;
    $.each( orders.split( /\s*,\s*/g ), function() {
        if ( factories[ this ] ) {
            type = this;
            // Returning false stops $.each at the first match.
            return false;
        }
    });

    type = type || getFirstKey( factories );

    if ( !type ) {
        throw new Error('Runtime Error');
    }

    runtime = new factories[ type ]( opts );
    return runtime;
};
Mediator.installTo( Runtime.prototype );
return Runtime;
});
/**
* @fileOverview Runtime管理器,负责Runtime的选择, 连接
*/
define('runtime/client',[
'base',
'mediator',
'runtime/runtime'
], function( Base, Mediator, Runtime ) {
var cache;
cache = (function() {
var obj = {};
return {
add: function( runtime ) {
obj[ runtime.uid ] = runtime;
},
get: function( ruid, standalone ) {
var i;
if ( ruid ) {
return obj[ ruid ];
}
for ( i in obj ) {
// 有些类型不能重用,比如filepicker.
if ( standalone && obj[ i ].__standalone ) {
continue;
}
return obj[ i ];
}
return null;
},
remove: function( runtime ) {
delete obj[ runtime.uid ];
}
};
})();
function RuntimeClient( component, standalone ) {
    // Mixes runtime-connection behaviour into a component (Blob, Image,
    // FilePicker, ...). `standalone` marks clients whose runtime must
    // not be shared through the cache (e.g. file pickers).
    var deferred = Base.Deferred(),
        runtime;

    this.uid = Base.guid('client_');

    // Allow callbacks to be queued before the runtime is initialised;
    // they run once it becomes ready.
    this.runtimeReady = function( cb ) {
        return deferred.done( cb );
    };

    this.connectRuntime = function( opts, cb ) {
        // already connected.
        if ( runtime ) {
            throw new Error('already connected!');
        }

        deferred.done( cb );

        // A string `opts` is treated as a runtime uid to reconnect to.
        if ( typeof opts === 'string' && cache.get( opts ) ) {
            runtime = cache.get( opts );
        }

        // Components like FilePicker must own their runtime; others may
        // reuse a cached, non-standalone one.
        runtime = runtime || cache.get( null, standalone );

        // No reusable runtime found: create and register a new one.
        if ( !runtime ) {
            runtime = Runtime.create( opts, opts.runtimeOrder );
            runtime.__promise = deferred.promise();
            runtime.once( 'ready', deferred.resolve );
            runtime.init();
            cache.add( runtime );
            // __client reference-counts the clients sharing this runtime.
            runtime.__client = 1;
        } else {
            // Reused from cache: merge options and chain our readiness
            // onto the existing runtime's promise.
            Base.$.extend( runtime.options, opts );
            runtime.__promise.then( deferred.resolve );
            runtime.__client++;
        }

        standalone && (runtime.__standalone = standalone);
        return runtime;
    };

    this.getRuntime = function() {
        return runtime;
    };

    this.disconnectRuntime = function() {
        if ( !runtime ) {
            return;
        }

        // Count down the shared-reference count; destroy the runtime
        // once no client is using it any more.
        runtime.__client--;
        if ( runtime.__client <= 0 ) {
            cache.remove( runtime );
            delete runtime.__promise;
            runtime.destroy();
        }

        runtime = null;
    };

    // Forward a command to the runtime, prefixing the component name so
    // the runtime can dispatch it to the right handler.
    this.exec = function() {
        if ( !runtime ) {
            return;
        }

        var args = Base.slice( arguments );
        component && args.unshift( component );

        return runtime.exec.apply( this, args );
    };

    this.getRuid = function() {
        return runtime && runtime.uid;
    };

    // Wrap any pre-existing destroy so subclasses keep their own
    // teardown while still notifying/cleaning the runtime connection.
    this.destroy = (function( destroy ) {
        return function() {
            destroy && destroy.apply( this, arguments );
            this.trigger('destroy');
            this.off();
            this.exec('destroy');
            this.disconnectRuntime();
        };
    })( this.destroy );
}
Mediator.installTo( RuntimeClient.prototype );
return RuntimeClient;
});
/**
* @fileOverview Blob
*/
define('lib/blob',[
'base',
'runtime/client'
], function( Base, RuntimeClient ) {
function Blob( ruid, source ) {
    // Wrap a runtime-specific blob `source`, mirroring its uid/type/size
    // and connecting to the owning runtime (by `ruid`) when one is given.
    var me = this;

    me.source = source;
    me.ruid = ruid;

    RuntimeClient.call( me, 'Blob' );

    this.uid = source.uid || this.uid;
    this.type = source.type || '';
    this.size = source.size || 0;

    if ( ruid ) {
        me.connectRuntime( ruid );
    }
}
Base.inherits( RuntimeClient, {
constructor: Blob,
slice: function( start, end ) {
return this.exec( 'slice', start, end );
},
getSource: function() {
return this.source;
}
});
return Blob;
});
/**
* 为了统一化Flash的File和HTML5的File而存在。
* 以至于要调用Flash里面的File,也可以像调用HTML5版本的File一下。
* @fileOverview File
*/
define('lib/file',[
'base',
'lib/blob'
], function( Base, Blob ) {
var uid = 1,
rExt = /\.([^.]+)$/;
function File( ruid, file ) {
    // Normalise a runtime file object: guarantee a name, derive the
    // extension and mime type from each other where possible, and stamp
    // a last-modified date.
    var ext;

    Blob.apply( this, arguments );
    this.name = file.name || ('untitled' + uid++);
    ext = rExt.exec( file.name ) ? RegExp.$1.toLowerCase() : '';

    // todo 支持其他类型文件的转换。
    // Name has no extension but a mime type is known: try to recover the
    // extension from the mime type.
    if ( !ext && this.type ) {
        ext = /\/(jpg|jpeg|png|gif|bmp)$/i.exec( this.type ) ?
                RegExp.$1.toLowerCase() : '';

        // BUGFIX: only append when an extension was actually recovered.
        // The old code appended unconditionally, so non-image mime types
        // produced names ending in a bare dot ("photo.").
        if ( ext ) {
            this.name += '.' + ext;
        }
    }

    // Mime type unknown but the extension is a known image type: derive
    // it. BUGFIX: require a non-empty ext — ''.indexOf('') is 0, so the
    // old check wrongly set type to 'image/' for extension-less files.
    // (Note the substring match is loose: e.g. 'pn' would also match.)
    if ( !this.type && ext && ~'jpg,jpeg,png,gif,bmp'.indexOf( ext ) ) {
        this.type = 'image/' + (ext === 'jpg' ? 'jpeg' : ext);
    }

    this.ext = ext;
    this.lastModifiedDate = file.lastModifiedDate ||
            (new Date()).toLocaleString();
}
return Base.inherits( Blob, File );
});
/**
* @fileOverview 文件选择器
*/
define('lib/filepicker',[
'base',
'runtime/client',
'lib/file'
], function( Base, RuntimeClent, File ) {
var $ = Base.$;
function FilePicker( opts ) {
    // Build the pick button inside the container identified by opts.id
    // and register as a standalone 'FilePicker' runtime client.
    opts = this.options = $.extend({}, FilePicker.options, opts );
    opts.container = $( opts.id );

    if ( !opts.container.length ) {
        throw new Error('按钮指定错误');
    }

    // Button label: explicit innerHTML/label wins, else whatever the
    // container already contained.
    opts.innerHTML = opts.innerHTML || opts.label ||
            opts.container.html() || '';

    opts.button = $( opts.button || document.createElement('div') );
    opts.button.html( opts.innerHTML );
    opts.container.html( opts.button );

    // `true` marks this client standalone: its runtime is never shared
    // through the runtime cache.
    RuntimeClent.call( this, 'FilePicker', true );
}
FilePicker.options = {
button: null,
container: null,
label: null,
innerHTML: null,
multiple: true,
accept: null,
name: 'file'
};
Base.inherits( RuntimeClent, {
constructor: FilePicker,
init: function() {
var me = this,
opts = me.options,
button = opts.button;
button.addClass('webuploader-pick');
me.on( 'all', function( type ) {
var files;
switch ( type ) {
case 'mouseenter':
button.addClass('webuploader-pick-hover');
break;
case 'mouseleave':
button.removeClass('webuploader-pick-hover');
break;
case 'change':
files = me.exec('getFiles');
me.trigger( 'select', $.map( files, function( file ) {
file = new File( me.getRuid(), file );
// 记录来源。
file._refer = opts.container;
return file;
}), opts.container );
break;
}
});
me.connectRuntime( opts, function() {
me.refresh();
me.exec( 'init', opts );
me.trigger('ready');
});
$( window ).on( 'resize', function() {
me.refresh();
});
},
refresh: function() {
var shimContainer = this.getRuntime().getContainer(),
button = this.options.button,
width = button.outerWidth ?
button.outerWidth() : button.width(),
height = button.outerHeight ?
button.outerHeight() : button.height(),
pos = button.offset();
width && height && shimContainer.css({
bottom: 'auto',
right: 'auto',
width: width + 'px',
height: height + 'px'
}).offset( pos );
},
enable: function() {
var btn = this.options.button;
btn.removeClass('webuploader-pick-disable');
this.refresh();
},
disable: function() {
var btn = this.options.button;
this.getRuntime().getContainer().css({
top: '-99999px'
});
btn.addClass('webuploader-pick-disable');
},
destroy: function() {
if ( this.runtime ) {
this.exec('destroy');
this.disconnectRuntime();
}
}
});
return FilePicker;
});
/**
* @fileOverview Component 基类。
*/
define('widgets/widget',[
'base',
'uploader'
], function( Base, Uploader ) {
var $ = Base.$,
_init = Uploader.prototype._init,
IGNORE = {},
widgetClass = [];
// Decide whether `obj` can be iterated like an array: real arrays, DOM
// elements with a length, and generic objects with a well-formed numeric
// length (arguments objects, jQuery collections, node lists...).
function isArrayLike( obj ) {
    if ( !obj ) {
        return false;
    }

    var len = obj.length,
        kind = $.type( obj );

    // A DOM element carrying a truthy length counts.
    if ( obj.nodeType === 1 && len ) {
        return true;
    }

    if ( kind === 'array' ) {
        return true;
    }

    // Functions and strings have a length but are not array-like here.
    if ( kind === 'function' || kind === 'string' ) {
        return false;
    }

    return len === 0 ||
            typeof len === 'number' && len > 0 && (len - 1) in obj;
}
function Widget( uploader ) {
this.owner = uploader;
this.options = uploader.options;
}
$.extend( Widget.prototype, {
init: Base.noop,
// 类Backbone的事件监听声明,监听uploader实例上的事件
// widget直接无法监听事件,事件只能通过uploader来传递
invoke: function( apiName, args ) {
    // Dispatch the uploader command `apiName` to this widget's matching
    // method, resolved through the widget's responseMap, e.g.:
    /*
        {
            'make-thumb': 'makeThumb'
        }
     */
    var map = this.responseMap;

    // Return the IGNORE sentinel when this widget declares no handler
    // for the command (so request() can tell "no answer" from any real
    // return value, including undefined).
    if ( !map || !(apiName in map) || !(map[ apiName ] in this) ||
            !$.isFunction( this[ map[ apiName ] ] ) ) {
        return IGNORE;
    }

    return this[ map[ apiName ] ].apply( this, args );
},
/**
* 发送命令。当传入`callback`或者`handler`中返回`promise`时。返回一个当所有`handler`中的promise都完成后完成的新`promise`。
* @method request
* @grammar request( command, args ) => * | Promise
* @grammar request( command, args, callback ) => Promise
* @for Uploader
*/
request: function() {
return this.owner.request.apply( this.owner, arguments );
}
});
// 扩展Uploader.
$.extend( Uploader.prototype, {
// 覆写_init用来初始化widgets
_init: function() {
var me = this,
widgets = me._widgets = [];
$.each( widgetClass, function( _, klass ) {
widgets.push( new klass( me ) );
});
return _init.apply( me, arguments );
},
request: function( apiName, args, callback ) {
    // Broadcast `apiName` to every widget. Synchronous results are
    // collected directly; if any widget returns a promise, or a callback
    // was supplied, the call becomes asynchronous and settles once all
    // widget promises have.
    var i = 0,
        widgets = this._widgets,
        len = widgets.length,
        rlts = [],
        dfds = [],
        widget, rlt, promise, key;

    args = isArrayLike( args ) ? args : [ args ];

    for ( ; i < len; i++ ) {
        widget = widgets[ i ];
        rlt = widget.invoke( apiName, args );

        if ( rlt !== IGNORE ) {

            // Deferred object
            if ( Base.isPromise( rlt ) ) {
                dfds.push( rlt );
            } else {
                rlts.push( rlt );
            }
        }
    }

    // Asynchronous path: a callback was given or some widget deferred.
    if ( callback || dfds.length ) {
        promise = Base.when.apply( Base, dfds );
        // Older jQuery exposes pipe, newer exposes then.
        key = promise.pipe ? 'pipe' : 'then';

        // Important — do not remove; removing this causes an infinite
        // loop. It guarantees ordering: the callback always runs in a
        // later tick.
        return promise[ key ](function() {
            var deferred = Base.Deferred(),
                args = arguments;

            setTimeout(function() {
                deferred.resolve.apply( deferred, args );
            }, 1 );

            return deferred.promise();
        })[ key ]( callback || Base.noop );
    } else {
        // Synchronous path: only the first widget's answer is returned.
        return rlts[ 0 ];
    }
}
});
/**
 * 添加Component
 * @param {object} responseMap API名称与函数实现的映射
 * @param {object} widgetProto Component 原型,构造函数通过constructor属性定义
 * @example
 *     Uploader.register( {
 *         'make-thumb': 'makeThumb'
 *     }, {
 *         init: function( options ) {},
 *         makeThumb: function() {}
 *     } );
 */
Uploader.register = Widget.register = function( responseMap, widgetProto ) {
var map = { init: 'init' },
klass;
if ( arguments.length === 1 ) {
widgetProto = responseMap;
widgetProto.responseMap = map;
} else {
widgetProto.responseMap = $.extend( map, responseMap );
}
klass = Base.inherits( Widget, widgetProto );
widgetClass.push( klass );
return klass;
};
return Widget;
});
/**
* @fileOverview 文件选择相关
*/
define('widgets/filepicker',[
'base',
'uploader',
'lib/filepicker',
'widgets/widget'
], function( Base, Uploader, FilePicker ) {
var $ = Base.$;
$.extend( Uploader.options, {
/**
* @property {Selector | Object} [pick=undefined]
* @namespace options
* @for Uploader
* @description 指定选择文件的按钮容器,不指定则不创建按钮。
*
* * `id` {Selector} 指定选择文件的按钮容器,不指定则不创建按钮。
* * `label` {String} 请采用 `innerHTML` 代替
* * `innerHTML` {String} 指定按钮文字。不指定时优先从指定的容器中看是否自带文字。
* * `multiple` {Boolean} 是否开起同时选择多个文件能力。
*/
pick: null,
/**
* @property {Array} [accept=null]
* @namespace options
* @for Uploader
* @description 指定接受哪些类型的文件。 由于目前还有ext转mimeType表,所以这里需要分开指定。
*
* * `title` {String} 文字description
* * `extensions` {String} 允许的文件后缀,不带点,多个用逗号分割。
* * `mimeTypes` {String} 多个用逗号分割。
*
* 如:
*
* ```
* {
* title: 'Images',
* extensions: 'gif,jpg,jpeg,bmp,png',
* mimeTypes: 'image/*'
* }
* ```
*/
accept: null/*{
title: 'Images',
extensions: 'gif,jpg,jpeg,bmp,png',
mimeTypes: 'image/*'
}*/
});
return Uploader.register({
'add-btn': 'addButton',
refresh: 'refresh',
disable: 'disable',
enable: 'enable'
}, {
init: function( opts ) {
this.pickers = [];
return opts.pick && this.addButton( opts.pick );
},
refresh: function() {
$.each( this.pickers, function() {
this.refresh();
});
},
/**
* @method addButton
* @for Uploader
* @grammar addButton( pick ) => Promise
* @description
* 添加文件选择按钮,如果一个按钮不够,需要调用此方法来添加。参数跟[options.pick](#WebUploader:Uploader:options)一致。
* @example
* uploader.addButton({
* id: '#btnContainer',
* innerHTML: '选择文件'
* });
*/
addButton: function( pick ) {
    // Create one FilePicker for `pick` (selector string or options
    // object), wire its selections into the file queue, and return a
    // promise resolved once the picker's runtime is ready.
    var me = this,
        opts = me.options,
        accept = opts.accept,
        options, picker, deferred;

    if ( !pick ) {
        return;
    }

    deferred = Base.Deferred();
    // A bare selector is promoted to an options object.
    $.isPlainObject( pick ) || (pick = {
        id: pick
    });

    options = $.extend({}, pick, {
        // The picker expects accept as an array.
        accept: $.isPlainObject( accept ) ? [ accept ] : accept,
        swf: opts.swf,
        runtimeOrder: opts.runtimeOrder
    });

    picker = new FilePicker( options );

    picker.once( 'ready', deferred.resolve );
    picker.on( 'select', function( files ) {
        // Feed the selected files into the queue widget.
        me.owner.request( 'add-file', [ files ]);
    });
    picker.init();

    this.pickers.push( picker );

    return deferred.promise();
},
disable: function() {
$.each( this.pickers, function() {
this.disable();
});
},
enable: function() {
$.each( this.pickers, function() {
this.enable();
});
}
});
});
/**
* @fileOverview Image
*/
define('lib/image',[
'base',
'runtime/client',
'lib/blob'
], function( Base, RuntimeClient, Blob ) {
var $ = Base.$;
// 构造器。
function Image( opts ) {
    // Runtime client for image operations; caches the image's info and
    // meta as soon as the underlying runtime reports 'load'.
    this.options = $.extend({}, Image.options, opts );
    RuntimeClient.call( this, 'Image' );

    this.on( 'load', function() {
        this._info = this.exec('info');
        this._meta = this.exec('meta');
    });
}
// Default选项。
Image.options = {
// Default的图片处理质量
quality: 90,
// 是否裁剪
crop: false,
// 是否保留头部信息
preserveHeaders: true,
// 是否允许放大。
allowMagnify: true
};
// 继承RuntimeClient.
Base.inherits( RuntimeClient, {
constructor: Image,
info: function( val ) {
// setter
if ( val ) {
this._info = val;
return this;
}
// getter
return this._info;
},
meta: function( val ) {
// setter
if ( val ) {
this._meta = val;
return this;
}
// getter
return this._meta;
},
loadFromBlob: function( blob ) {
var me = this,
ruid = blob.getRuid();
this.connectRuntime( ruid, function() {
me.exec( 'init', me.options );
me.exec( 'loadFromBlob', blob );
});
},
resize: function() {
var args = Base.slice( arguments );
return this.exec.apply( this, [ 'resize' ].concat( args ) );
},
getAsDataUrl: function( type ) {
return this.exec( 'getAsDataUrl', type );
},
getAsBlob: function( type ) {
var blob = this.exec( 'getAsBlob', type );
return new Blob( this.getRuid(), blob );
}
});
return Image;
});
/**
* @fileOverview 图片operating, 负责Preview 图片和上传前压缩图片
*/
define('widgets/image',[
'base',
'uploader',
'lib/image',
'widgets/widget'
], function( Base, Uploader, Image ) {
var $ = Base.$,
throttle;
// 根据要处理的文件大小来节流,一次不能处理太多,会卡。
throttle = (function( max ) {
var occupied = 0,
waiting = [],
tick = function() {
var item;
while ( waiting.length && occupied < max ) {
item = waiting.shift();
occupied += item[ 0 ];
item[ 1 ]();
}
};
return function( emiter, size, cb ) {
waiting.push([ size, cb ]);
emiter.once( 'destroy', function() {
occupied -= size;
setTimeout( tick, 1 );
});
setTimeout( tick, 1 );
};
})( 5 * 1024 * 1024 );
$.extend( Uploader.options, {
/**
* @property {Object} [thumb]
* @namespace options
* @for Uploader
* @description Configuration生成缩略图的选项。
*
* Default为:
*
* ```javascript
* {
* width: 110,
* height: 110,
*
* // 图片质量,只有type为`image/jpeg`的时候才有效。
* quality: 70,
*
* // 是否允许放大,如果想要生成小图的时候不失真,此选项应该设置为false.
* allowMagnify: true,
*
* // 是否允许裁剪。
* crop: true,
*
* // 是否保留头部meta信息。
* preserveHeaders: false,
*
* // 为空的话则保留原有图片格式。
* // 否则强制转换成指定的类型。
* type: 'image/jpeg'
* }
* ```
*/
thumb: {
width: 110,
height: 110,
quality: 70,
allowMagnify: true,
crop: true,
preserveHeaders: false,
// 为空的话则保留原有图片格式。
// 否则强制转换成指定的类型。
// IE 8下面 base64 大小不能超过 32K 否则Preview 失败,而非 jpeg 编码的图片很可
// 能会超过 32k, 所以这里设置成Preview 的时候都是 image/jpeg
type: 'image/jpeg'
},
/**
* @property {Object} [compress]
* @namespace options
* @for Uploader
* @description Configuration压缩的图片的选项。如果此选项为`false`, 则图片在上传前不进行压缩。
*
* Default为:
*
* ```javascript
* {
* width: 1600,
* height: 1600,
*
* // 图片质量,只有type为`image/jpeg`的时候才有效。
* quality: 90,
*
* // 是否允许放大,如果想要生成小图的时候不失真,此选项应该设置为false.
* allowMagnify: false,
*
* // 是否允许裁剪。
* crop: false,
*
* // 是否保留头部meta信息。
* preserveHeaders: true
* }
* ```
*/
compress: {
width: 1600,
height: 1600,
quality: 90,
allowMagnify: false,
crop: false,
preserveHeaders: true
}
});
return Uploader.register({
'make-thumb': 'makeThumb',
'before-send-file': 'compressImage'
}, {
/**
* 生成缩略图,此过程为异步,所以需要传入`callback`。
* 通常情况在图片加入队里后调用此方法来生成Preview 图以增强交互效果。
*
* `callback`中可以接收到两个参数。
* * 第一个为error,如果生成缩略图有错误,此error将为真。
* * 第二个为ret, 缩略图的Data URL值。
*
* **注意**
* Date URL在IE6/7中不支持,所以不用调用此方法了,直接显示一张暂不支持Preview 图片好了。
*
*
* @method makeThumb
* @grammar makeThumb( file, callback ) => undefined
* @grammar makeThumb( file, callback, width, height ) => undefined
* @for Uploader
* @example
*
* uploader.on( 'fileQueued', function( file ) {
* var $li = ...;
*
* uploader.makeThumb( file, function( error, ret ) {
* if ( error ) {
* $li.text('Preview 错误');
* } else {
* $li.append('<img alt="" src="' + ret + '" />');
* }
* });
*
* });
*/
makeThumb: function( file, cb, width, height ) {
    // Asynchronously render a thumbnail data-URL for `file`; see the doc
    // block above for the callback contract (error flag, then data URL).
    var opts, image;

    file = this.request( 'get-file', file );

    // Only image files can be previewed.
    if ( !file.type.match( /^image/ ) ) {
        cb( true );
        return;
    }

    opts = $.extend({}, this.options.thumb );

    // `width` may instead be an options object overriding the defaults.
    if ( $.isPlainObject( width ) ) {
        opts = $.extend( opts, width );
        width = null;
    }

    width = width || opts.width;
    height = height || opts.height;

    image = new Image( opts );

    image.once( 'load', function() {
        // Cache info/meta on the file so later operations skip re-reading.
        file._info = file._info || image.info();
        file._meta = file._meta || image.meta();

        image.resize( width, height );
    });

    image.once( 'complete', function() {
        cb( false, image.getAsDataUrl( opts.type ) );
        image.destroy();
    });

    image.once( 'error', function() {
        cb( true );
        image.destroy();
    });

    // Throttled by total byte size so several large images are not
    // decoded at once (which would freeze the UI).
    throttle( image, file.source.size, function() {
        file._info && image.info( file._info );
        file._meta && image.meta( file._meta );

        image.loadFromBlob( file.source );
    });
},
compressImage: function( file ) {
    // 'before-send-file' hook: shrink large JPEGs in place before upload.
    // Resolves when compression finished (or was safely skipped after an
    // exception); rejects only when the image fails to load.
    var opts = this.options.compress || this.options.resize,
        compressSize = opts && opts.compressSize || 300 * 1024,
        image, deferred;

    file = this.request( 'get-file', file );

    // Skip when compression is disabled, the file is not a jpeg, it is
    // already below the threshold, or it was compressed before.
    if ( !opts || !~'image/jpeg,image/jpg'.indexOf( file.type ) ||
            file.size < compressSize ||
            file._compressed ) {
        return;
    }

    opts = $.extend({}, opts );
    deferred = Base.Deferred();

    image = new Image( opts );

    // Whatever happens, release the image resources.
    deferred.always(function() {
        image.destroy();
        image = null;
    });
    image.once( 'error', deferred.reject );
    image.once( 'load', function() {
        file._info = file._info || image.info();
        file._meta = file._meta || image.meta();

        image.resize( opts.width, opts.height );
    });

    image.once( 'complete', function() {
        var blob, size;

        // In the no-image mode of mobile UC / QQ browsers,
        // ctx.getImageData throws on large images:
        // INDEX_SIZE_ERR: DOM Exception 1
        try {
            blob = image.getAsBlob( opts.type );

            size = file.size;

            // Keep the original when "compression" actually grew it.
            if ( blob.size < size ) {
                // file.source.destroy && file.source.destroy();
                file.source = blob;
                file.size = blob.size;

                file.trigger( 'resize', blob.size, size );
            }

            // Mark the file so it is never compressed twice.
            file._compressed = true;
            deferred.resolve();
        } catch ( e ) {
            // On failure just continue and upload the original image.
            deferred.resolve();
        }
    });

    file._info && image.info( file._info );
    file._meta && image.meta( file._meta );
    image.loadFromBlob( file.source );

    return deferred.promise();
}
});
});
/**
 * @fileOverview File attribute wrapper.
 */
define('file',[
    'base',
    'mediator'
], function( Base, Mediator ) {

    var $ = Base.$,
        idPrefix = 'WU_FILE_',
        idSuffix = 0,
        rExt = /\.([^.]+)$/,
        statusMap = {};

    // Generate a unique, monotonically increasing file id.
    function gid() {
        return idPrefix + idSuffix++;
    }

    /**
     * File wrapper used throughout WebUploader.
     * @class File
     * @constructor
     * @grammar new File( source ) => File
     * @param {Lib.File} source A lib.File instance carrying Runtime info.
     */
    function WUFile( source ) {
        var matched;

        // File name including its extension.
        this.name = source.name || 'Untitled';

        // File size in bytes (0 when unknown).
        this.size = source.size || 0;

        // MIME type, defaulting to the generic 'application'.
        this.type = source.type || 'application';

        // Last-modified timestamp, falling back to "now".
        this.lastModifiedDate = source.lastModifiedDate || (new Date() * 1);

        // Unique id, independent of the file name.
        this.id = gid();

        // Extension parsed from the name, e.g. 'png' for test.png.
        matched = rExt.exec( this.name );
        this.ext = matched ? matched[ 1 ] : '';

        // Human-readable status text; its meaning depends on the status.
        this.statusText = '';

        // Status lives in a module-private map so it cannot be flipped
        // by assigning a property directly — use setStatus().
        statusMap[ this.id ] = WUFile.Status.INITED;

        this.source = source;
        this.loaded = 0;

        this.on( 'error', function( msg ) {
            this.setStatus( WUFile.Status.ERROR, msg );
        });
    }

    $.extend( WUFile.prototype, {

        /**
         * Change the file status; fires `statuschange` whenever the new
         * status differs from the previous one.
         * @method setStatus
         * @grammar setStatus( status[, statusText] );
         * @param {File.Status|String} status New status value.
         * @param {String} [statusText=''] Explanation text, mostly used on
         * error ('http', 'abort', 'server', ...).
         */
        setStatus: function( status, text ) {
            var previous = statusMap[ this.id ];

            if ( typeof text !== 'undefined' ) {
                this.statusText = text;
            }

            if ( status === previous ) {
                return;
            }

            statusMap[ this.id ] = status;

            /**
             * File status changed.
             * @event statuschange
             */
            this.trigger( 'statuschange', status, previous );
        },

        /**
         * Current status of this file.
         * @return {File.Status}
         */
        getStatus: function() {
            return statusMap[ this.id ];
        },

        /**
         * Raw source object this file wraps.
         * @return {*}
         */
        getSource: function() {
            return this.source;
        },

        // NOTE: misspelling of "destroy" kept for API compatibility.
        destory: function() {
            delete statusMap[ this.id ];
        }
    });

    Mediator.installTo( WUFile.prototype );

    /**
     * Possible file status values:
     * * `inited`    initial state
     * * `queued`    queued, waiting for upload
     * * `progress`  uploading
     * * `complete`  upload finished
     * * `error`     upload failed, retryable
     * * `interrupt` upload interrupted, resumable
     * * `invalid`   file rejected, not retryable; auto-removed from queue
     * * `cancelled` file removed
     * @property {Object} Status
     * @namespace File
     * @class File
     * @static
     */
    WUFile.Status = {
        INITED: 'inited',
        QUEUED: 'queued',
        PROGRESS: 'progress',
        ERROR: 'error',
        COMPLETE: 'complete',
        CANCELLED: 'cancelled',
        INTERRUPT: 'interrupt',
        INVALID: 'invalid'
    };

    return WUFile;
});
/**
 * @fileOverview File queue.
 */
define('queue',[
    'base',
    'mediator',
    'file'
], function( Base, Mediator, WUFile ) {

    var $ = Base.$,
        STATUS = WUFile.Status;

    /**
     * File queue holding files in every status.
     * @class Queue
     * @extends Mediator
     */
    function Queue() {
        /**
         * Counters kept in sync with file status changes:
         * * `numOfQueue`        files in the queue
         * * `numOfSuccess`      files uploaded successfully
         * * `numOfCancel`       files removed
         * * `numOfProgress`     files currently uploading
         * * `numOfUploadFailed` files whose upload failed
         * * `numOfInvalid`      invalid files
         * @property {Object} stats
         */
        this.stats = {
            numOfQueue: 0,
            numOfSuccess: 0,
            numOfCancel: 0,
            numOfProgress: 0,
            numOfUploadFailed: 0,
            numOfInvalid: 0
        };

        // Files awaiting upload, in order.
        this._queue = [];

        // Every file ever added, keyed by id.
        this._map = {};
    }

    $.extend( Queue.prototype, {

        /**
         * Append a file at the tail of the queue.
         * @method append
         * @param {File} file File object
         */
        append: function( file ) {
            this._queue.push( file );
            this._fileAdded( file );
            return this;
        },

        /**
         * Insert a file at the head of the queue.
         * @method prepend
         * @param {File} file File object
         */
        prepend: function( file ) {
            this._queue.unshift( file );
            this._fileAdded( file );
            return this;
        },

        /**
         * Look a file up by id; non-string input is returned untouched.
         * @method getFile
         * @param {String} fileId file id
         * @return {File}
         */
        getFile: function( fileId ) {
            return typeof fileId === 'string' ?
                    this._map[ fileId ] : fileId;
        },

        /**
         * Return the first file in the given status (QUEUED by default),
         * or null when none matches.
         * @grammar fetch( status ) => File
         * @method fetch
         * @param {String} status File status value.
         * @return {File}
         */
        fetch: function( status ) {
            var candidates = this._queue,
                total = candidates.length,
                idx;

            status = status || STATUS.QUEUED;

            for ( idx = 0; idx < total; idx++ ) {
                if ( candidates[ idx ].getStatus() === status ) {
                    return candidates[ idx ];
                }
            }

            return null;
        },

        /**
         * Re-order the queue with a custom comparator, which controls
         * the upload order.
         * @grammar sort( fn ) => undefined
         * @method sort
         * @param {Function} fn comparator
         */
        sort: function( fn ) {
            typeof fn === 'function' && this._queue.sort( fn );
        },

        /**
         * Collect files matching any of the given statuses; with no
         * arguments every file is returned.
         * @grammar getFiles( [status1[, status2 ...]] ) => Array
         * @method getFiles
         * @param {String} [status] File status value.
         */
        getFiles: function() {
            var wanted = [].slice.call( arguments, 0 ),
                matched = [];

            $.each( this._queue, function( _, file ) {
                if ( !wanted.length ||
                        ~$.inArray( file.getStatus(), wanted ) ) {
                    matched.push( file );
                }
            });

            return matched;
        },

        // Register the file in the id map (once) and move it to QUEUED.
        _fileAdded: function( file ) {
            var me = this;

            if ( !this._map[ file.id ] ) {
                this._map[ file.id ] = file;

                file.on( 'statuschange', function( cur, pre ) {
                    me._onFileStatusChange( cur, pre );
                });
            }

            file.setStatus( STATUS.QUEUED );
        },

        // Keep this.stats consistent whenever a file changes status.
        _onFileStatusChange: function( curStatus, preStatus ) {
            var stats = this.stats;

            // Undo the counter of the status the file is leaving...
            preStatus === STATUS.PROGRESS && stats.numOfProgress--;
            preStatus === STATUS.QUEUED && stats.numOfQueue--;
            preStatus === STATUS.ERROR && stats.numOfUploadFailed--;
            preStatus === STATUS.INVALID && stats.numOfInvalid--;

            // ...then bump the counter of the status it is entering.
            curStatus === STATUS.QUEUED && stats.numOfQueue++;
            curStatus === STATUS.PROGRESS && stats.numOfProgress++;
            curStatus === STATUS.ERROR && stats.numOfUploadFailed++;
            curStatus === STATUS.COMPLETE && stats.numOfSuccess++;
            curStatus === STATUS.CANCELLED && stats.numOfCancel++;
            curStatus === STATUS.INVALID && stats.numOfInvalid++;
        }
    });

    Mediator.installTo( Queue.prototype );

    return Queue;
});
/**
 * @fileOverview Queue widget: bridges the Uploader with the file Queue.
 */
define('widgets/queue',[
    'base',
    'uploader',
    'queue',
    'file',
    'lib/file',
    'runtime/client',
    'widgets/widget'
], function( Base, Uploader, Queue, WUFile, File, RuntimeClient ) {

    var $ = Base.$,
        rExt = /\.\w+$/,
        Status = WUFile.Status;

    return Uploader.register({
        'sort-files': 'sortFiles',
        'add-file': 'addFiles',
        'get-file': 'getFile',
        'fetch-file': 'fetchFile',
        'get-stats': 'getStats',
        'get-files': 'getFiles',
        'remove-file': 'removeFile',
        'retry': 'retry',
        'reset': 'reset',
        'accept-file': 'acceptFile'
    }, {

        // Build the accept-extension whitelist regexp and the backing
        // Queue. Under the html5 runtime, additionally prepare a
        // placeholder runtime so raw native File objects can be wrapped.
        init: function( opts ) {
            var me = this,
                deferred, len, i, item, arr, accept, runtime;

            if ( $.isPlainObject( opts.accept ) ) {
                opts.accept = [ opts.accept ];
            }

            // Build a matching regular expression from the accept
            // extensions, e.g. 'gif,jpg' -> /\.gif$|\.jpg$/i.
            if ( opts.accept ) {
                arr = [];

                for ( i = 0, len = opts.accept.length; i < len; i++ ) {
                    item = opts.accept[ i ].extensions;
                    item && arr.push( item );
                }

                if ( arr.length ) {
                    accept = '\\.' + arr.join(',')
                            .replace( /,/g, '$|\\.' )
                            .replace( /\*/g, '.*' ) + '$';
                }

                me.accept = new RegExp( accept, 'i' );
            }

            me.queue = new Queue();
            me.stats = me.queue.stats;

            // If the current runtime is not html5, stop here and skip
            // the placeholder setup below.
            if ( this.request('predict-runtime-type') !== 'html5' ) {
                return;
            }

            // Create an html5 runtime placeholder so that native File
            // objects added from outside can be wrapped correctly for
            // webuploader's use.
            deferred = Base.Deferred();
            runtime = new RuntimeClient('Placeholder');
            runtime.connectRuntime({
                runtimeOrder: 'html5'
            }, function() {
                me._ruid = runtime.getRuid();
                deferred.resolve();
            });
            return deferred.promise();
        },

        // Support adding a raw native File object directly: wrap it into
        // a runtime-aware File, then into a WUFile.
        _wrapFile: function( file ) {
            if ( !(file instanceof WUFile) ) {

                if ( !(file instanceof File) ) {
                    if ( !this._ruid ) {
                        throw new Error('Can\'t add external files.');
                    }
                    file = new File( this._ruid, file );
                }

                file = new WUFile( file );
            }

            return file;
        },

        // Decide whether a file may enter the queue: it must exist, be at
        // least 6 bytes, and (when its name has an extension) match the
        // accept whitelist.
        acceptFile: function( file ) {
            var invalid = !file || file.size < 6 || this.accept &&

                    // Only apply the extension whitelist when the name
                    // actually carries an extension.
                    rExt.exec( file.name ) && !this.accept.test( file.name );

            return !invalid;
        },

        /**
         * @event beforeFileQueued
         * @param {File} file File object
         * @description Fired before a file enters the queue; a handler
         * returning `false` keeps the file out of the queue.
         * @for Uploader
         */

        /**
         * @event fileQueued
         * @param {File} file File object
         * @description Fired after a file has been queued.
         * @for Uploader
         */

        // Wrap, validate and enqueue one file, firing the events above.
        _addFile: function( file ) {
            var me = this;

            file = me._wrapFile( file );

            // Fire `beforeFileQueued` before any type checking, so
            // listeners get a chance to veto.
            if ( !me.owner.trigger( 'beforeFileQueued', file ) ) {
                return;
            }

            // Type mismatch: fire an error event and bail out.
            if ( !me.acceptFile( file ) ) {
                me.owner.trigger( 'error', 'Q_TYPE_DENIED', file );
                return;
            }

            me.queue.append( file );
            me.owner.trigger( 'fileQueued', file );
            return file;
        },

        getFile: function( fileId ) {
            return this.queue.getFile( fileId );
        },

        /**
         * @event filesQueued
         * @param {File} files Array of raw File (lib/File) objects.
         * @description Fired after a batch of files has been queued.
         * @for Uploader
         */

        /**
         * @method addFiles
         * @grammar addFiles( file ) => undefined
         * @grammar addFiles( [file1, file2 ...] ) => undefined
         * @param {Array of File or File} [files] File object(s)
         * @description Add file(s) to the queue; honours the `auto` option
         * by starting the upload immediately.
         * @for Uploader
         */
        addFiles: function( files ) {
            var me = this;

            // A single file (no .length) is normalised into an array.
            if ( !files.length ) {
                files = [ files ];
            }

            files = $.map( files, function( file ) {
                return me._addFile( file );
            });

            me.owner.trigger( 'filesQueued', files );

            if ( me.options.auto ) {
                me.request('start-upload');
            }
        },

        getStats: function() {
            return this.stats;
        },

        /**
         * @event fileDequeued
         * @param {File} file File object
         * @description Fired after a file has been removed from the queue.
         * @for Uploader
         */

        /**
         * @method removeFile
         * @grammar removeFile( file ) => undefined
         * @grammar removeFile( id ) => undefined
         * @param {File|id} file File object or its id
         * @description Remove one file from the queue.
         * @for Uploader
         * @example
         *
         * $li.on('click', '.remove-this', function() {
         *     uploader.removeFile( file );
         * })
         */
        removeFile: function( file ) {
            var me = this;

            file = file.id ? file : me.queue.getFile( file );

            file.setStatus( Status.CANCELLED );
            me.owner.trigger( 'fileDequeued', file );
        },

        /**
         * @method getFiles
         * @grammar getFiles() => Array
         * @grammar getFiles( status1, status2, status... ) => Array
         * @description Return files in the given status(es); with no
         * arguments every file is returned.
         * @for Uploader
         * @example
         * console.log( uploader.getFiles() );    // => all files
         * console.log( uploader.getFiles('error') )    // => all error files.
         */
        getFiles: function() {
            return this.queue.getFiles.apply( this.queue, arguments );
        },

        fetchFile: function() {
            return this.queue.fetch.apply( this.queue, arguments );
        },

        /**
         * @method retry
         * @grammar retry() => undefined
         * @grammar retry( file ) => undefined
         * @description Retry uploading: either one specific file, or every
         * file that previously errored.
         * @for Uploader
         * @example
         * function retry() {
         *     uploader.retry();
         * }
         */
        retry: function( file, noForceStart ) {
            var me = this,
                files, i, len;

            if ( file ) {
                file = file.id ? file : me.queue.getFile( file );
                file.setStatus( Status.QUEUED );
                noForceStart || me.request('start-upload');
                return;
            }

            files = me.queue.getFiles( Status.ERROR );
            i = 0;
            len = files.length;

            for ( ; i < len; i++ ) {
                file = files[ i ];
                file.setStatus( Status.QUEUED );
            }

            me.request('start-upload');
        },

        /**
         * @method sort
         * @grammar sort( fn ) => undefined
         * @description Sort the queued files; call before uploading to
         * control the upload order.
         * @for Uploader
         */
        sortFiles: function() {
            return this.queue.sort.apply( this.queue, arguments );
        },

        /**
         * @method reset
         * @grammar reset() => undefined
         * @description Reset the uploader. Currently only the queue is
         * reset.
         * @for Uploader
         * @example
         * uploader.reset();
         */
        reset: function() {
            this.queue = new Queue();
            this.stats = this.queue.stats;
        }
    });
});
/**
 * @fileOverview Adds methods for obtaining Runtime-related information.
 */
define('widgets/runtime',[
    'uploader',
    'runtime/runtime',
    'widgets/widget'
], function( Uploader, Runtime ) {

    // Static capability probe, e.g. WebUploader.Uploader.support().
    Uploader.support = function() {
        return Runtime.hasRuntime.apply( Runtime, arguments );
    };

    return Uploader.register({
        'predict-runtime-type': 'predictRuntmeType'
    }, {

        init: function() {
            // Fail fast when no runtime is usable in this browser.
            if ( !this.predictRuntmeType() ) {
                throw Error('Runtime Error');
            }
        },

        /**
         * Predict which `Runtime` the Uploader is going to use.
         * (Method name keeps its historical misspelling for
         * backwards compatibility.)
         * @grammar predictRuntmeType() => String
         * @method predictRuntmeType
         * @for Uploader
         */
        predictRuntmeType: function() {
            var candidates = this.options.runtimeOrder || Runtime.orders,
                predicted = this.type,
                idx, total;

            if ( !predicted ) {
                candidates = candidates.split( /\s*,\s*/g );

                for ( idx = 0, total = candidates.length;
                        idx < total; idx++ ) {

                    if ( Runtime.hasRuntime( candidates[ idx ] ) ) {
                        predicted = candidates[ idx ];
                        this.type = predicted;
                        break;
                    }
                }
            }

            return predicted;
        }
    });
});
/**
 * @fileOverview Transport
 */
define('lib/transport',[
    'base',
    'runtime/client',
    'mediator'
], function( Base, RuntimeClient, Mediator ) {

    var $ = Base.$;

    /**
     * Runtime-backed transport used to send one blob (whole file or chunk)
     * to the server. Progress events reset the inactivity timeout;
     * load/error finalize progress at 100% and clear the timer.
     */
    function Transport( opts ) {
        var me = this;

        opts = me.options = $.extend( true, {}, Transport.options, opts || {} );
        RuntimeClient.call( this, 'Transport' );

        this._blob = null;
        this._formData = opts.formData || {};
        this._headers = opts.headers || {};

        // Any progress counts as activity: restart the timeout watchdog.
        this.on( 'progress', this._timeout );
        this.on( 'load error', function() {
            me.trigger( 'progress', 1 );
            clearTimeout( me._timer );
        });
    }

    // Default transport options.
    Transport.options = {
        server: '',
        method: 'POST',

        // Whether cookies are sent on cross-origin requests;
        // only effective with the html5 runtime.
        withCredentials: false,
        fileVal: 'file',
        timeout: 2 * 60 * 1000,    // 2 minutes
        formData: {},
        headers: {},
        sendAsBinary: false
    };

    $.extend( Transport.prototype, {

        // Attach the blob to send. Only one blob is kept; the last call
        // wins.
        appendBlob: function( key, blob, filename ) {
            var me = this,
                opts = me.options;

            if ( me.getRuid() ) {
                me.disconnectRuntime();
            }

            // Connect to the same runtime the blob belongs to.
            me.connectRuntime( blob.ruid, function() {
                me.exec('init');
            });

            me._blob = blob;
            opts.fileVal = key || opts.fileVal;
            opts.filename = filename || opts.filename;
        },

        // Add extra form fields: one key/value pair or a whole object.
        append: function( key, value ) {
            if ( typeof key === 'object' ) {
                $.extend( this._formData, key );
            } else {
                this._formData[ key ] = value;
            }
        },

        // Set request headers: one key/value pair or a whole object.
        setRequestHeader: function( key, value ) {
            if ( typeof key === 'object' ) {
                $.extend( this._headers, key );
            } else {
                this._headers[ key ] = value;
            }
        },

        // Start the request and arm the inactivity timeout.
        send: function( method ) {
            this.exec( 'send', method );
            this._timeout();
        },

        // Abort the request and cancel the pending timeout.
        abort: function() {
            clearTimeout( this._timer );
            return this.exec('abort');
        },

        // Tear down: notify listeners, drop them, destroy the runtime
        // side and disconnect.
        destroy: function() {
            this.trigger('destroy');
            this.off();
            this.exec('destroy');
            this.disconnectRuntime();
        },

        // Raw response text as reported by the runtime.
        getResponse: function() {
            return this.exec('getResponse');
        },

        // Response parsed as JSON, as reported by the runtime.
        getResponseAsJson: function() {
            return this.exec('getResponseAsJson');
        },

        // HTTP status of the request.
        getStatus: function() {
            return this.exec('getStatus');
        },

        // (Re)start the inactivity watchdog: when no progress arrives
        // within options.timeout ms, abort and fire a 'timeout' error.
        // A falsy timeout disables the watchdog entirely.
        _timeout: function() {
            var me = this,
                duration = me.options.timeout;

            if ( !duration ) {
                return;
            }

            clearTimeout( me._timer );
            me._timer = setTimeout(function() {
                me.abort();
                me.trigger( 'error', 'timeout' );
            }, duration );
        }
    });

    // Give Transport event capabilities.
    Mediator.installTo( Transport.prototype );

    return Transport;
});
/**
 * @fileOverview Handles the actual file uploading.
 */
define('widgets/upload',[
    'base',
    'uploader',
    'file',
    'lib/transport',
    'widgets/widget'
], function( Base, Uploader, WUFile, Transport ) {

    var $ = Base.$,
        isPromise = Base.isPromise,
        Status = WUFile.Status;

    // Add default configuration entries.
    $.extend( Uploader.options, {

        /**
         * @property {Boolean} [prepareNextFile=false]
         * @namespace options
         * @for Uploader
         * @description Whether the next file may be prepared while the
         * current one is still transferring. Preparation (image
         * compression, md5 hashing, ...) can be costly; overlapping it
         * with the current transfer saves total time.
         */
        prepareNextFile: false,

        /**
         * @property {Boolean} [chunked=false]
         * @namespace options
         * @for Uploader
         * @description Whether large files are uploaded in chunks.
         */
        chunked: false,

        /**
         * @property {Boolean} [chunkSize=5242880]
         * @namespace options
         * @for Uploader
         * @description Chunk size when chunking is enabled; defaults to 5M.
         */
        chunkSize: 5 * 1024 * 1024,

        /**
         * @property {Boolean} [chunkRetry=2]
         * @namespace options
         * @for Uploader
         * @description How many automatic retries a chunk gets after a
         * network error.
         */
        chunkRetry: 2,

        /**
         * @property {Boolean} [threads=3]
         * @namespace options
         * @for Uploader
         * @description Upload concurrency: maximum simultaneous transfers.
         */
        threads: 3,

        /**
         * @property {Object} [formData]
         * @namespace options
         * @for Uploader
         * @description Extra request parameters sent with every upload.
         */
        formData: null

        /**
         * @property {Object} [fileVal='file']
         * @namespace options
         * @for Uploader
         * @description Name of the file field in the upload request.
         */

        /**
         * @property {Object} [method='POST']
         * @namespace options
         * @for Uploader
         * @description HTTP method used for uploads, `POST` or `GET`.
         */

        /**
         * @property {Object} [sendAsBinary=false]
         * @namespace options
         * @for Uploader
         * @description Whether to send the file as a raw binary stream:
         * the whole request body (`php://input`) is then the file content
         * and other parameters arrive in the $_GET array.
         */
    });

    // Splits a file into chunk descriptors and returns a small cursor
    // over them ({file, has(), fetch()}).
    function CuteFile( file, chunkSize ) {
        var pending = [],
            blob = file.source,
            total = blob.size,
            chunks = chunkSize ? Math.ceil( total / chunkSize ) : 1,
            start = 0,
            index = 0,
            len;

        while ( index < chunks ) {
            len = Math.min( chunkSize, total - start );

            pending.push({
                file: file,
                start: start,
                end: chunkSize ? (start + len) : total,
                total: total,
                chunks: chunks,
                chunk: index++
            });
            start += len;
        }

        file.blocks = pending.concat();
        // NOTE: "remaning" (sic) — historical misspelling of "remaining";
        // kept because other code reads this exact property name.
        file.remaning = pending.length;

        return {
            file: file,

            has: function() {
                return !!pending.length;
            },

            fetch: function() {
                return pending.shift();
            }
        };
    }

    Uploader.register({
        'start-upload': 'start',
        'stop-upload': 'stop',
        'skip-file': 'skipFile',
        'is-in-progress': 'isInProgress'
    }, {

        // Set up transfer bookkeeping and make sure a finished file's
        // outstanding chunk transports are aborted and released.
        init: function() {
            var owner = this.owner;

            // NOTE: "runing" (sic) — kept for compatibility.
            this.runing = false;

            // Blocks currently in flight; bounded by options.threads.
            this.pool = [];

            // Files prepared (or being prepared) for upload.
            this.pending = [];

            // Number of chunks not yet fully uploaded.
            this.remaning = 0;
            this.__tick = Base.bindFn( this._tick, this );

            owner.on( 'uploadComplete', function( file ) {
                // Cancel any other outstanding blocks of this file.
                file.blocks && $.each( file.blocks, function( _, v ) {
                    v.transport && (v.transport.abort(), v.transport.destroy());
                    delete v.transport;
                });

                delete file.blocks;
                delete file.remaning;
            });
        },

        /**
         * @event startUpload
         * @description Fired when the upload flow starts.
         * @for Uploader
         */

        /**
         * Start uploading. Can be called from the initial state to begin,
         * or from a paused state to resume.
         * @grammar upload() => undefined
         * @method upload
         * @for Uploader
         */
        start: function() {
            var me = this;

            // Drop invalid files before starting.
            $.each( me.request( 'get-files', Status.INVALID ), function() {
                me.request( 'remove-file', this );
            });

            if ( me.runing ) {
                return;
            }

            me.runing = true;

            // Resume any interrupted (paused) transfers.
            $.each( me.pool, function( _, v ) {
                var file = v.file;

                if ( file.getStatus() === Status.INTERRUPT ) {
                    file.setStatus( Status.PROGRESS );
                    me._trigged = false;
                    v.transport && v.transport.send();
                }
            });

            me._trigged = false;
            me.owner.trigger('startUpload');
            Base.nextTick( me.__tick );
        },

        /**
         * @event stopUpload
         * @description Fired when the upload flow is paused.
         * @for Uploader
         */

        /**
         * Pause uploading. The first argument controls whether files
         * currently transferring are interrupted too.
         * @grammar stop() => undefined
         * @grammar stop( true ) => undefined
         * @method stop
         * @for Uploader
         */
        stop: function( interrupt ) {
            var me = this;

            if ( me.runing === false ) {
                return;
            }

            me.runing = false;

            interrupt && $.each( me.pool, function( _, v ) {
                v.transport && v.transport.abort();
                v.file.setStatus( Status.INTERRUPT );
            });

            me.owner.trigger('stopUpload');
        },

        /**
         * Whether the Uploader is currently uploading.
         * @grammar isInProgress() => Boolean
         * @method isInProgress
         * @for Uploader
         */
        isInProgress: function() {
            return !!this.runing;
        },

        // Shortcut to the queue widget's statistics.
        getStats: function() {
            return this.request('get-stats');
        },

        /**
         * Skip a file: mark it as uploaded (COMPLETE by default) and
         * abort any of its in-flight chunk transports.
         * @grammar skipFile( file ) => undefined
         * @method skipFile
         * @for Uploader
         */
        skipFile: function( file, status ) {
            file = this.request( 'get-file', file );

            file.setStatus( status || Status.COMPLETE );
            file.skipped = true;

            // Abort transports if the file is mid-upload.
            file.blocks && $.each( file.blocks, function( _, v ) {
                var _tr = v.transport;

                if ( _tr ) {
                    _tr.abort();
                    _tr.destroy();
                    delete v.transport;
                }
            });

            this.owner.trigger( 'uploadSkip', file );
        },

        /**
         * @event uploadFinished
         * @description Fired once every file has finished uploading.
         * @for Uploader
         */

        // Scheduler heartbeat: fill free pool slots with the next block,
        // or declare the whole upload finished.
        _tick: function() {
            var me = this,
                opts = me.options,
                fn, val;

            // A previous promise has not settled yet: re-run afterwards.
            if ( me._promise ) {
                return me._promise.always( me.__tick );
            }

            // There is a free slot and something left to send.
            if ( me.pool.length < opts.threads && (val = me._nextBlock()) ) {
                me._trigged = false;

                fn = function( val ) {
                    me._promise = null;

                    // `val` may come from a rejection, so check its shape.
                    val && val.file && me._startSend( val );
                    Base.nextTick( me.__tick );
                };

                me._promise = isPromise( val ) ? val.always( fn ) : fn( val );

            // Nothing left to send and nothing in flight: we are done.
            } else if ( !me.remaning && !me.getStats().numOfQueue ) {
                me.runing = false;

                me._trigged || Base.nextTick(function() {
                    me.owner.trigger('uploadFinished');
                });
                me._trigged = true;
            }
        },

        // Produce the next block to upload: remaining chunks of the
        // active file first, otherwise the next prepared file.
        _nextBlock: function() {
            var me = this,
                act = me._act,
                opts = me.options,
                next, done;

            // The active file still has chunks to send: return one.
            if ( act && act.has() &&
                    act.file.getStatus() === Status.PROGRESS ) {

                // Optionally prepare the next file ahead of time.
                if ( opts.prepareNextFile && !me.pending.length ) {
                    me._prepareNextFile();
                }

                return act.fetch();

            // Otherwise, while running, prepare the next file and hand
            // out its first chunk once it is ready.
            } else if ( me.runing ) {

                // Take from the prepared cache first, else from the queue.
                if ( !me.pending.length && me.getStats().numOfQueue ) {
                    me._prepareNextFile();
                }

                next = me.pending.shift();
                done = function( file ) {
                    if ( !file ) {
                        return null;
                    }

                    act = CuteFile( file, opts.chunked ? opts.chunkSize : 0 );
                    me._act = act;
                    return act.fetch();
                };

                // The file may still be preparing, or already prepared.
                return isPromise( next ) ?
                        next[ next.pipe ? 'pipe' : 'then']( done ) :
                        done( next );
            }
        },

        /**
         * @event uploadStart
         * @param {File} file File object
         * @description Fired once per file, just before it starts
         * uploading.
         * @for Uploader
         */
        _prepareNextFile: function() {
            var me = this,
                file = me.request('fetch-file'),
                pending = me.pending,
                promise;

            if ( file ) {
                promise = me.request( 'before-send-file', file, function() {

                    // The file may have been skipped meanwhile; its
                    // status is then certainly no longer QUEUED.
                    if ( file.getStatus() === Status.QUEUED ) {
                        me.owner.trigger( 'uploadStart', file );
                        file.setStatus( Status.PROGRESS );
                        return file;
                    }

                    return me._finishFile( file );
                });

                // When ready, replace the promise in `pending` with the
                // file itself.
                promise.done(function() {
                    var idx = $.inArray( promise, pending );

                    ~idx && pending.splice( idx, 1, file );
                });

                // A before-send-file hook already failed.
                promise.fail(function( reason ) {
                    file.setStatus( Status.ERROR, reason );
                    me.owner.trigger( 'uploadError', file, reason );
                    me.owner.trigger( 'uploadComplete', file );
                });

                pending.push( promise );
            }
        },

        // Free the slot so another chunk can start uploading.
        _popBlock: function( block ) {
            var idx = $.inArray( block, this.pool );

            this.pool.splice( idx, 1 );
            block.file.remaning--;
            this.remaning--;
        },

        // Start sending a block. May be skipped: a rejected before-send
        // promise means this chunk is to be passed over.
        _startSend: function( block ) {
            var me = this,
                file = block.file,
                promise;

            me.pool.push( block );
            me.remaning++;

            // With a single chunk, send the original source directly so
            // the content-type information is not lost.
            block.blob = block.chunks === 1 ? file.source :
                    file.source.slice( block.start, block.end );

            // Hook: async work may be needed before each chunk is sent.
            promise = me.request( 'before-send', block, function() {

                // The file may already have errored out; nothing to send.
                if ( file.getStatus() === Status.PROGRESS ) {
                    me._doSend( block );
                } else {
                    me._popBlock( block );
                    Base.nextTick( me.__tick );
                }
            });

            // On failure, skip this chunk.
            promise.fail(function() {
                if ( file.remaning === 1 ) {
                    me._finishFile( file ).always(function() {
                        block.percentage = 1;
                        me._popBlock( block );
                        me.owner.trigger( 'uploadComplete', file );
                        Base.nextTick( me.__tick );
                    });
                } else {
                    block.percentage = 1;
                    me._popBlock( block );
                    Base.nextTick( me.__tick );
                }
            });
        },

        /**
         * @event uploadBeforeSend
         * @param {Object} object
         * @param {Object} data Default upload parameters; extend this
         * object to customize what gets sent.
         * @description Fired before a file's chunk is sent, mainly to ask
         * whether extra parameters should be attached. With chunking
         * enabled this may fire several times for a large file.
         * @for Uploader
         */

        /**
         * @event uploadAccept
         * @param {Object} object
         * @param {Object} ret Server response as JSON; if the server did
         * not return JSON, read ret._raw and parse it yourself.
         * @description Fired after the server responds, asking whether the
         * response is valid. If a handler returns `false`, the file gets
         * a `server`-typed `uploadError` event.
         * @for Uploader
         */

        /**
         * @event uploadProgress
         * @param {File} file File object
         * @param {Number} percentage Upload progress
         * @description Fired during upload, carrying the progress.
         * @for Uploader
         */

        /**
         * @event uploadError
         * @param {File} file File object
         * @param {String} reason Error code
         * @description Fired when a file fails to upload.
         * @for Uploader
         */

        /**
         * @event uploadSuccess
         * @param {File} file File object
         * @param {Object} response Data returned by the server
         * @description Fired when a file uploads successfully.
         * @for Uploader
         */

        /**
         * @event uploadComplete
         * @param {File} [file] File object
         * @description Fired when a file finishes uploading, whether it
         * succeeded or failed.
         * @for Uploader
         */

        // Perform the actual transfer of one block.
        _doSend: function( block ) {
            var me = this,
                owner = me.owner,
                opts = me.options,
                file = block.file,
                tr = new Transport( opts ),
                data = $.extend({}, opts.formData ),
                headers = $.extend({}, opts.headers ),
                requestAccept, ret;

            block.transport = tr;

            tr.on( 'destroy', function() {
                delete block.transport;
                me._popBlock( block );
                Base.nextTick( me.__tick );
            });

            // Broadcast progress, aggregated per file.
            tr.on( 'progress', function( percentage ) {
                var totalPercent = 0,
                    uploaded = 0;

                // The abort may not have landed yet and progress can
                // still arrive here.
                // if ( !file.blocks ) {
                //     return;
                // }

                totalPercent = block.percentage = percentage;

                if ( block.chunks > 1 ) {    // Compute file-wide progress.
                    $.each( file.blocks, function( _, v ) {
                        uploaded += (v.percentage || 0) * (v.end - v.start);
                    });

                    totalPercent = uploaded / file.size;
                }

                owner.trigger( 'uploadProgress', file, totalPercent || 0 );
            });

            // Ask listeners whether the returned result is an error.
            requestAccept = function( reject ) {
                var fn;

                ret = tr.getResponseAsJson() || {};
                ret._raw = tr.getResponse();
                fn = function( value ) {
                    reject = value;
                };

                // A server response is not necessarily a success: ask
                // whether it is acceptable.
                if ( !owner.trigger( 'uploadAccept', block, ret, fn ) ) {
                    reject = reject || 'server';
                }

                return reject;
            };

            // Retry on failure, then broadcast the upload error.
            tr.on( 'error', function( type, flag ) {
                block.retried = block.retried || 0;

                // Automatic retry.
                if ( block.chunks > 1 && ~'http,abort'.indexOf( type ) &&
                        block.retried < opts.chunkRetry ) {

                    block.retried++;
                    tr.send();

                } else {

                    // http status 500 ~ 600
                    if ( !flag && type === 'server' ) {
                        type = requestAccept( type );
                    }

                    file.setStatus( Status.ERROR, type );
                    owner.trigger( 'uploadError', file, type );
                    owner.trigger( 'uploadComplete', file );
                }
            });

            // Upload succeeded.
            tr.on( 'load', function() {
                var reason;

                // If the response is unacceptable, reroute to error
                // handling.
                if ( (reason = requestAccept()) ) {
                    tr.trigger( 'error', reason, true );
                    return;
                }

                // Everything uploaded.
                if ( file.remaning === 1 ) {
                    me._finishFile( file, ret );
                } else {
                    tr.destroy();
                }
            });

            // Default upload fields.
            data = $.extend( data, {
                id: file.id,
                name: file.name,
                type: file.type,
                lastModifiedDate: file.lastModifiedDate,
                size: file.size
            });

            block.chunks > 1 && $.extend( data, {
                chunks: block.chunks,
                chunk: block.chunk
            });

            // Fields can still be added just before sending: listen to
            // this event to extend the defaults.
            owner.trigger( 'uploadBeforeSend', block, data, headers );

            // Start sending.
            tr.appendBlob( opts.fileVal, block.blob, file.name );
            tr.append( data );
            tr.setRequestHeader( headers );
            tr.send();
        },

        // Finalize a file's upload.
        _finishFile: function( file, ret, hds ) {
            var owner = this.owner;

            return owner
                    .request( 'after-send-file', arguments, function() {
                        file.setStatus( Status.COMPLETE );
                        owner.trigger( 'uploadSuccess', file, ret, hds );
                    })
                    .fail(function( reason ) {

                        // Do not touch the status if something outside
                        // already marked the file invalid etc.
                        if ( file.getStatus() === Status.PROGRESS ) {
                            file.setStatus( Status.ERROR, reason );
                        }

                        owner.trigger( 'uploadError', file, reason );
                    })
                    .always(function() {
                        owner.trigger( 'uploadComplete', file );
                    });
        }
    });
});
/**
 * @fileOverview CompBase — shared base for runtime components.
 */
define('runtime/compbase',[],function() {

    /**
     * Binds a component to its owning client and runtime, and proxies
     * events back to the owner.
     */
    function CompBase( owner, runtime ) {
        this.owner = owner;
        this.options = owner.options;

        // Accessors close over `runtime` rather than storing it publicly.
        this.getRuntime = function() {
            return runtime;
        };

        this.getRuid = function() {
            return runtime.uid;
        };

        this.trigger = function() {
            return owner.trigger.apply( owner, arguments );
        };
    }

    return CompBase;
});
/**
 * @fileOverview Html5Runtime
 */
define('runtime/html5/runtime',[
    'base',
    'runtime/runtime',
    'runtime/compbase'
], function( Base, Runtime, CompBase ) {

    var type = 'html5',
        components = {};

    /**
     * HTML5 runtime: dispatches component method calls coming from
     * RuntimeClient instances to lazily-created per-client component
     * instances.
     */
    function Html5Runtime() {
        var pool = {},
            me = this,
            destory = this.destory;

        Runtime.apply( me, arguments );
        me.type = type;

        // The caller of this method is actually a RuntimeClient.
        me.exec = function( comp, fn/*, args...*/) {
            var client = this,
                uid = client.uid,
                args = Base.slice( arguments, 2 ),
                instance;

            if ( components[ comp ] ) {
                // One component instance per client uid, created lazily.
                instance = pool[ uid ] = pool[ uid ] ||
                        new components[ comp ]( client, me );

                if ( instance[ fn ] ) {
                    return instance[ fn ].apply( instance, args );
                }
            }
        };

        me.destory = function() {
            // @todo remove all instances from the pool
            return destory && destory.apply( this, arguments );
        };
    }

    Base.inherits( Runtime, {
        constructor: Html5Runtime,

        // No external plugin to hand-shake with: report ready on the
        // next tick.
        init: function() {
            var me = this;
            setTimeout(function() {
                me.trigger('ready');
            }, 1 );
        }

    });

    // Register a component class under the given name.
    Html5Runtime.register = function( name, component ) {
        var klass = components[ name ] = Base.inherits( CompBase, component );
        return klass;
    };

    // Register the html5 runtime, but only when the browser supports
    // the required primitives.
    if ( window.Blob && window.FileReader && window.DataView ) {
        Runtime.addRuntime( type, Html5Runtime );
    }

    return Html5Runtime;
});
/**
 * @fileOverview Blob HTML5 implementation.
 */
define('runtime/html5/blob',[
    'runtime/html5/runtime',
    'lib/blob'
], function( Html5Runtime, Blob ) {

    return Html5Runtime.register( 'Blob', {

        // Cut [start, end) out of the underlying native blob, picking
        // whichever slice implementation this browser exposes, and wrap
        // the result in a runtime-aware Blob.
        slice: function( start, end ) {
            var source = this.owner.source,
                nativeSlice = source.slice ||
                        source.webkitSlice ||
                        source.mozSlice;

            return new Blob( this.getRuid(),
                    nativeSlice.call( source, start, end ) );
        }
    });
});
/**
 * @fileOverview FilePicker
 */
define('runtime/html5/filepicker',[
    'base',
    'runtime/html5/runtime'
], function( Base, Html5Runtime ) {

    var $ = Base.$;

    return Html5Runtime.register( 'FilePicker', {

        /**
         * Build the hidden <input type="file"> plus a transparent <label>
         * overlay inside the runtime container, and wire change / hover
         * events through to the owner.
         */
        init: function() {
            var container = this.getRuntime().getContainer(),
                me = this,
                owner = me.owner,
                opts = me.options,
                label = $( document.createElement('label') ),
                input = $( document.createElement('input') ),
                arr, i, len, mouseHandler, changeHandler;

            input.attr( 'type', 'file' );
            input.attr( 'name', opts.name );
            input.addClass('webuploader-element-invisible');

            label.on( 'click', function() {
                input.trigger('click');
            });

            label.css({
                opacity: 0,
                width: '100%',
                height: '100%',
                display: 'block',
                cursor: 'pointer',
                background: '#ffffff'
            });

            if ( opts.multiple ) {
                input.attr( 'multiple', 'multiple' );
            }

            // @todo Firefox does not support accepting by extension alone.
            if ( opts.accept && opts.accept.length > 0 ) {
                arr = [];

                for ( i = 0, len = opts.accept.length; i < len; i++ ) {
                    arr.push( opts.accept[ i ].mimeTypes );
                }

                input.attr( 'accept', arr.join(',') );
            }

            container.append( input );
            container.append( label );

            mouseHandler = function( e ) {
                owner.trigger( e.type );
            };

            // Fix: use a named handler variable instead of the deprecated
            // `arguments.callee` (forbidden in strict mode) so the listener
            // can be re-attached to the cloned input below.
            changeHandler = function( e ) {
                var clone;

                me.files = e.target.files;

                // Reset the input by swapping in a clone, otherwise
                // picking the same file twice would not fire `change`.
                clone = this.cloneNode( true );
                this.parentNode.replaceChild( clone, this );

                input.off();
                input = $( clone ).on( 'change', changeHandler )
                        .on( 'mouseenter mouseleave', mouseHandler );

                owner.trigger('change');
            };

            input.on( 'change', changeHandler );
            label.on( 'mouseenter mouseleave', mouseHandler );
        },

        // FileList captured from the last `change` event.
        getFiles: function() {
            return this.files;
        },

        destroy: function() {
            // todo
        }
    });
});
/**
 * Terms:
 *
 * Uint8Array, FileReader, BlobBuilder, atob, ArrayBuffer
 * @fileOverview Image helpers for the html5 runtime.
 */
define('runtime/html5/util',[
    'base'
], function( Base ) {

    var urlAPI = window.createObjectURL && window ||
            window.URL && URL.revokeObjectURL && URL ||
            window.webkitURL,
        createObjectURL = Base.noop,
        revokeObjectURL = createObjectURL;

    if ( urlAPI ) {
        // Call through the host object explicitly: on some Android
        // browsers the implicit context could otherwise be replaced.
        createObjectURL = function() {
            return urlAPI.createObjectURL.apply( urlAPI, arguments );
        };

        revokeObjectURL = function() {
            return urlAPI.revokeObjectURL.apply( urlAPI, arguments );
        };
    }

    // Decode the payload half of a split data: URI into a binary string.
    function decodePayload( parts ) {
        return ~parts[ 0 ].indexOf('base64') ?
                atob( parts[ 1 ] ) :
                decodeURIComponent( parts[ 1 ] );
    }

    // Fill a fresh Uint8Array with the char codes of a binary string.
    function toUint8Array( byteStr ) {
        var intArray = new Uint8Array( byteStr.length ),
            i;

        for ( i = 0; i < byteStr.length; i++ ) {
            intArray[ i ] = byteStr.charCodeAt( i );
        }

        return intArray;
    }

    return {
        createObjectURL: createObjectURL,
        revokeObjectURL: revokeObjectURL,

        // Convert a data: URI into a Blob of the URI's declared mimetype.
        dataURL2Blob: function( dataURI ) {
            var parts = dataURI.split(','),
                buffer = toUint8Array( decodePayload( parts ) ).buffer,
                mimetype = parts[ 0 ].split(':')[ 1 ].split(';')[ 0 ];

            return this.arrayBufferToBlob( buffer, mimetype );
        },

        // Convert a data: URI into a raw ArrayBuffer.
        dataURL2ArrayBuffer: function( dataURI ) {
            var parts = dataURI.split(',');

            return toUint8Array( decodePayload( parts ) ).buffer;
        },

        // Wrap an ArrayBuffer in a Blob, going through BlobBuilder on
        // Android browsers that cannot `new Blob` directly.
        arrayBufferToBlob: function( buffer, type ) {
            var builder = window.BlobBuilder || window.WebKitBlobBuilder,
                bb;

            if ( builder ) {
                bb = new builder();
                bb.append( buffer );
                return bb.getBlob( type );
            }

            return new Blob([ buffer ], type ? { type: type } : {} );
        },

        // Split out mainly because Android's canvas.toDataURL ignores the
        // jpeg type and always yields png.
        canvasToDataUrl: function( canvas, type, quality ) {
            return canvas.toDataURL( type, quality / 100 );
        },

        // Overwritten by imagemeta when that optional module is loaded.
        parseMeta: function( blob, callback ) {
            callback( false, {});
        },

        // Overwritten by imagemeta when that optional module is loaded.
        updateImageHead: function( data ) {
            return data;
        }
    };
});
/**
 * Terms:
 *
 * Uint8Array, FileReader, BlobBuilder, atob, ArrayBuffer
 * @fileOverview JPEG meta-data reader: walks the segment list and
 * dispatches APPn segments to registered parsers.
 */
define('runtime/html5/imagemeta',[
    'runtime/html5/util'
], function( Util ) {
    var api;

    api = {
        // Segment parsers keyed by JPEG marker. 0xffe1 is APP1 (EXIF);
        // runtime/html5/imagemeta/exif pushes its parser into this list.
        parsers: {
            0xffe1: []
        },

        // Only the first 256 KB are read when scanning for meta data;
        // JPEG headers always live at the front of the file.
        maxMetaDataSize: 262144,

        /**
         * Read the head of `blob` asynchronously and parse its meta data.
         * `cb( error, result )` — error is `false` on success.
         */
        parse: function( blob, cb ) {
            var me = this,
                fr = new FileReader();

            fr.onload = function() {
                cb( false, me._parse( this.result ) );
                // Break the reference cycle so the FileReader can be GC'ed.
                fr = fr.onload = fr.onerror = null;
            };

            fr.onerror = function( e ) {
                cb( e.message );
                fr = fr.onload = fr.onerror = null;
            };

            blob = blob.slice( 0, me.maxMetaDataSize );
            fr.readAsArrayBuffer( blob.getSource() );
        },

        /**
         * Walk the JPEG segment list in `buffer` (an ArrayBuffer).
         * When `noParse` is truthy only the header length is measured and
         * no registered parsers run. Returns an object holding parser
         * results plus `imageHead` (bytes between SOI and the image data),
         * or undefined for buffers too small to hold a JPEG header.
         */
        _parse: function( buffer, noParse ) {
            if ( buffer.byteLength < 6 ) {
                return;
            }

            var dataview = new DataView( buffer ),
                offset = 2,
                maxOffset = dataview.byteLength - 4,
                headLength = offset,
                ret = {},
                markerBytes, markerLength, parsers, i;

            // 0xffd8 is the JPEG SOI marker.
            if ( dataview.getUint16( 0 ) === 0xffd8 ) {

                while ( offset < maxOffset ) {
                    markerBytes = dataview.getUint16( offset );

                    // Only APPn (0xffe0-0xffef) and COM (0xfffe) segments
                    // may precede the image data.
                    if ( markerBytes >= 0xffe0 && markerBytes <= 0xffef ||
                            markerBytes === 0xfffe ) {

                        // Stored length excludes the 2 marker bytes.
                        markerLength = dataview.getUint16( offset + 2 ) + 2;

                        if ( offset + markerLength > dataview.byteLength ) {
                            break;
                        }

                        parsers = api.parsers[ markerBytes ];

                        if ( !noParse && parsers ) {
                            for ( i = 0; i < parsers.length; i += 1 ) {
                                parsers[ i ].call( api, dataview, offset,
                                        markerLength, ret );
                            }
                        }

                        offset += markerLength;
                        headLength = offset;
                    } else {
                        break;
                    }
                }

                // Anything beyond the 2-byte SOI counts as the image head.
                if ( headLength > 6 ) {
                    if ( buffer.slice ) {
                        ret.imageHead = buffer.slice( 2, headLength );
                    } else {
                        // Workaround for IE10, which does not yet
                        // support ArrayBuffer.slice:
                        ret.imageHead = new Uint8Array( buffer )
                                .subarray( 2, headLength );
                    }
                }
            }

            return ret;
        },

        /**
         * Replace the header of the JPEG in `buffer` with `head` (both
         * ArrayBuffer-like). Returns a new ArrayBuffer: SOI marker +
         * `head` + the original image body.
         */
        updateImageHead: function( buffer, head ) {
            var data = this._parse( buffer, true ),
                buf1, buf2, bodyoffset;

            bodyoffset = 2;

            if ( data.imageHead ) {
                bodyoffset = 2 + data.imageHead.byteLength;
            }

            if ( buffer.slice ) {
                buf2 = buffer.slice( bodyoffset );
            } else {
                buf2 = new Uint8Array( buffer ).subarray( bodyoffset );
            }

            buf1 = new Uint8Array( head.byteLength + 2 + buf2.byteLength );

            // Re-emit the SOI marker (0xFFD8) in front of the new head.
            buf1[ 0 ] = 0xFF;
            buf1[ 1 ] = 0xD8;
            buf1.set( new Uint8Array( head ), 2 );
            buf1.set( new Uint8Array( buf2 ), head.byteLength + 2 );

            return buf1.buffer;
        }
    };

    // Hook into the util module so consumers can call Util.parseMeta /
    // Util.updateImageHead without depending on this module directly.
    Util.parseMeta = function() {
        return api.parse.apply( api, arguments );
    };

    Util.updateImageHead = function() {
        return api.updateImageHead.apply( api, arguments );
    };

    return api;
});
/**
* Code来自于:https://github.com/blueimp/JavaScript-Load-Image
* 暂时项目中只用了orientation.
*
* 去除了 Exif Sub IFD Pointer, GPS Info IFD Pointer, Exif Thumbnail.
* @fileOverview EXIF解析
*/
// Sample
// ====================================
// Make : Apple
// Model : iPhone 4S
// Orientation : 1
// XResolution : 72 [72/1]
// YResolution : 72 [72/1]
// ResolutionUnit : 2
// Software : QuickTime 7.7.1
// DateTime : 2013:09:01 22:53:55
// ExifIFDPointer : 190
// ExposureTime : 0.058823529411764705 [1/17]
// FNumber : 2.4 [12/5]
// ExposureProgram : Normal program
// ISOSpeedRatings : 800
// ExifVersion : 0220
// DateTimeOriginal : 2013:09:01 22:52:51
// DateTimeDigitized : 2013:09:01 22:52:51
// ComponentsConfiguration : YCbCr
// ShutterSpeedValue : 4.058893515764426
// ApertureValue : 2.5260688216892597 [4845/1918]
// BrightnessValue : -0.3126686601998395
// MeteringMode : Pattern
// Flash : Flash did not fire, compulsory flash mode
// FocalLength : 4.28 [107/25]
// SubjectArea : [4 values]
// FlashpixVersion : 0100
// ColorSpace : 1
// PixelXDimension : 2448
// PixelYDimension : 3264
// SensingMethod : One-chip color area sensor
// ExposureMode : 0
// WhiteBalance : Auto white balance
// FocalLengthIn35mmFilm : 35
// SceneCaptureType : Standard
define('runtime/html5/imagemeta/exif',[
    'base',
    'runtime/html5/imagemeta'
], function( Base, ImageMeta ) {

    var EXIF = {};

    // Container for parsed tags; values are stored keyed by numeric tag id.
    EXIF.ExifMap = function() {
        return this;
    };

    // Friendly-name -> numeric tag id. Only Orientation is mapped because
    // it is the only tag this project consumes.
    EXIF.ExifMap.prototype.map = {
        'Orientation': 0x0112
    };

    // Look a tag up by name (via the map) or directly by numeric id.
    EXIF.ExifMap.prototype.get = function( id ) {
        return this[ id ] || this[ this.map[ id ] ];
    };

    // Readers for the TIFF/EXIF field types, keyed by type id.
    EXIF.exifTagTypes = {
        // byte, 8-bit unsigned int:
        1: {
            getValue: function( dataView, dataOffset ) {
                return dataView.getUint8( dataOffset );
            },
            size: 1
        },

        // ascii, 8-bit byte:
        2: {
            getValue: function( dataView, dataOffset ) {
                return String.fromCharCode( dataView.getUint8( dataOffset ) );
            },
            size: 1,
            ascii: true
        },

        // short, 16 bit int:
        3: {
            getValue: function( dataView, dataOffset, littleEndian ) {
                return dataView.getUint16( dataOffset, littleEndian );
            },
            size: 2
        },

        // long, 32 bit int:
        4: {
            getValue: function( dataView, dataOffset, littleEndian ) {
                return dataView.getUint32( dataOffset, littleEndian );
            },
            size: 4
        },

        // rational = two long values,
        // first is numerator, second is denominator:
        5: {
            getValue: function( dataView, dataOffset, littleEndian ) {
                return dataView.getUint32( dataOffset, littleEndian ) /
                    dataView.getUint32( dataOffset + 4, littleEndian );
            },
            size: 8
        },

        // slong, 32 bit signed int:
        9: {
            getValue: function( dataView, dataOffset, littleEndian ) {
                return dataView.getInt32( dataOffset, littleEndian );
            },
            size: 4
        },

        // srational, two slongs, first is numerator, second is denominator:
        10: {
            getValue: function( dataView, dataOffset, littleEndian ) {
                return dataView.getInt32( dataOffset, littleEndian ) /
                    dataView.getInt32( dataOffset + 4, littleEndian );
            },
            size: 8
        }
    };

    // undefined, 8-bit byte, value depending on field:
    EXIF.exifTagTypes[ 7 ] = EXIF.exifTagTypes[ 1 ];

    /**
     * Decode one tag's value. Small values (<= 4 bytes) are stored inline
     * in the IFD entry; larger ones are stored at an offset relative to
     * the TIFF header. Returns a scalar, an array, or a string for ascii
     * fields; undefined on malformed data (logged, not thrown).
     */
    EXIF.getExifValue = function( dataView, tiffOffset, offset, type, length,
            littleEndian ) {

        var tagType = EXIF.exifTagTypes[ type ],
            tagSize, dataOffset, values, i, str, c;

        if ( !tagType ) {
            Base.log('Invalid Exif data: Invalid tag type.');
            return;
        }

        tagSize = tagType.size * length;

        // Determine if the value is contained in the dataOffset bytes,
        // or if the value at the dataOffset is a pointer to the actual data:
        dataOffset = tagSize > 4 ? tiffOffset + dataView.getUint32( offset + 8,
                littleEndian ) : (offset + 8);

        if ( dataOffset + tagSize > dataView.byteLength ) {
            Base.log('Invalid Exif data: Invalid data offset.');
            return;
        }

        if ( length === 1 ) {
            return tagType.getValue( dataView, dataOffset, littleEndian );
        }

        values = [];

        for ( i = 0; i < length; i += 1 ) {
            values[ i ] = tagType.getValue( dataView,
                    dataOffset + i * tagType.size, littleEndian );
        }

        if ( tagType.ascii ) {
            str = '';

            // Concatenate the chars:
            for ( i = 0; i < values.length; i += 1 ) {
                c = values[ i ];

                // Ignore the terminating NULL byte(s):
                if ( c === '\u0000' ) {
                    break;
                }
                str += c;
            }

            return str;
        }
        return values;
    };

    // Parse a single 12-byte IFD entry and store it on data.exif keyed
    // by its numeric tag id.
    EXIF.parseExifTag = function( dataView, tiffOffset, offset, littleEndian,
            data ) {

        var tag = dataView.getUint16( offset, littleEndian );
        data.exif[ tag ] = EXIF.getExifValue( dataView, tiffOffset, offset,
                dataView.getUint16( offset + 2, littleEndian ),    // tag type
                dataView.getUint32( offset + 4, littleEndian ),    // tag length
                littleEndian );
    };

    /**
     * Parse every entry of the IFD at `dirOffset` and return the offset
     * to the next directory (0 when there is none); undefined on
     * malformed data.
     */
    EXIF.parseExifTags = function( dataView, tiffOffset, dirOffset,
            littleEndian, data ) {

        var tagsNumber, dirEndOffset, i;

        if ( dirOffset + 6 > dataView.byteLength ) {
            Base.log('Invalid Exif data: Invalid directory offset.');
            return;
        }

        tagsNumber = dataView.getUint16( dirOffset, littleEndian );
        dirEndOffset = dirOffset + 2 + 12 * tagsNumber;

        if ( dirEndOffset + 4 > dataView.byteLength ) {
            Base.log('Invalid Exif data: Invalid directory size.');
            return;
        }

        for ( i = 0; i < tagsNumber; i += 1 ) {
            this.parseExifTag( dataView, tiffOffset,
                    dirOffset + 2 + 12 * i,    // tag offset
                    littleEndian, data );
        }

        // Return the offset to the next directory:
        return dataView.getUint32( dirEndOffset, littleEndian );
    };

    // EXIF.getExifThumbnail = function(dataView, offset, length) {
    //     var hexData,
    //         i,
    //         b;

    //     if (!length || offset + length > dataView.byteLength) {
    //         Base.log('Invalid Exif data: Invalid thumbnail data.');
    //         return;
    //     }

    //     hexData = [];

    //     for (i = 0; i < length; i += 1) {
    //         b = dataView.getUint8(offset + i);
    //         hexData.push((b < 16 ? '0' : '') + b.toString(16));
    //     }

    //     return 'data:image/jpeg,%' + hexData.join('%');
    // };

    /**
     * APP1 segment parser registered with ImageMeta. Validates the
     * "Exif\0\0" signature and the TIFF header, then parses the main
     * IFD into `data.exif`. Malformed data is logged and skipped.
     */
    EXIF.parseExifData = function( dataView, offset, length, data ) {

        var tiffOffset = offset + 10,
            littleEndian, dirOffset;

        // Check for the ASCII code for "Exif" (0x45786966):
        if ( dataView.getUint32( offset + 4 ) !== 0x45786966 ) {
            // No Exif data, might be XMP data instead
            return;
        }
        if ( tiffOffset + 8 > dataView.byteLength ) {
            Base.log('Invalid Exif data: Invalid segment size.');
            return;
        }

        // Check for the two null bytes:
        if ( dataView.getUint16( offset + 8 ) !== 0x0000 ) {
            Base.log('Invalid Exif data: Missing byte alignment offset.');
            return;
        }

        // Check the byte alignment:
        switch ( dataView.getUint16( tiffOffset ) ) {
            case 0x4949:
                littleEndian = true;
                break;

            case 0x4D4D:
                littleEndian = false;
                break;

            default:
                Base.log('Invalid Exif data: Invalid byte alignment marker.');
                return;
        }

        // Check for the TIFF tag marker (0x002A):
        if ( dataView.getUint16( tiffOffset + 2, littleEndian ) !== 0x002A ) {
            Base.log('Invalid Exif data: Missing TIFF marker.');
            return;
        }

        // Retrieve the directory offset bytes, usually 0x00000008 or 8 decimal:
        dirOffset = dataView.getUint32( tiffOffset + 4, littleEndian );
        // Create the exif object to store the tags:
        data.exif = new EXIF.ExifMap();
        // Parse the tags of the main image directory and retrieve the
        // offset to the next directory, usually the thumbnail directory:
        dirOffset = EXIF.parseExifTags( dataView, tiffOffset,
                tiffOffset + dirOffset, littleEndian, data );

        // Try to read the thumbnail (disabled):
        // if ( dirOffset ) {
        //     thumbnailData = {exif: {}};
        //     dirOffset = EXIF.parseExifTags(
        //         dataView,
        //         tiffOffset,
        //         tiffOffset + dirOffset,
        //         littleEndian,
        //         thumbnailData
        //     );

        //     // Check for JPEG Thumbnail offset:
        //     if (thumbnailData.exif[0x0201]) {
        //         data.exif.Thumbnail = EXIF.getExifThumbnail(
        //             dataView,
        //             tiffOffset + thumbnailData.exif[0x0201],
        //             thumbnailData.exif[0x0202] // Thumbnail data length
        //         );
        //     }
        // }
    };

    // Register as an APP1 (0xffe1) segment parser.
    ImageMeta.parsers[ 0xffe1 ].push( EXIF.parseExifData );
    return EXIF;
});
/**
* @fileOverview Image
*/
define('runtime/html5/image',[
    'base',
    'runtime/html5/runtime',
    'runtime/html5/util'
], function( Base, Html5Runtime, Util ) {

    // 1x1 transparent gif; assigning it to img.src on destroy lets the
    // browser release the previous image's memory.
    var BLANK = 'data:image/gif;base64,R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs%3D';

    return Html5Runtime.register( 'Image', {

        // Flag: whether the image has been modified (e.g. resized).
        modified: false,

        // Create the backing <img> and wire load/error to owner events.
        init: function() {
            var me = this,
                img = new Image();

            img.onload = function() {

                me._info = {
                    type: me.type,
                    width: this.width,
                    height: this.height
                };

                // For jpegs, read the meta (EXIF) info before signalling load.
                if ( !me._metas && 'image/jpeg' === me.type ) {
                    Util.parseMeta( me._blob, function( error, ret ) {
                        me._metas = ret;
                        me.owner.trigger('load');
                    });
                } else {
                    me.owner.trigger('load');
                }
            };

            img.onerror = function() {
                me.owner.trigger('error');
            };

            me._img = img;
        },

        // Load via an object URL, revoked once 'load' has fired.
        loadFromBlob: function( blob ) {
            var me = this,
                img = me._img;

            me._blob = blob;
            me.type = blob.type;
            img.src = Util.createObjectURL( blob.getSource() );
            me.owner.once( 'load', function() {
                Util.revokeObjectURL( img.src );
            });
        },

        // Resize into a (lazily created, reused) canvas and fire 'complete'.
        resize: function( width, height ) {
            var canvas = this._canvas ||
                    (this._canvas = document.createElement('canvas'));

            this._resize( this._img, canvas, width, height );
            this._blob = null;    // no longer needed; release it.

            this.modified = true;
            this.owner.trigger('complete');
        },

        /**
         * Return the image as a Blob of `type` (defaults to the source
         * type). If unmodified and the type matches, the original blob is
         * returned untouched. For jpeg with preserveHeaders, the original
         * EXIF head is spliced back into the re-encoded data.
         */
        getAsBlob: function( type ) {
            var blob = this._blob,
                opts = this.options,
                canvas;

            type = type || this.type;

            // The blob needs regenerating.
            if ( this.modified || this.type !== type ) {
                canvas = this._canvas;

                if ( type === 'image/jpeg' ) {
                    blob = Util.canvasToDataUrl( canvas, 'image/jpeg',
                            opts.quality );

                    if ( opts.preserveHeaders && this._metas &&
                            this._metas.imageHead ) {

                        blob = Util.dataURL2ArrayBuffer( blob );
                        blob = Util.updateImageHead( blob,
                                this._metas.imageHead );
                        blob = Util.arrayBufferToBlob( blob, type );
                        return blob;
                    }
                } else {
                    blob = Util.canvasToDataUrl( canvas, type );
                }

                blob = Util.dataURL2Blob( blob );
            }

            return blob;
        },

        // Return the canvas content as a data-URL (quality applies to jpeg).
        getAsDataUrl: function( type ) {
            var opts = this.options;

            type = type || this.type;

            if ( type === 'image/jpeg' ) {
                return Util.canvasToDataUrl( this._canvas, type, opts.quality );
            } else {
                return this._canvas.toDataURL( type );
            }
        },

        // EXIF orientation (1-8); defaults to 1 (no rotation) when absent.
        getOrientation: function() {
            return this._metas && this._metas.exif &&
                    this._metas.exif.get('Orientation') || 1;
        },

        // Combined getter/setter for {type, width, height}.
        info: function( val ) {

            // setter
            if ( val ) {
                this._info = val;
                return this;
            }

            // getter
            return this._info;
        },

        // Combined getter/setter for the parsed meta data.
        meta: function( val ) {

            // setter
            if ( val ) {
                this._meta = val;
                return this;
            }

            // getter
            return this._meta;
        },

        // Tear down the canvas and the <img>, releasing their memory.
        destroy: function() {
            var canvas = this._canvas;
            this._img.onload = null;

            if ( canvas ) {
                canvas.getContext('2d')
                        .clearRect( 0, 0, canvas.width, canvas.height );
                canvas.width = canvas.height = 0;
                this._canvas = null;
            }

            // Release memory. Very important — without this the image
            // memory cannot be freed.
            this._img.src = BLANK;
            this._img = this._blob = null;
        },

        // Scale (and optionally crop/center) img into cvs, honouring the
        // EXIF orientation unless preserveHeaders keeps the original head.
        _resize: function( img, cvs, width, height ) {
            var opts = this.options,
                naturalWidth = img.width,
                naturalHeight = img.height,
                orientation = this.getOrientation(),
                scale, w, h, x, y;

            // values that require 90 degree rotation
            if ( ~[ 5, 6, 7, 8 ].indexOf( orientation ) ) {

                // Swap width and height via XOR.
                // NOTE(review): assumes integer width/height — confirm callers.
                width ^= height;
                height ^= width;
                width ^= height;
            }

            // crop: cover the target box; otherwise fit inside it.
            scale = Math[ opts.crop ? 'max' : 'min' ]( width / naturalWidth,
                    height / naturalHeight );

            // Do not upscale unless allowMagnify is set.
            opts.allowMagnify || (scale = Math.min( 1, scale ));

            w = naturalWidth * scale;
            h = naturalHeight * scale;

            if ( opts.crop ) {
                cvs.width = width;
                cvs.height = height;
            } else {
                cvs.width = w;
                cvs.height = h;
            }

            x = (cvs.width - w) / 2;
            y = (cvs.height - h) / 2;

            opts.preserveHeaders || this._rotate2Orientaion( cvs, orientation );

            this._renderImageToCanvas( cvs, img, x, y, w, h );
        },

        // Apply the canvas transform that undoes EXIF orientation 2-8.
        // (Name keeps the project's historical spelling.)
        _rotate2Orientaion: function( canvas, orientation ) {
            var width = canvas.width,
                height = canvas.height,
                ctx = canvas.getContext('2d');

            // Orientations 5-8 are 90° rotations: swap the canvas axes.
            switch ( orientation ) {
                case 5:
                case 6:
                case 7:
                case 8:
                    canvas.width = height;
                    canvas.height = width;
                    break;
            }

            switch ( orientation ) {
                case 2:    // horizontal flip
                    ctx.translate( width, 0 );
                    ctx.scale( -1, 1 );
                    break;

                case 3:    // 180 rotate left
                    ctx.translate( width, height );
                    ctx.rotate( Math.PI );
                    break;

                case 4:    // vertical flip
                    ctx.translate( 0, height );
                    ctx.scale( 1, -1 );
                    break;

                case 5:    // vertical flip + 90 rotate right
                    ctx.rotate( 0.5 * Math.PI );
                    ctx.scale( 1, -1 );
                    break;

                case 6:    // 90 rotate right
                    ctx.rotate( 0.5 * Math.PI );
                    ctx.translate( 0, -height );
                    break;

                case 7:    // horizontal flip + 90 rotate right
                    ctx.rotate( 0.5 * Math.PI );
                    ctx.translate( width, -height );
                    ctx.scale( -1, 1 );
                    break;

                case 8:    // 90 rotate left
                    ctx.rotate( -0.5 * Math.PI );
                    ctx.translate( -width, 0 );
                    break;
            }
        },

        // https://github.com/stomita/ios-imagefile-megapixel/
        // blob/master/src/megapix-image.js
        _renderImageToCanvas: (function() {

            // Non-iOS browsers can simply drawImage directly.
            if ( !Base.os.ios ) {
                return function( canvas, img, x, y, w, h ) {
                    canvas.getContext('2d').drawImage( img, x, y, w, h );
                };
            }

            /**
             * Detecting vertical squash in loaded image.
             * Fixes a bug which squash image vertically while drawing into
             * canvas for some images.
             */
            function detectVerticalSquash( img, iw, ih ) {
                var canvas = document.createElement('canvas'),
                    ctx = canvas.getContext('2d'),
                    sy = 0,
                    ey = ih,
                    py = ih,
                    data, alpha, ratio;


                canvas.width = 1;
                canvas.height = ih;
                ctx.drawImage( img, 0, 0 );
                data = ctx.getImageData( 0, 0, 1, ih ).data;

                // search image edge pixel position in case
                // it is squashed vertically.
                // Binary search on the alpha channel of the 1px column.
                while ( py > sy ) {
                    alpha = data[ (py - 1) * 4 + 3 ];

                    if ( alpha === 0 ) {
                        ey = py;
                    } else {
                        sy = py;
                    }

                    py = (ey + sy) >> 1;
                }

                ratio = (py / ih);
                return (ratio === 0) ? 1 : ratio;
            }

            // Fix the iOS 7 drawImage ratio bug; only the vertical squash
            // correction is needed there.
            // http://stackoverflow.com/questions/11929099/
            // html5-canvas-drawimage-ratio-bug-ios
            if ( Base.os.ios >= 7 ) {
                return function( canvas, img, x, y, w, h ) {
                    var iw = img.naturalWidth,
                        ih = img.naturalHeight,
                        vertSquashRatio = detectVerticalSquash( img, iw, ih );

                    return canvas.getContext('2d').drawImage( img, 0, 0,
                            iw * vertSquashRatio, ih * vertSquashRatio,
                            x, y, w, h );
                };
            }

            /**
             * Detect subsampling in loaded image.
             * In iOS, larger images than 2M pixels may be
             * subsampled in rendering.
             */
            function detectSubsampling( img ) {
                var iw = img.naturalWidth,
                    ih = img.naturalHeight,
                    canvas, ctx;

                // subsampling may happen overmegapixel image
                if ( iw * ih > 1024 * 1024 ) {
                    canvas = document.createElement('canvas');
                    canvas.width = canvas.height = 1;
                    ctx = canvas.getContext('2d');
                    ctx.drawImage( img, -iw + 1, 0 );

                    // subsampled image becomes half smaller in rendering size.
                    // check alpha channel value to confirm image is covering
                    // edge pixel or not. if alpha value is 0
                    // image is not covering, hence subsampled.
                    return ctx.getImageData( 0, 0, 1, 1 ).data[ 3 ] === 0;
                } else {
                    return false;
                }
            }


            // iOS < 7: draw through a 1024px tile buffer, compensating for
            // both subsampling and vertical squash.
            return function( canvas, img, x, y, width, height ) {
                var iw = img.naturalWidth,
                    ih = img.naturalHeight,
                    ctx = canvas.getContext('2d'),
                    subsampled = detectSubsampling( img ),
                    doSquash = this.type === 'image/jpeg',
                    d = 1024,
                    sy = 0,
                    dy = 0,
                    tmpCanvas, tmpCtx, vertSquashRatio, dw, dh, sx, dx;

                if ( subsampled ) {
                    iw /= 2;
                    ih /= 2;
                }

                ctx.save();
                tmpCanvas = document.createElement('canvas');
                tmpCanvas.width = tmpCanvas.height = d;

                tmpCtx = tmpCanvas.getContext('2d');
                vertSquashRatio = doSquash ?
                        detectVerticalSquash( img, iw, ih ) : 1;

                dw = Math.ceil( d * width / iw );
                dh = Math.ceil( d * height / ih / vertSquashRatio );

                while ( sy < ih ) {
                    sx = 0;
                    dx = 0;
                    while ( sx < iw ) {
                        tmpCtx.clearRect( 0, 0, d, d );
                        tmpCtx.drawImage( img, -sx, -sy );
                        ctx.drawImage( tmpCanvas, 0, 0, d, d,
                                x + dx, y + dy, dw, dh );
                        sx += d;
                        dx += dw;
                    }
                    sy += d;
                    dy += dh;
                }
                ctx.restore();
                tmpCanvas = tmpCtx = null;
            };
        })()
    });
});
/**
* 这个方式性能不行,但是可以解决android里面的toDataUrl的bug
* android里面toDataUrl('image/jpege')得到的结果却是png.
*
* 所以这里没辙,只能借助这个工具
* @fileOverview jpeg encoder
*/
define('runtime/html5/jpegencoder',[], function( require, exports, module ) {
/*
Copyright (c) 2008, Adobe Systems Incorporated
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Adobe Systems Incorporated nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
JPEG encoder ported to JavaScript and optimized by Andreas Ritter, www.bytestrom.eu, 11/2009
Basic GUI blocking jpeg encoder
*/
function JPEGEncoder(quality) {
var self = this;
var fround = Math.round;
var ffloor = Math.floor;
var YTable = new Array(64);
var UVTable = new Array(64);
var fdtbl_Y = new Array(64);
var fdtbl_UV = new Array(64);
var YDC_HT;
var UVDC_HT;
var YAC_HT;
var UVAC_HT;
var bitcode = new Array(65535);
var category = new Array(65535);
var outputfDCTQuant = new Array(64);
var DU = new Array(64);
var byteout = [];
var bytenew = 0;
var bytepos = 7;
var YDU = new Array(64);
var UDU = new Array(64);
var VDU = new Array(64);
var clt = new Array(256);
var RGB_YUV_TABLE = new Array(2048);
var currentQuality;
var ZigZag = [
0, 1, 5, 6,14,15,27,28,
2, 4, 7,13,16,26,29,42,
3, 8,12,17,25,30,41,43,
9,11,18,24,31,40,44,53,
10,19,23,32,39,45,52,54,
20,22,33,38,46,51,55,60,
21,34,37,47,50,56,59,61,
35,36,48,49,57,58,62,63
];
var std_dc_luminance_nrcodes = [0,0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0];
var std_dc_luminance_values = [0,1,2,3,4,5,6,7,8,9,10,11];
var std_ac_luminance_nrcodes = [0,0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,0x7d];
var std_ac_luminance_values = [
0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,
0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,
0x22,0x71,0x14,0x32,0x81,0x91,0xa1,0x08,
0x23,0x42,0xb1,0xc1,0x15,0x52,0xd1,0xf0,
0x24,0x33,0x62,0x72,0x82,0x09,0x0a,0x16,
0x17,0x18,0x19,0x1a,0x25,0x26,0x27,0x28,
0x29,0x2a,0x34,0x35,0x36,0x37,0x38,0x39,
0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,
0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,
0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,
0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,
0x7a,0x83,0x84,0x85,0x86,0x87,0x88,0x89,
0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,
0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,
0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,
0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,
0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,
0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe1,0xe2,
0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,
0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,
0xf9,0xfa
];
var std_dc_chrominance_nrcodes = [0,0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0];
var std_dc_chrominance_values = [0,1,2,3,4,5,6,7,8,9,10,11];
var std_ac_chrominance_nrcodes = [0,0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,0x77];
var std_ac_chrominance_values = [
0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,
0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,
0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91,
0xa1,0xb1,0xc1,0x09,0x23,0x33,0x52,0xf0,
0x15,0x62,0x72,0xd1,0x0a,0x16,0x24,0x34,
0xe1,0x25,0xf1,0x17,0x18,0x19,0x1a,0x26,
0x27,0x28,0x29,0x2a,0x35,0x36,0x37,0x38,
0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,
0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,
0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,
0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,
0x79,0x7a,0x82,0x83,0x84,0x85,0x86,0x87,
0x88,0x89,0x8a,0x92,0x93,0x94,0x95,0x96,
0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,
0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,
0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,
0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,
0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,
0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,
0xea,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,
0xf9,0xfa
];
// Build the luma (YTable) and chroma (UVTable) quantization tables for
// scale factor `sf` (derived from quality), stored in zig-zag order,
// plus the matching pre-scaled forward-DCT tables fdtbl_Y / fdtbl_UV.
function initQuantTables(sf){
    var YQT = [
        16, 11, 10, 16, 24, 40, 51, 61,
        12, 12, 14, 19, 26, 58, 60, 55,
        14, 13, 16, 24, 40, 57, 69, 56,
        14, 17, 22, 29, 51, 87, 80, 62,
        18, 22, 37, 56, 68,109,103, 77,
        24, 35, 55, 64, 81,104,113, 92,
        49, 64, 78, 87,103,121,120,101,
        72, 92, 95, 98,112,100,103, 99
    ];

    var UVQT = [
        17, 18, 24, 47, 99, 99, 99, 99,
        18, 21, 26, 66, 99, 99, 99, 99,
        24, 26, 56, 99, 99, 99, 99, 99,
        47, 66, 99, 99, 99, 99, 99, 99,
        99, 99, 99, 99, 99, 99, 99, 99,
        99, 99, 99, 99, 99, 99, 99, 99,
        99, 99, 99, 99, 99, 99, 99, 99,
        99, 99, 99, 99, 99, 99, 99, 99
    ];

    var aasf = [
        1.0, 1.387039845, 1.306562965, 1.175875602,
        1.0, 0.785694958, 0.541196100, 0.275899379
    ];

    // Scale a base table entry and clamp it to the legal 1..255 range.
    function scaled(base) {
        var t = ffloor((base*sf+50)/100);
        return t < 1 ? 1 : (t > 255 ? 255 : t);
    }

    var idx, row, col, k;

    for (idx = 0; idx < 64; idx++) {
        YTable[ZigZag[idx]] = scaled(YQT[idx]);
        UVTable[ZigZag[idx]] = scaled(UVQT[idx]);
    }

    // Fold the AAN DCT scale factors into the quantization divisors.
    k = 0;
    for (row = 0; row < 8; row++) {
        for (col = 0; col < 8; col++) {
            fdtbl_Y[k] = 1.0 / (YTable[ZigZag[k]] * aasf[row] * aasf[col] * 8.0);
            fdtbl_UV[k] = 1.0 / (UVTable[ZigZag[k]] * aasf[row] * aasf[col] * 8.0);
            k++;
        }
    }
}
// Expand a JPEG Huffman spec (per-bit-length code counts `nrcodes`,
// indices 1..16, plus the symbol list `std_table`) into a lookup table:
// table[symbol] = [codeValue, codeLengthInBits].
function computeHuffmanTbl(nrcodes, std_table){
    var table = [],
        code = 0,
        idx = 0,
        bitLen, n;

    for (bitLen = 1; bitLen <= 16; bitLen++) {
        for (n = 0; n < nrcodes[bitLen]; n++) {
            table[std_table[idx]] = [ code, bitLen ];
            idx++;
            code++;
        }
        // Canonical Huffman: codes one bit longer start at double the value.
        code *= 2;
    }
    return table;
}
// Build the four standard Huffman tables (DC/AC x luma/chroma) from the
// baseline JPEG specs declared above.
function initHuffmanTbl()
{
    YDC_HT = computeHuffmanTbl(std_dc_luminance_nrcodes, std_dc_luminance_values);
    UVDC_HT = computeHuffmanTbl(std_dc_chrominance_nrcodes, std_dc_chrominance_values);
    YAC_HT = computeHuffmanTbl(std_ac_luminance_nrcodes, std_ac_luminance_values);
    UVAC_HT = computeHuffmanTbl(std_ac_chrominance_nrcodes, std_ac_chrominance_values);
}
// Precompute, for every coefficient value v in [-32767, 32767] (indexed
// at 32767+v), its JPEG "category" (bit length) and the bit pattern used
// to encode it.
function initCategoryNumber()
{
    var lower = 1,
        upper = 2,
        cat, v;

    for (cat = 1; cat <= 15; cat++) {

        // Positive values encode as themselves.
        for (v = lower; v < upper; v++) {
            category[32767 + v] = cat;
            bitcode[32767 + v] = [ v, cat ];
        }

        // Negative values encode as upper-1+v (one's-complement style).
        for (v = -(upper - 1); v <= -lower; v++) {
            category[32767 + v] = cat;
            bitcode[32767 + v] = [ upper - 1 + v, cat ];
        }

        lower <<= 1;
        upper <<= 1;
    }
}
// Fixed-point (16-bit fraction) RGB->YUV coefficient tables, stored as
// eight 256-entry bands packed into one array. Each band is
// coefficient * channelValue (+ optional rounding bias).
function initRGBYUVTable() {
    var bands = [
        // [bandOffset, coefficient, bias]
        [    0,  19595, 0        ],
        [  256,  38470, 0        ],
        [  512,   7471, 0x8000   ],
        [  768, -11059, 0        ],
        [ 1024, -21709, 0        ],
        [ 1280,  32768, 0x807FFF ],
        [ 1536, -27439, 0        ],
        [ 1792,  -5329, 0        ]
    ];
    var i, b, band;

    for (i = 0; i < 256; i++) {
        for (b = 0; b < bands.length; b++) {
            band = bands[b];
            RGB_YUV_TABLE[(i + band[0]) >> 0] = band[1] * i + band[2];
        }
    }
}
// IO functions
// Append `bs` = [value, bitLength] to the output bit stream, MSB first,
// accumulating into the closure byte buffer (bytenew/bytepos). A full
// 0xFF byte is followed by a stuffed 0x00 byte per the JPEG spec.
function writeBits(bs)
{
    var value = bs[0],
        bitsLeft = bs[1] - 1;

    while (bitsLeft >= 0) {
        if (value & (1 << bitsLeft)) {
            bytenew |= (1 << bytepos);
        }
        bitsLeft--;
        bytepos--;

        if (bytepos < 0) {
            if (bytenew == 0xFF) {
                // Byte stuffing: 0xFF must be followed by 0x00.
                writeByte(0xFF);
                writeByte(0);
            } else {
                writeByte(bytenew);
            }
            bytepos = 7;
            bytenew = 0;
        }
    }
}
// Emit one byte; pushed pre-converted to a char via the lookup table so
// the final join needs no per-byte conversion.
function writeByte(value)
{
    byteout.push(clt[value]);
}
// Emit a 16-bit word, big-endian (high byte first).
function writeWord(value)
{
    writeByte(value >> 8 & 0xFF);
    writeByte(value & 0xFF);
}
// DCT & quantization core
// Forward 8x8 DCT (float butterfly structure, matching jpeglib's AAN-style
// jfdctflt — the 0.707106781/0.541196100/1.306562965/0.382683433 constants)
// followed by quantization with the pre-scaled table `fdtbl`.
// Mutates `data` in place during the two DCT passes and returns the
// shared `outputfDCTQuant` buffer, valid only until the next call.
function fDCTQuant(data, fdtbl)
{
    var d0, d1, d2, d3, d4, d5, d6, d7;
    /* Pass 1: process rows. */
    var dataOff=0;
    var i;
    var I8 = 8;
    var I64 = 64;
    for (i=0; i<I8; ++i)
    {
        d0 = data[dataOff];
        d1 = data[dataOff+1];
        d2 = data[dataOff+2];
        d3 = data[dataOff+3];
        d4 = data[dataOff+4];
        d5 = data[dataOff+5];
        d6 = data[dataOff+6];
        d7 = data[dataOff+7];

        var tmp0 = d0 + d7;
        var tmp7 = d0 - d7;
        var tmp1 = d1 + d6;
        var tmp6 = d1 - d6;
        var tmp2 = d2 + d5;
        var tmp5 = d2 - d5;
        var tmp3 = d3 + d4;
        var tmp4 = d3 - d4;

        /* Even part */
        var tmp10 = tmp0 + tmp3;    /* phase 2 */
        var tmp13 = tmp0 - tmp3;
        var tmp11 = tmp1 + tmp2;
        var tmp12 = tmp1 - tmp2;

        data[dataOff] = tmp10 + tmp11; /* phase 3 */
        data[dataOff+4] = tmp10 - tmp11;

        var z1 = (tmp12 + tmp13) * 0.707106781; /* c4 */
        data[dataOff+2] = tmp13 + z1; /* phase 5 */
        data[dataOff+6] = tmp13 - z1;

        /* Odd part */
        tmp10 = tmp4 + tmp5; /* phase 2 */
        tmp11 = tmp5 + tmp6;
        tmp12 = tmp6 + tmp7;

        /* The rotator is modified from fig 4-8 to avoid extra negations. */
        var z5 = (tmp10 - tmp12) * 0.382683433; /* c6 */
        var z2 = 0.541196100 * tmp10 + z5; /* c2-c6 */
        var z4 = 1.306562965 * tmp12 + z5; /* c2+c6 */
        var z3 = tmp11 * 0.707106781; /* c4 */

        var z11 = tmp7 + z3;    /* phase 5 */
        var z13 = tmp7 - z3;

        data[dataOff+5] = z13 + z2; /* phase 6 */
        data[dataOff+3] = z13 - z2;
        data[dataOff+1] = z11 + z4;
        data[dataOff+7] = z11 - z4;

        dataOff += 8; /* advance pointer to next row */
    }

    /* Pass 2: process columns. */
    dataOff = 0;
    for (i=0; i<I8; ++i)
    {
        d0 = data[dataOff];
        d1 = data[dataOff + 8];
        d2 = data[dataOff + 16];
        d3 = data[dataOff + 24];
        d4 = data[dataOff + 32];
        d5 = data[dataOff + 40];
        d6 = data[dataOff + 48];
        d7 = data[dataOff + 56];

        var tmp0p2 = d0 + d7;
        var tmp7p2 = d0 - d7;
        var tmp1p2 = d1 + d6;
        var tmp6p2 = d1 - d6;
        var tmp2p2 = d2 + d5;
        var tmp5p2 = d2 - d5;
        var tmp3p2 = d3 + d4;
        var tmp4p2 = d3 - d4;

        /* Even part */
        var tmp10p2 = tmp0p2 + tmp3p2;  /* phase 2 */
        var tmp13p2 = tmp0p2 - tmp3p2;
        var tmp11p2 = tmp1p2 + tmp2p2;
        var tmp12p2 = tmp1p2 - tmp2p2;

        data[dataOff] = tmp10p2 + tmp11p2; /* phase 3 */
        data[dataOff+32] = tmp10p2 - tmp11p2;

        var z1p2 = (tmp12p2 + tmp13p2) * 0.707106781; /* c4 */
        data[dataOff+16] = tmp13p2 + z1p2; /* phase 5 */
        data[dataOff+48] = tmp13p2 - z1p2;

        /* Odd part */
        tmp10p2 = tmp4p2 + tmp5p2;  /* phase 2 */
        tmp11p2 = tmp5p2 + tmp6p2;
        tmp12p2 = tmp6p2 + tmp7p2;

        /* The rotator is modified from fig 4-8 to avoid extra negations. */
        var z5p2 = (tmp10p2 - tmp12p2) * 0.382683433; /* c6 */
        var z2p2 = 0.541196100 * tmp10p2 + z5p2; /* c2-c6 */
        var z4p2 = 1.306562965 * tmp12p2 + z5p2; /* c2+c6 */
        var z3p2 = tmp11p2 * 0.707106781; /* c4 */

        var z11p2 = tmp7p2 + z3p2;  /* phase 5 */
        var z13p2 = tmp7p2 - z3p2;

        data[dataOff+40] = z13p2 + z2p2; /* phase 6 */
        data[dataOff+24] = z13p2 - z2p2;
        data[dataOff+ 8] = z11p2 + z4p2;
        data[dataOff+56] = z11p2 - z4p2;

        dataOff++; /* advance pointer to next column */
    }

    // Quantize/descale the coefficients
    var fDCTQuant;
    for (i=0; i<I64; ++i)
    {
        // Apply the quantization and scaling factor & Round to nearest integer
        fDCTQuant = data[i]*fdtbl[i];
        outputfDCTQuant[i] = (fDCTQuant > 0.0) ? ((fDCTQuant + 0.5)|0) : ((fDCTQuant - 0.5)|0);
        //outputfDCTQuant[i] = fround(fDCTQuant);

    }
    return outputfDCTQuant;
}
// JFIF APP0 segment: "JFIF\0" identifier, version 1.1, density units 0
// (aspect ratio only), 1:1 density, no thumbnail.
function writeAPP0()
{
    var idAndVersion = [
        0x4A, 0x46, 0x49, 0x46, 0, // "JFIF" + NUL terminator
        1,                         // version major
        1,                         // version minor
        0                          // density units: none
    ], i;

    writeWord(0xFFE0); // APP0 marker
    writeWord(16);     // segment length

    for (i = 0; i < idAndVersion.length; i++) {
        writeByte(idAndVersion[i]);
    }

    writeWord(1); // x density
    writeWord(1); // y density
    writeByte(0); // thumbnail width
    writeByte(0); // thumbnail height
}
// SOF0 (baseline DCT) frame header: 8-bit precision, three components
// (Y, U, V), no subsampling (1x1 each); Y uses quant table 0, U/V use
// quant table 1.
function writeSOF0(width, height)
{
    var components = [
        // [componentId, samplingFactors, quantTableId]
        [ 1, 0x11, 0 ],
        [ 2, 0x11, 1 ],
        [ 3, 0x11, 1 ]
    ], i;

    writeWord(0xFFC0); // SOF0 marker
    writeWord(17);     // segment length for three components
    writeByte(8);      // sample precision
    writeWord(height);
    writeWord(width);
    writeByte(components.length);

    for (i = 0; i < components.length; i++) {
        writeByte(components[i][0]);
        writeByte(components[i][1]);
        writeByte(components[i][2]);
    }
}
// DQT segment carrying both 64-entry quantization tables (id 0 = luma,
// id 1 = chroma), already stored in zig-zag order.
function writeDQT()
{
    var i;

    writeWord(0xFFDB); // DQT marker
    writeWord(132);    // length: 2 + 2 * (1 id byte + 64 entries)

    writeByte(0);
    for (i = 0; i < 64; i++) {
        writeByte(YTable[i]);
    }

    writeByte(1);
    for (i = 0; i < 64; i++) {
        writeByte(UVTable[i]);
    }
}
// DHT segment with the four standard Huffman tables, in the order
// Y-DC, Y-AC, UV-DC, UV-AC.
function writeDHT()
{
    // Emit one table: class/id byte, the 16 per-bit-length counts
    // (spec arrays are 1-indexed), then `valueCount` symbol values.
    function emitTable(info, nrcodes, values, valueCount) {
        var i;

        writeByte(info);
        for (i = 0; i < 16; i++) {
            writeByte(nrcodes[i + 1]);
        }
        for (i = 0; i < valueCount; i++) {
            writeByte(values[i]);
        }
    }

    writeWord(0xFFC4);  // DHT marker
    writeWord(0x01A2);  // combined length of the four tables

    emitTable(0x00, std_dc_luminance_nrcodes, std_dc_luminance_values, 12);
    emitTable(0x10, std_ac_luminance_nrcodes, std_ac_luminance_values, 162);
    emitTable(0x01, std_dc_chrominance_nrcodes, std_dc_chrominance_values, 12);
    emitTable(0x11, std_ac_chrominance_nrcodes, std_ac_chrominance_values, 162);
}
// SOS header: three components; Y uses Huffman tables 0/0, U and V use
// 1/1; full spectral selection (0..63), no successive approximation.
function writeSOS()
{
    writeWord(0xFFDA); // SOS marker
    writeWord(12);     // segment length
    writeByte(3);      // component count

    writeByte(1);      // Y component id
    writeByte(0x00);   // Y DC/AC Huffman table ids
    writeByte(2);      // U component id
    writeByte(0x11);   // U DC/AC Huffman table ids
    writeByte(3);      // V component id
    writeByte(0x11);   // V DC/AC Huffman table ids

    writeByte(0);      // spectral selection start (Ss)
    writeByte(0x3f);   // spectral selection end (Se)
    writeByte(0);      // successive approximation (Ah/Al)
}
// Encode one 8x8 data unit: DCT + quantize `CDU` with `fdtbl`, zig-zag
// reorder, then Huffman-encode the DC difference (vs the predictor `DC`)
// and the run-length-coded AC coefficients using the given DC/AC tables.
// Returns the new DC predictor for this component.
function processDU(CDU, fdtbl, DC, HTDC, HTAC){
    var EOB = HTAC[0x00];
    var M16zeroes = HTAC[0xF0];
    var pos;
    var I16 = 16;
    var I63 = 63;
    var I64 = 64;
    var DU_DCT = fDCTQuant(CDU, fdtbl);
    //ZigZag reorder
    for (var j=0;j<I64;++j) {
        DU[ZigZag[j]]=DU_DCT[j];
    }
    var Diff = DU[0] - DC; DC = DU[0];
    //Encode DC
    if (Diff==0) {
        writeBits(HTDC[0]); // Diff might be 0
    } else {
        pos = 32767+Diff;
        writeBits(HTDC[category[pos]]);
        writeBits(bitcode[pos]);
    }
    //Encode ACs
    var end0pos = 63; // was const... which is crazy
    // Find the last non-zero AC coefficient (scanning backwards).
    for (; (end0pos>0)&&(DU[end0pos]==0); end0pos--) {};
    //end0pos = first element in reverse order !=0
    if ( end0pos == 0) {
        writeBits(EOB);
        return DC;
    }
    var i = 1;
    var lng;
    while ( i <= end0pos ) {
        var startpos = i;
        // Count the zero run before the next non-zero coefficient.
        for (; (DU[i]==0) && (i<=end0pos); ++i) {}
        var nrzeroes = i-startpos;
        if ( nrzeroes >= I16 ) {
            // Runs of 16+ zeros are emitted as ZRL (0xF0) symbols.
            lng = nrzeroes>>4;
            for (var nrmarker=1; nrmarker <= lng; ++nrmarker)
                writeBits(M16zeroes);
            nrzeroes = nrzeroes&0xF;
        }
        pos = 32767+DU[i];
        writeBits(HTAC[(nrzeroes<<4)+category[pos]]);
        writeBits(bitcode[pos]);
        i++;
    }
    if ( end0pos != I63 ) {
        writeBits(EOB);
    }
    return DC;
}
// Pre-compute a byte-value -> one-character-string lookup table (clt)
// so output assembly avoids a String.fromCharCode call per byte.
function initCharLookupTable(){
    var toChar = String.fromCharCode;
    var code = 0;
    while (code < 256) {
        clt[code] = toChar(code);
        code++;
    }
}
// Encode an ImageData-like object ({width, height, data: RGBA bytes})
// into a base64 JPEG data URI. Optional `quality` (1-100) switches the
// quantization tables for this and subsequent calls.
this.encode = function(image,quality) // image data object
{
    // var time_start = new Date().getTime();
    if(quality) setQuality(quality);
    // Initialize bit writer
    byteout = new Array();
    bytenew=0;
    bytepos=7;
    // Add JPEG headers
    writeWord(0xFFD8); // SOI
    writeAPP0();
    writeDQT();
    writeSOF0(image.width,image.height);
    writeDHT();
    writeSOS();
    // Encode 8x8 macroblocks
    var DCY=0; // per-component DC predictors, carried block to block
    var DCU=0;
    var DCV=0;
    bytenew=0;
    bytepos=7;
    this.encode.displayName = "_encode_";
    var imageData = image.data;
    var width = image.width;
    var height = image.height;
    var quadWidth = width*4;   // row stride in bytes (4 bytes per RGBA pixel)
    var tripleWidth = width*3; // NOTE(review): appears unused below
    var x, y = 0;
    var r, g, b;
    var start,p, col,row,pos;
    while(y < height){
        x = 0;
        while(x < quadWidth){
            // Gather one 8x8 pixel block starting at byte offset x on row y,
            // replicating edge pixels to pad partial blocks at the borders.
            start = quadWidth * y + x;
            p = start;
            col = -1;
            row = 0;
            for(pos=0; pos < 64; pos++){
                row = pos >> 3;// /8
                col = ( pos & 7 ) * 4; // %8
                p = start + ( row * quadWidth ) + col;
                if(y+row >= height){ // padding bottom
                    p-= (quadWidth*(y+1+row-height));
                }
                if(x+col >= quadWidth){ // padding right
                    p-= ((x+col) - quadWidth +4)
                }
                r = imageData[ p++ ];
                g = imageData[ p++ ];
                b = imageData[ p++ ];
                /* // calculate YUV values dynamically
                YDU[pos]=((( 0.29900)*r+( 0.58700)*g+( 0.11400)*b))-128; //-0x80
                UDU[pos]=(((-0.16874)*r+(-0.33126)*g+( 0.50000)*b));
                VDU[pos]=((( 0.50000)*r+(-0.41869)*g+(-0.08131)*b));
                */
                // use lookup table (slightly faster)
                YDU[pos] = ((RGB_YUV_TABLE[r] + RGB_YUV_TABLE[(g + 256)>>0] + RGB_YUV_TABLE[(b + 512)>>0]) >> 16)-128;
                UDU[pos] = ((RGB_YUV_TABLE[(r + 768)>>0] + RGB_YUV_TABLE[(g + 1024)>>0] + RGB_YUV_TABLE[(b + 1280)>>0]) >> 16)-128;
                VDU[pos] = ((RGB_YUV_TABLE[(r + 1280)>>0] + RGB_YUV_TABLE[(g + 1536)>>0] + RGB_YUV_TABLE[(b + 1792)>>0]) >> 16)-128;
            }
            DCY = processDU(YDU, fdtbl_Y, DCY, YDC_HT, YAC_HT);
            DCU = processDU(UDU, fdtbl_UV, DCU, UVDC_HT, UVAC_HT);
            DCV = processDU(VDU, fdtbl_UV, DCV, UVDC_HT, UVAC_HT);
            x+=32; // advance 8 pixels = 32 RGBA bytes
        }
        y+=8;
    }
    ////////////////////////////////////////////////////////////////
    // Do the bit alignment of the EOI marker
    if ( bytepos >= 0 ) {
        var fillbits = [];
        fillbits[1] = bytepos+1;
        fillbits[0] = (1<<(bytepos+1))-1; // pad the last byte with 1-bits
        writeBits(fillbits);
    }
    writeWord(0xFFD9); //EOI
    var jpegDataUri = 'data:image/jpeg;base64,' + btoa(byteout.join(''));
    byteout = []; // release the output buffer
    // benchmarking
    // var duration = new Date().getTime() - time_start;
    // console.log('Encoding time: '+ currentQuality + 'ms');
    //
    return jpegDataUri
}
// Set the encoder quality (clamped to 1..100) and rebuild the
// quantization tables with the standard IJG scale factor, skipping
// the rebuild when the quality is unchanged.
function setQuality(quality){
    // Clamp into the valid range.
    if (quality <= 0) quality = 1;
    else if (quality > 100) quality = 100;
    if (currentQuality == quality) return; // tables already match
    // IJG scaling: below 50 scale up (5000/q), otherwise scale down (200-2q).
    var scaleFactor = quality < 50
        ? Math.floor(5000 / quality)
        : Math.floor(200 - quality * 2);
    initQuantTables(scaleFactor);
    currentQuality = quality;
}
// One-time encoder initialization: build the character, Huffman,
// category and RGB->YUV lookup tables, then apply the constructor
// quality (defaulting to 50 when unset/zero).
function init(){
    // var time_start = new Date().getTime();
    if(!quality) quality = 50;
    // Create tables
    initCharLookupTable()
    initHuffmanTbl();
    initCategoryNumber();
    initRGBYUVTable();
    setQuality(quality);
    // var duration = new Date().getTime() - time_start;
    // console.log('Initialization '+ duration + 'ms');
}
init();
};
// Convenience one-shot API: encode `data` with a throwaway encoder.
// The quality is applied through the constructor, so the inner
// encode() call does not need it again.
JPEGEncoder.encode = function( data, quality ) {
    var encoder = new JPEGEncoder( quality );
    return encoder.encode( data );
}
return JPEGEncoder;
});
/**
 * @fileOverview Fix android canvas.toDataUrl bug.
 */
define('runtime/html5/androidpatch',[
    'runtime/html5/util',
    'runtime/html5/jpegencoder',
    'base'
], function( Util, encoder, Base ) {
    var origin = Util.canvasToDataUrl,
        supportJpeg; // lazily detected: can this browser's canvas emit a real JPEG?

    // Wrap canvasToDataUrl: on Android devices whose canvas cannot export
    // JPEG, fall back to the pure-JS JPEG encoder.
    Util.canvasToDataUrl = function( canvas, type, quality ) {
        var ctx, w, h, fragement, parts;
        // Non-Android devices skip the patch entirely.
        if ( !Base.os.android ) {
            return origin.apply( null, arguments );
        }
        // Detect whether the canvas supports JPEG export by inspecting the
        // produced data: a real JPEG starts with bytes 255, 216 (0xFF 0xD8).
        if ( type === 'image/jpeg' && typeof supportJpeg === 'undefined' ) {
            fragement = origin.apply( null, arguments );
            parts = fragement.split(',');
            if ( ~parts[ 0 ].indexOf('base64') ) {
                fragement = atob( parts[ 1 ] );
            } else {
                fragement = decodeURIComponent( parts[ 1 ] );
            }
            fragement = fragement.substring( 0, 2 );
            supportJpeg = fragement.charCodeAt( 0 ) === 255 &&
                    fragement.charCodeAt( 1 ) === 216;
        }
        // Only re-encode when native JPEG export is broken.
        if ( type === 'image/jpeg' && !supportJpeg ) {
            w = canvas.width;
            h = canvas.height;
            ctx = canvas.getContext('2d');
            return encoder.encode( ctx.getImageData( 0, 0, w, h ), quality );
        }
        return origin.apply( null, arguments );
    };
});
/**
 * @fileOverview Transport
 * @todo Support chunked transfer. Benefits: a large file can be split into
 * small pieces and sent one by one, improving the success rate for big
 * files; on failure only the failed piece is re-sent instead of restarting
 * the whole upload. Resumable uploads also require chunked mode.
 */
define('runtime/html5/transport',[
    'base',
    'runtime/html5/runtime'
], function( Base, Html5Runtime ) {
    var noop = Base.noop,
        $ = Base.$;

    return Html5Runtime.register( 'Transport', {
        init: function() {
            this._status = 0;
            this._response = null;
        },

        // Build and fire the upload request, either as a raw binary body
        // (sendAsBinary: form fields are moved onto the query string) or
        // as multipart/form-data.
        send: function() {
            var owner = this.owner,
                opts = this.options,
                xhr = this._initAjax(),
                blob = owner._blob,
                server = opts.server,
                formData, binary, fr;
            if ( opts.sendAsBinary ) {
                server += (/\?/.test( server ) ? '&' : '?') +
                        $.param( owner._formData );
                binary = blob.getSource();
            } else {
                formData = new FormData();
                $.each( owner._formData, function( k, v ) {
                    formData.append( k, v );
                });
                formData.append( opts.fileVal, blob.getSource(),
                        opts.filename || owner._formData.name || '' );
            }
            if ( opts.withCredentials && 'withCredentials' in xhr ) {
                xhr.open( opts.method, server, true );
                xhr.withCredentials = true;
            } else {
                xhr.open( opts.method, server );
            }
            this._setRequestHeader( xhr, opts.headers );
            if ( binary ) {
                xhr.overrideMimeType('application/octet-stream');
                // Sending a Blob directly on Android makes the server receive
                // an empty file. Bug details:
                // https://code.google.com/p/android/issues/detail?id=39882
                // So read it with a FileReader first and send an ArrayBuffer.
                if ( Base.os.android ) {
                    fr = new FileReader();
                    fr.onload = function() {
                        xhr.send( this.result );
                        fr = fr.onload = null; // break the cycle for GC
                    };
                    fr.readAsArrayBuffer( binary );
                } else {
                    xhr.send( binary );
                }
            } else {
                xhr.send( formData );
            }
        },

        getResponse: function() {
            return this._response;
        },

        getResponseAsJson: function() {
            return this._parseJson( this._response );
        },

        getStatus: function() {
            return this._status;
        },

        // Cancel an in-flight request and detach its handlers.
        abort: function() {
            var xhr = this._xhr;
            if ( xhr ) {
                xhr.upload.onprogress = noop;
                xhr.onreadystatechange = noop;
                xhr.abort();
                this._xhr = xhr = null;
            }
        },

        destroy: function() {
            this.abort();
        },

        // Create the XHR (falling back to XDomainRequest for legacy IE
        // cross-domain cases) and wire progress/completion handlers.
        _initAjax: function() {
            var me = this,
                xhr = new XMLHttpRequest(),
                opts = this.options;
            if ( opts.withCredentials && !('withCredentials' in xhr) &&
                    typeof XDomainRequest !== 'undefined' ) {
                xhr = new XDomainRequest();
            }
            xhr.upload.onprogress = function( e ) {
                var percentage = 0;
                if ( e.lengthComputable ) {
                    percentage = e.loaded / e.total;
                }
                return me.trigger( 'progress', percentage );
            };
            xhr.onreadystatechange = function() {
                if ( xhr.readyState !== 4 ) {
                    return;
                }
                xhr.upload.onprogress = noop;
                xhr.onreadystatechange = noop;
                me._xhr = null;
                me._status = xhr.status;
                // 2xx -> load; 5xx -> server error; anything else (incl.
                // status 0 after abort) -> http/abort error.
                if ( xhr.status >= 200 && xhr.status < 300 ) {
                    me._response = xhr.responseText;
                    return me.trigger('load');
                } else if ( xhr.status >= 500 && xhr.status < 600 ) {
                    me._response = xhr.responseText;
                    return me.trigger( 'error', 'server' );
                }
                return me.trigger( 'error', me._status ? 'http' : 'abort' );
            };
            me._xhr = xhr;
            return xhr;
        },

        _setRequestHeader: function( xhr, headers ) {
            $.each( headers, function( key, val ) {
                xhr.setRequestHeader( key, val );
            });
        },

        // Lenient JSON parse: malformed responses yield {} instead of throwing.
        _parseJson: function( str ) {
            var json;
            try {
                json = JSON.parse( str );
            } catch ( ex ) {
                json = {};
            }
            return json;
        }
    });
});
// Aggregate module: requiring the widgets and html5 runtime parts
// registers them via their side effects; only Base is re-exported
// as the public WebUploader namespace.
define('webuploader',[
    'base',
    'widgets/filepicker',
    'widgets/image',
    'widgets/queue',
    'widgets/runtime',
    'widgets/upload',
    'runtime/html5/blob',
    'runtime/html5/filepicker',
    'runtime/html5/imagemeta/exif',
    'runtime/html5/image',
    'runtime/html5/androidpatch',
    'runtime/html5/transport'
], function( Base ) {
    return Base;
});
return require('webuploader');
});
|
'use strict';
const parseDocumentCode = require('../parsers/mozambiqueTD1/parseDocumentCodeId');
const parseOptional = require('../parsers/parseOptional');
const parseDocumentNumberOptional = require('../parsers/parseDocumentNumberOptional');
const {
documentCodeTemplate,
issuingStateTemplate,
documentNumberTemplate,
documentNumberCheckDigitTemplate,
birthDateTemplate,
birthDateCheckDigitTemplate,
sexTemplate,
expirationDateTemplate,
expirationDateCheckDigitTemplate,
nationalityTemplate,
compositeCheckDigitTemplate,
lastNameTemplate,
firstNameTemplate
} = require('./fieldTemplates');
const createFieldParser = require('./createFieldParser');
module.exports = [
Object.assign({}, documentCodeTemplate, {
line: 0,
start: 0,
end: 2,
parser: parseDocumentCode
}),
Object.assign({}, issuingStateTemplate, {
line: 0,
start: 2,
end: 5
}),
Object.assign({}, documentNumberTemplate, {
line: 0,
start: 5,
end: 14,
related: [
{
line: 0,
start: 14,
end: 15
},
{
line: 0,
start: 15,
end: 30
}
]
}),
Object.assign(documentNumberCheckDigitTemplate, {
line: 0,
start: 14,
end: 15,
related: [
{
line: 0,
start: 5,
end: 14
},
{
line: 0,
start: 15,
end: 30
}
]
}),
{
label: 'Optional field 1',
field: 'optional1',
line: 0,
start: 15,
end: 30,
related: [
{
line: 0,
start: 5,
end: 14
},
{
line: 0,
start: 14,
end: 15
}
],
parser: parseDocumentNumberOptional
},
Object.assign({}, birthDateTemplate, {
start: 0,
end: 6,
line: 1
}),
Object.assign({}, birthDateCheckDigitTemplate, {
line: 1,
start: 6,
end: 7,
related: [
{
line: 1,
start: 0,
end: 6
}
]
}),
Object.assign({}, sexTemplate, {
line: 1,
start: 7,
end: 8
}),
Object.assign({}, expirationDateTemplate, {
line: 1,
start: 8,
end: 14
}),
Object.assign({}, expirationDateCheckDigitTemplate, {
line: 1,
start: 14,
end: 15,
related: [
{
line: 1,
start: 8,
end: 14
}
]
}),
Object.assign({}, nationalityTemplate, {
line: 1,
start: 15,
end: 18
}),
{
label: 'Optional field 2',
field: 'optional2',
line: 1,
start: 18,
end: 29,
parser: parseOptional
},
Object.assign({}, compositeCheckDigitTemplate, {
line: 1,
start: 29,
end: 30,
related: [
{
line: 0,
start: 5,
end: 30
},
{
line: 1,
start: 0,
end: 7
},
{
line: 1,
start: 8,
end: 15
},
{
line: 1,
start: 18,
end: 29
}
]
}),
Object.assign({}, lastNameTemplate, {
line: 2,
start: 0,
end: 30
}),
Object.assign({}, firstNameTemplate, {
line: 2,
start: 0,
end: 30
})
].map(createFieldParser);
|
#!/usr/bin/env node
"use strict";
// Entry point: delegate startup and process supervision to service-runner.
var ServiceRunner = require('service-runner');
new ServiceRunner().start();
|
exports.HemlockNode = HemlockNode;
const async = require('async');
const WebSocket = require('ws');
const findPort = require('find-port');
const fs = require('fs');
const axios = require('axios');
const jsondiffpatch = require('jsondiffpatch');
const object_hash = require('object-hash');
//const REQUEST = require('request');
const HemlockNodeConfig = require(__dirname + '/hemlocknodeconfig.js').HemlockNodeConfig;
const HemlockConnectionToChildNode = require(__dirname + '/hemlockconnectiontochildnode.js').HemlockConnectionToChildNode;
const HemlockConnectionToParentHub = require(__dirname + '/hemlockconnectiontoparenthub.js').HemlockConnectionToParentHub;
const HemlockHubManager = require(__dirname + '/hemlockhubmanager.js').HemlockHubManager;
const PoliteWebSocket = require(__dirname + '/politewebsocket.js').PoliteWebSocket;
const logger = require(__dirname + '/logger.js').logger();
// TODO: think of a better default range
const HEMLOCK_LEAF_PORT_RANGE = process.env.HEMLOCK_LEAF_PORT_RANGE || '2000-3000';
const HEMLOCK_LEAF_HOST = process.env.HEMLOCK_LEAF_HOST || 'localhost';
// hemlock node type: hub or leaf
// A node in the hemlock tree network. node_type is 'hub' (accepts child
// connections over websockets) or 'leaf' (terminal node). The node serves
// HTTP(S), optionally connects upward to a parent hub, periodically reports
// its node data up the tree, and exits when its config file changes on disk
// (so a supervisor can restart it).
function HemlockNode(hemlock_node_directory, node_type) {
    // Inject the express-style app that will back the HTTP(S) server.
    this.setHttpServer = function(app) {
        m_http_server = app;
    };
    this.initialize = function(opts, callback) {
        initialize(opts, callback);
    };
    this.setRootUrl = function(url) {
        m_root_url = url;
    };
    // Leaf nodes must provide a manager exposing nodeDataForParent() and restart().
    this.setLeafManager = function(MM) {
        m_context.leaf_manager = MM;
    };
    this.context = function() {
        return m_context;
    };

    let m_context = {};
    // Last payload sent upward; null forces a full (non-delta) resend.
    let m_last_node_data_reported = null;
    var m_http_server = null;
    m_context.connection_to_parent_hub = null;
    var m_root_url = process.env.KBUCKET_URL||'https://kbucket.flatironinstitute.org';
    let m_config_directory_name = '';
    let m_config_file_name = '';
    let m_config = null;
    // only used for node_type='hub'
    m_context.hub_manager = null;

    // Create config (if needed), generate keys/id, run interactive config,
    // then (unless opts.no_server) start the server(s) and the background
    // reporting/monitoring loops, in series.
    function initialize(opts, callback) {
        opts.config_directory_name = opts.config_directory_name || '.kbucket';
        opts.config_file_name = opts.config_file_name || 'kbnode.json';
        m_context.config = new HemlockNodeConfig(hemlock_node_directory, opts);
        m_config = m_context.config;
        m_config_directory_name = opts.config_directory_name;
        m_config_file_name = opts.config_file_name;
        if (node_type == 'hub')
            m_context.hub_manager = new HemlockHubManager(m_config);
        var steps = [];
        // for both types
        steps.push(create_config_if_needed);
        steps.push(generate_pem_keys_and_id_if_needed);
        steps.push(initialize_config);
        steps.push(run_interactive_config);
        if (!opts.no_server) {
            steps.push(start_http_server);
            if (node_type == 'hub') {
                steps.push(start_websocket_server);
            }
            steps.push(connect_to_parent_hub);
            steps.push(start_sending_node_data_to_parent);
            steps.push(start_checking_config_changed);
        }
        async.series(steps, function(err) {
            callback(err);
        });
        function run_interactive_config(callback) {
            m_config.runInteractiveConfiguration(opts, callback);
        }
        // Create the config directory on first run.
        function create_config_if_needed(callback) {
            if (!m_config.configDirExists()) {
                console.info(`Creating ${node_type} configuration in ${m_config.hemlockNodeDirectory()}/${m_config_directory_name} ...`);
                m_config.createNew(node_type, opts, function(err) {
                    if (err) {
                        callback(err);
                        return;
                    }
                    callback(null);
                });
            } else {
                callback(null);
            }
        }
        // Generate the node's keypair/id only when neither pem file exists yet.
        function generate_pem_keys_and_id_if_needed(callback) {
            var private_key_fname = m_config.configDir() + '/private.pem';
            var public_key_fname = m_config.configDir() + '/public.pem';
            if ((!fs.existsSync(public_key_fname)) && (!fs.existsSync(private_key_fname))) {
                console.info('Creating private/public keys ...');
                m_config.generatePemFilesAndId(opts, function(err) {
                    if (err) {
                        callback(err);
                        return;
                    }
                    callback(null);
                });
            } else {
                callback(null);
            }
        }
    }

    // Load the config from disk, initialize the logger, and verify the
    // on-disk node type matches what this process was started as.
    function initialize_config(callback) {
        console.info('Initializing configuration...');
        m_config.initialize(function(err) {
            if (err) {
                callback(err);
                return;
            }
            require(__dirname + '/logger.js').initialize({
                application: 'hemlock',
                directory: m_config.configDir() + '/logs'
            });
            if (m_config.hemlockNodeType() != node_type) {
                callback('Incorrect type for hemlock node: ' + m_config.hemlockNodeType());
                return;
            }
            callback(null);
        });
    }

    // Bind the injected app to an http or https server and start listening.
    function start_http_server(callback) {
        if (!m_http_server) {
            callback('Http server not set.');
            return;
        }
        let app = m_http_server;
        get_listen_port(function(err, listen_port) {
            if (err) {
                callback(err);
                return;
            }
            app.port = listen_port;
            m_config.setListenPort(listen_port);
            // NOTE(review): any non-empty SSL env value (even "false") is
            // truthy here — confirm that is intended.
            if (process.env.SSL != null ? process.env.SSL : listen_port % 1000 == 443) {
                // The port number ends with 443, so we are using https
                app.USING_HTTPS = true;
                app.protocol = 'https';
                // Look for the credentials inside the encryption directory
                // You can generate these for free using the tools of letsencrypt.org
                const options = {
                    key: fs.readFileSync(__dirname + '/encryption/privkey.pem'),
                    cert: fs.readFileSync(__dirname + '/encryption/fullchain.pem'),
                    ca: fs.readFileSync(__dirname + '/encryption/chain.pem')
                };
                // Create the https server
                app.server = require('https').createServer(options, app);
            } else {
                app.protocol = 'http';
                // Create the http server and start listening
                app.server = require('http').createServer(app);
            }
            // start listening
            logger.info('Starting http server.', {
                port: app.port,
                protocol: app.protocol,
                node_type: node_type
            });
            app.server.listen(listen_port, function() {
                console.info(`${m_config.getConfig('network_type')} server is running ${app.protocol} on port ${app.port}`);
                callback(null);
            });
        });
    }

    // Payload reported upward: delegated to the hub or leaf manager.
    // A leaf without a manager is a programming error -> hard exit.
    function get_node_data_for_parent() {
        if (m_config.hemlockNodeType() == 'hub') {
            return m_context.hub_manager.nodeDataForParent();
        } else if (m_config.hemlockNodeType() == 'leaf') {
            if (!m_context.leaf_manager) {
                console.error('Leaf manager not set.');
                process.exit(-1);
            }
            return m_context.leaf_manager.nodeDataForParent();
        } else {
            return {};
        }
    }

    // Initial parent connection; retries forever every retry_timeout_sec
    // seconds until it succeeds.
    function connect_to_parent_hub(callback) {
        var opts = {
            retry_timeout_sec: 4,
            retry2_timeout_sec: 10
        };
        do_connect_to_parent_hub(opts, function(err) {
            if (err) {
                setTimeout(function() {
                    console.error('Connection to parent hub failed: ' + err);
                    console.info(`Trying again in ${opts.retry_timeout_sec} seconds`);
                    connect_to_parent_hub(callback);
                }, opts.retry_timeout_sec * 1000);
                return;
            }
            callback(null);
        });
    }

    // One connection attempt. A missing/'.' parent url means "top-level":
    // allowed for hubs, an error for leaves. On later disconnect, schedules
    // reconnection via retry_connect_to_parent_hub.
    function do_connect_to_parent_hub(opts, callback) {
        var parent_hub_url = m_config.getConfig('parent_hub_url');
        if ((!parent_hub_url) || (parent_hub_url == '.')) {
            if (node_type == 'leaf') {
                callback('No parent hub url specified.');
            } else {
                callback(null);
            }
            return;
        }
        m_context.connection_to_parent_hub = new HemlockConnectionToParentHub(m_config);
        m_context.connection_to_parent_hub.onClose(function() {
            // Force a full (non-delta) report after reconnection.
            m_last_node_data_reported = null;
            m_context.connection_to_parent_hub = null;
            if (opts.retry_timeout_sec) {
                var logmsg = `Connection to parent hub closed. Will retry in ${opts.retry_timeout_sec} seconds...`;
                logger.info(logmsg);
                console.info(logmsg);
                setTimeout(function() {
                    retry_connect_to_parent_hub(opts);
                }, opts.retry_timeout_sec * 1000);
            }
        });
        /////////////////////////////////////////////////////////////////////////////////////
        console.info('Connecting to parent hub: ' + parent_hub_url);
        logger.info('Attempting to connect to parent hub', {
            opts: opts
        });
        /////////////////////////////////////////////////////////////////////////////////////
        m_context.connection_to_parent_hub.initialize(parent_hub_url, function(err) {
            if (err) {
                callback(err);
                return;
            }
            if (m_context.leaf_manager) {
                m_context.leaf_manager.restart();
            }
            callback(null);
        });
    }

    // Reconnection loop after a dropped connection, using the slower
    // retry2_timeout_sec interval.
    function retry_connect_to_parent_hub(opts) {
        do_connect_to_parent_hub(opts, function(err) {
            if (err) {
                console.error(err);
                if (opts.retry2_timeout_sec) {
                    var logmsg = `Failed to reconnect to parent hub. Will retry in ${opts.retry2_timeout_sec} seconds...`;
                    logger.info(logmsg);
                    console.info(logmsg);
                    setTimeout(function() {
                        retry_connect_to_parent_hub(opts);
                    }, opts.retry2_timeout_sec * 1000);
                }
            }
        });
    }

    // Hubs only: accept websocket connections from child nodes on top of
    // the already-running http server.
    function start_websocket_server(callback) {
        if (node_type != 'hub') {
            console.error('start_websocket_server is only for node_type=hub');
            process.exit(-1);
        }
        //initialize the WebSocket server instance
        logger.info('Starting WebSocket server.');
        const wss = new WebSocket.Server({
            server: m_http_server.server
        });
        wss.on('connection', (ws, req) => {
            // Logging ////////////////////////////////
            const ip = req.connection.remoteAddress;
            var ip_forwarded_for;
            if (req.headers['x-forwarded-for']) {
                // Behind a proxy: the first entry is the original client.
                ip_forwarded_for = req.headers['x-forwarded-for'].split(/\s*,\s*/)[0];
            }
            logger.info('New websocket connection.', {
                ip: ip,
                ip_forwarded_for: ip_forwarded_for
            });
            ////////////////////////////////////////////
            on_new_websocket_connection(ws);
        });
        callback(null);
    }

    // Wrap an incoming child websocket, wait for it to register, then hand
    // it to the leaf or child-hub manager and confirm registration.
    function on_new_websocket_connection(ws) {
        if (node_type != 'hub') {
            console.error('on_new_websocket_connection is only for node_type=hub');
            process.exit(-1);
        }
        var PWS = new PoliteWebSocket({
            wait_for_response: false,
            enforce_remote_wait_for_response: true,
            timeout_sec: 60
        });
        PWS.setSocket(ws);
        var CC = new HemlockConnectionToChildNode(m_config);
        CC.setWebSocket(PWS);
        CC.onRegistered(function() {
            logger.info('Child has registered', {
                info: CC.childNodeRegistrationInfo()
            });
            if (CC.childNodeType() == 'leaf') {
                // Everything looks okay, let's add this leaf to our manager
                const logmsg = `Adding child (leaf): ${CC.childNodeRegistrationInfo().name} (${CC.childNodeId()})`;
                logger.info(logmsg);
                console.info(logmsg);
                m_context.hub_manager.connectedLeafManager().addConnectedLeaf(CC, function(err) {
                    if (err) {
                        PWS.sendErrorAndClose(`Error adding leaf: ${err}`);
                        return;
                    }
                    // acknowledge receipt of the register message so that the child node can proceed
                    CC.sendMessage({
                        command: 'confirm_registration',
                        info: m_config.getNodeInfo()
                    });
                });
                //todo: how do we free up the CC object?
            } else if (CC.childNodeType() == 'hub') {
                // Everything looks okay, let's add this hub to our manager
                const logmsg = `Adding child hub: ${CC.childNodeRegistrationInfo().name} (${CC.childNodeId()})`;
                logger.info(logmsg);
                console.info(logmsg);
                m_context.hub_manager.connectedChildHubManager().addConnectedChildHub(CC, function(err) {
                    if (err) {
                        PWS.sendErrorAndClose(`Error adding child hub: ${err}`);
                        return;
                    }
                    // acknowledge receipt of the register message so that the child node can proceed
                    CC.sendMessage({
                        command: 'confirm_registration',
                        info: m_config.getNodeInfo()
                    });
                });
            } else {
                PWS.sendErrorAndClose('Unexpected child node type: ' + CC.childNodeType());
            }
        });
    }

    // Poll the config file's mtime every 3s; exit the process when it is
    // removed or modified so a supervisor can restart with the new config.
    function start_checking_config_changed(callback) {
        let config_file_mtime=null;
        do_check();
        callback();
        function do_check() {
            let config_fname=hemlock_node_directory + '/' + m_config_directory_name + '/' + m_config_file_name;
            if (!fs.existsSync(config_fname)) {
                console.info('Configuration file does not exist. Exiting.');
                process.exit(-1);
            }
            let stat0=stat_file(config_fname);
            if (!stat0) {
                console.info('Unable to stat config file. Exiting.');
                process.exit(-1);
            }
            if (config_file_mtime) {
                // Compare as strings to avoid Date object identity issues.
                if ((stat0.mtime+'')!=(config_file_mtime+'')) {
                    console.info('Configuration file has been modified. Exiting.');
                    process.exit(-1);
                }
            }
            config_file_mtime=stat0.mtime;
            setTimeout(function() {
                do_check();
            }, 3000);
        }
    }

    // Leaves grab a free port from HEMLOCK_LEAF_PORT_RANGE; hubs use the
    // configured listen_port.
    function get_listen_port(callback) {
        if (node_type == 'leaf') {
            // TODO: figure out better method for determining port in range
            get_free_port_in_range(HEMLOCK_LEAF_PORT_RANGE.split('-'), function(err, listen_port) {
                if (err) {
                    callback(err);
                    return;
                }
                callback(null, listen_port);
            });
        } else {
            var port = m_config.getConfig('listen_port');
            callback(null, port);
        }
    }

    // range is ['low','high'] (or a single port); find the first free one.
    function get_free_port_in_range(range, callback) {
        if (range.length > 2) {
            callback('Invalid port range.');
            return;
        }
        if (range.length < 1) {
            callback('Invalid port range (*).');
            return;
        }
        if (range.length == 1) {
            range.push(range[0]);
        }
        range[0] = Number(range[0]);
        range[1] = Number(range[1]);
        findPort('127.0.0.1', range[0], range[1], function(ports) {
            if (ports.length == 0) {
                callback(`No free ports found in range ${range[0]}-${range[1]}`);
                return;
            }
            callback(null, ports[0]);
        });
    }

    // Kick off the self-rescheduling reporting loop after a 1s delay.
    function start_sending_node_data_to_parent(callback) {
        setTimeout(function() {
            do_send_node_data_to_parent();
        }, 1000);
        callback();
    }

    // Report node data upward. After the first full send, only a
    // jsondiffpatch delta (or a no-change flag) plus a hash of the full
    // data is sent. Reschedules itself: 1s when disconnected, 5s otherwise.
    function do_send_node_data_to_parent() {
        if (!m_context.connection_to_parent_hub) {
            finalize(1000);
            return;
        }
        const node_data = get_node_data_for_parent();
        let msg = {
            command: 'report_node_data'
        };
        if (m_last_node_data_reported) {
            let delta = jsondiffpatch.diff(m_last_node_data_reported, node_data);
            if (delta)
                msg.data_delta = delta;
            else
                msg.data_nochange = true;
        } else {
            msg.data = node_data;
        }
        // Hash lets the parent verify the delta applied cleanly.
        msg.data_hash=object_hash(node_data);
        m_last_node_data_reported = node_data;
        m_context.connection_to_parent_hub.sendMessage(msg);
        finalize(5000);
        function finalize(msec_timeout) {
            setTimeout(function() {
                do_send_node_data_to_parent();
            }, msec_timeout);
        }
    }
}
// Format a byte count as a human-readable string.
// Above 10x a unit the value is shown as an integer of that unit;
// between 1x and 10x it is shown with one decimal place.
// BUG FIX: the original repeated `size_bytes > aaa` and `size_bytes > aa`
// in consecutive branches, making the one-decimal GB and MB branches
// unreachable. The thresholds now follow the same 10*unit / unit pattern
// the KB branches already used.
function format_file_size(size_bytes) {
    var a = 1024;
    var aa = a * a;
    var aaa = a * a * a;
    if (size_bytes > 10 * aaa) {
        return Math.floor(size_bytes / aaa) + ' GB';
    } else if (size_bytes > aaa) {
        return Math.floor(size_bytes / (aaa / 10)) / 10 + ' GB';
    } else if (size_bytes > 10 * aa) {
        return Math.floor(size_bytes / aa) + ' MB';
    } else if (size_bytes > aa) {
        return Math.floor(size_bytes / (aa / 10)) / 10 + ' MB';
    } else if (size_bytes > 10 * a) {
        return Math.floor(size_bytes / a) + ' KB';
    } else if (size_bytes > a) {
        return Math.floor(size_bytes / (a / 10)) / 10 + ' KB';
    } else {
        return size_bytes + ' bytes';
    }
}
/*
function write_text_file(fname, txt) {
try {
require('fs').writeFileSync(fname, txt);
return true;
} catch (err) {
return false;
}
}
*/
// Serialize `obj` as pretty-printed JSON (4-space indent) into `fname`.
// Returns true on success, false on any serialization/filesystem error.
function write_json_file(fname, obj) {
    var ok = true;
    try {
        var txt = JSON.stringify(obj, null, 4);
        require('fs').writeFileSync(fname, txt);
    } catch (err) {
        ok = false;
    }
    return ok;
}
function stat_file(fname) {
try {
return require('fs').statSync(fname);
} catch (err) {
return null;
}
}
|
// Babel configuration: compile modern JS for the target environments
// (preset-env), strip TypeScript types (preset-typescript), and use the
// automatic JSX runtime so files need no explicit React import (preset-react).
module.exports = {
    presets: [
        '@babel/preset-env',
        '@babel/preset-typescript',
        ['@babel/preset-react', {
            runtime: 'automatic'
        }]
    ]
}
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetNetworkInterfaceResult',
'AwaitableGetNetworkInterfaceResult',
'get_network_interface',
]
@pulumi.output_type
class GetNetworkInterfaceResult:
    """
    A network interface in a resource group.
    """
    def __init__(__self__, dns_settings=None, enable_accelerated_networking=None, enable_ip_forwarding=None, etag=None, hosted_workloads=None, id=None, ip_configurations=None, location=None, mac_address=None, name=None, network_security_group=None, primary=None, private_endpoint=None, provisioning_state=None, resource_guid=None, tags=None, tap_configurations=None, type=None, virtual_machine=None):
        # Generated pattern: each argument is runtime type-checked (only when
        # truthy) and then stored through pulumi.set for the @pulumi.output_type
        # property machinery.
        if dns_settings and not isinstance(dns_settings, dict):
            raise TypeError("Expected argument 'dns_settings' to be a dict")
        pulumi.set(__self__, "dns_settings", dns_settings)
        if enable_accelerated_networking and not isinstance(enable_accelerated_networking, bool):
            raise TypeError("Expected argument 'enable_accelerated_networking' to be a bool")
        pulumi.set(__self__, "enable_accelerated_networking", enable_accelerated_networking)
        if enable_ip_forwarding and not isinstance(enable_ip_forwarding, bool):
            raise TypeError("Expected argument 'enable_ip_forwarding' to be a bool")
        pulumi.set(__self__, "enable_ip_forwarding", enable_ip_forwarding)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if hosted_workloads and not isinstance(hosted_workloads, list):
            raise TypeError("Expected argument 'hosted_workloads' to be a list")
        pulumi.set(__self__, "hosted_workloads", hosted_workloads)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if ip_configurations and not isinstance(ip_configurations, list):
            raise TypeError("Expected argument 'ip_configurations' to be a list")
        pulumi.set(__self__, "ip_configurations", ip_configurations)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if mac_address and not isinstance(mac_address, str):
            raise TypeError("Expected argument 'mac_address' to be a str")
        pulumi.set(__self__, "mac_address", mac_address)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if network_security_group and not isinstance(network_security_group, dict):
            raise TypeError("Expected argument 'network_security_group' to be a dict")
        pulumi.set(__self__, "network_security_group", network_security_group)
        if primary and not isinstance(primary, bool):
            raise TypeError("Expected argument 'primary' to be a bool")
        pulumi.set(__self__, "primary", primary)
        if private_endpoint and not isinstance(private_endpoint, dict):
            raise TypeError("Expected argument 'private_endpoint' to be a dict")
        pulumi.set(__self__, "private_endpoint", private_endpoint)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if resource_guid and not isinstance(resource_guid, str):
            raise TypeError("Expected argument 'resource_guid' to be a str")
        pulumi.set(__self__, "resource_guid", resource_guid)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if tap_configurations and not isinstance(tap_configurations, list):
            raise TypeError("Expected argument 'tap_configurations' to be a list")
        pulumi.set(__self__, "tap_configurations", tap_configurations)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if virtual_machine and not isinstance(virtual_machine, dict):
            raise TypeError("Expected argument 'virtual_machine' to be a dict")
        pulumi.set(__self__, "virtual_machine", virtual_machine)

    @property
    @pulumi.getter(name="dnsSettings")
    def dns_settings(self) -> Optional['outputs.NetworkInterfaceDnsSettingsResponse']:
        """
        The DNS settings in network interface.
        """
        return pulumi.get(self, "dns_settings")

    @property
    @pulumi.getter(name="enableAcceleratedNetworking")
    def enable_accelerated_networking(self) -> Optional[bool]:
        """
        If the network interface is accelerated networking enabled.
        """
        return pulumi.get(self, "enable_accelerated_networking")

    @property
    @pulumi.getter(name="enableIPForwarding")
    def enable_ip_forwarding(self) -> Optional[bool]:
        """
        Indicates whether IP forwarding is enabled on this network interface.
        """
        return pulumi.get(self, "enable_ip_forwarding")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="hostedWorkloads")
    def hosted_workloads(self) -> Sequence[str]:
        """
        A list of references to linked BareMetal resources.
        """
        return pulumi.get(self, "hosted_workloads")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="ipConfigurations")
    def ip_configurations(self) -> Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']]:
        """
        A list of IPConfigurations of the network interface.
        """
        return pulumi.get(self, "ip_configurations")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter(name="macAddress")
    def mac_address(self) -> Optional[str]:
        """
        The MAC address of the network interface.
        """
        return pulumi.get(self, "mac_address")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="networkSecurityGroup")
    def network_security_group(self) -> Optional['outputs.NetworkSecurityGroupResponse']:
        """
        The reference of the NetworkSecurityGroup resource.
        """
        return pulumi.get(self, "network_security_group")

    @property
    @pulumi.getter
    def primary(self) -> Optional[bool]:
        """
        Whether this is a primary network interface on a virtual machine.
        """
        return pulumi.get(self, "primary")

    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> 'outputs.PrivateEndpointResponse':
        """
        A reference to the private endpoint to which the network interface is linked.
        """
        return pulumi.get(self, "private_endpoint")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the network interface resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> Optional[str]:
        """
        The resource GUID property of the network interface resource.
        """
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="tapConfigurations")
    def tap_configurations(self) -> Optional[Sequence['outputs.NetworkInterfaceTapConfigurationResponse']]:
        """
        A list of TapConfigurations of the network interface.
        """
        return pulumi.get(self, "tap_configurations")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="virtualMachine")
    def virtual_machine(self) -> 'outputs.SubResourceResponse':
        """
        The reference of a virtual machine.
        """
        return pulumi.get(self, "virtual_machine")
class AwaitableGetNetworkInterfaceResult(GetNetworkInterfaceResult):
    """Awaitable wrapper around ``GetNetworkInterfaceResult``.

    Lets callers ``await`` the invoke result; awaiting yields a plain
    (non-awaitable) ``GetNetworkInterfaceResult``.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` turns __await__ into a generator, as the
        # awaitable protocol requires, without ever actually suspending.
        if False:
            yield self
        # Re-wrap every resolved field in the plain result type.
        _fields = (
            'dns_settings',
            'enable_accelerated_networking',
            'enable_ip_forwarding',
            'etag',
            'hosted_workloads',
            'id',
            'ip_configurations',
            'location',
            'mac_address',
            'name',
            'network_security_group',
            'primary',
            'private_endpoint',
            'provisioning_state',
            'resource_guid',
            'tags',
            'tap_configurations',
            'type',
            'virtual_machine',
        )
        return GetNetworkInterfaceResult(**{f: getattr(self, f) for f in _fields})
def get_network_interface(expand: Optional[str] = None,
                          network_interface_name: Optional[str] = None,
                          resource_group_name: Optional[str] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkInterfaceResult:
    """
    A network interface in a resource group.


    :param str expand: Expands referenced resources.
    :param str network_interface_name: The name of the network interface.
    :param str resource_group_name: The name of the resource group.
    """
    # Wire-format argument names are camelCase, per the Azure API.
    __args__ = {
        'expand': expand,
        'networkInterfaceName': network_interface_name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the Azure Native provider; ``.value`` holds
    # the typed result object.
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20190801:getNetworkInterface', __args__, opts=opts, typ=GetNetworkInterfaceResult).value

    # Copy every field onto the awaitable wrapper so callers may ``await``.
    _fields = (
        'dns_settings',
        'enable_accelerated_networking',
        'enable_ip_forwarding',
        'etag',
        'hosted_workloads',
        'id',
        'ip_configurations',
        'location',
        'mac_address',
        'name',
        'network_security_group',
        'primary',
        'private_endpoint',
        'provisioning_state',
        'resource_guid',
        'tags',
        'tap_configurations',
        'type',
        'virtual_machine',
    )
    return AwaitableGetNetworkInterfaceResult(**{f: getattr(__ret__, f) for f in _fields})
|
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
'use strict';
// sample-metadata:
// title: Asset History Quickstart
// description: Batch get history of assets.
// usage: node getBatchAssetHistory "//storage.googleapis.com/<BUCKET_NAME>"
/**
 * Batch-gets the history of the given assets in the caller's project.
 *
 * @param {string} assetNames Comma-separated asset names, e.g.
 *     "//storage.googleapis.com/<BUCKET_NAME>".
 * @returns {Promise<void>} Resolves when the history has been printed.
 */
async function main(assetNames) {
  // [START asset_quickstart]
  const util = require('util');
  const {AssetServiceClient} = require('@google-cloud/asset');

  const client = new AssetServiceClient();

  async function quickstart() {
    const projectId = await client.getProjectId();
    const projectResource = `projects/${projectId}`;
    // TODO(developer): Choose asset names, such as //storage.googleapis.com/[YOUR_BUCKET_NAME].
    // const assetNames = ['ASSET_NAME1', 'ASSET_NAME2', ...];

    const request = {
      parent: projectResource,
      assetNames: assetNames.split(','),
      contentType: 'RESOURCE',
      readTimeWindow: {
        startTime: {
          seconds: Math.floor(new Date().getTime() / 1000),
        },
      },
    };

    // Handle the operation using the promise pattern.
    const result = await client.batchGetAssetsHistory(request);
    // Do things with with the response.
    console.log(util.inspect(result, {depth: null}));
    // [END asset_quickstart]
  }
  // BUG FIX: the original fired quickstart() without awaiting it, so any
  // API error surfaced as an unhandled promise rejection instead of
  // propagating to the caller.
  await quickstart();
}
// BUG FIX: report errors (e.g. missing argument, auth failure) instead of
// letting the returned promise reject unhandled; signal failure via exit code.
main(...process.argv.slice(2)).catch(err => {
  console.error(err);
  process.exitCode = 1;
});
|
/*
* Class TIME_VALUE
*/
#include "eif_macros.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Generated Eiffel runtime metadata for class TIME_VALUE (file id 986,
 * dynamic type 985).  Do not edit by hand.
 *
 * egt_N_986 arrays encode generic/attached type sequences, terminated by
 * 0xFFFF.  NOTE(review): the exact encoding (0xFF01 annotation marker
 * followed by a dynamic type id such as 985 or 232) is defined by the
 * EiffelStudio runtime headers -- confirm against eif_gen_conf.h.
 */
static const EIF_TYPE_INDEX egt_0_986 [] = {0xFF01,232,0xFFFF};
static const EIF_TYPE_INDEX egt_1_986 [] = {0xFF01,245,985,0xFFFF};
static const EIF_TYPE_INDEX egt_2_986 [] = {0xFF01,985,0xFFFF};
static const EIF_TYPE_INDEX egt_3_986 [] = {0,0xFFFF};
static const EIF_TYPE_INDEX egt_4_986 [] = {0,0xFFFF};
static const EIF_TYPE_INDEX egt_5_986 [] = {0xFF01,985,0xFFFF};
static const EIF_TYPE_INDEX egt_6_986 [] = {0xFF01,985,0xFFFF};
static const EIF_TYPE_INDEX egt_7_986 [] = {0,0xFFFF};
static const EIF_TYPE_INDEX egt_8_986 [] = {0xFF01,14,0xFFFF};
static const EIF_TYPE_INDEX egt_9_986 [] = {0xFF01,232,0xFFFF};
static const EIF_TYPE_INDEX egt_10_986 [] = {0xFF01,232,0xFFFF};
static const EIF_TYPE_INDEX egt_11_986 [] = {0xFF01,15,0xFFFF};
static const EIF_TYPE_INDEX egt_12_986 [] = {0xFF01,985,0xFFFF};
static const EIF_TYPE_INDEX egt_13_986 [] = {0xFF01,975,0xFFFF};
static const EIF_TYPE_INDEX egt_14_986 [] = {0xFF01,232,0xFFFF};
static const EIF_TYPE_INDEX egt_15_986 [] = {0xFF01,232,0xFFFF};

/*
 * Feature-descriptor table: one {type info, feature/body id, pattern id}
 * entry per feature.  EIF_GENERIC(...) entries point at an egt_* type
 * sequence above (or NULL for procedures); EIF_NON_GENERIC(...) entries
 * carry an encoded type id directly.  0xFFFFFFFF means "not applicable".
 */
static const struct desc_info desc_986[] = {
{EIF_GENERIC(NULL), 0xFFFFFFFF, 0xFFFFFFFF},
{EIF_GENERIC(egt_0_986), 0, 0xFFFFFFFF},
{EIF_GENERIC(egt_1_986), 1, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 2, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 3, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 4, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 5, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 6, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 7, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 8, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 9, 0xFFFFFFFF},
{EIF_GENERIC(egt_2_986), 10, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 11, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 12, 0xFFFFFFFF},
{EIF_GENERIC(egt_3_986), 13, 0xFFFFFFFF},
{EIF_GENERIC(egt_4_986), 14, 0xFFFFFFFF},
{EIF_GENERIC(egt_5_986), 15, 0xFFFFFFFF},
{EIF_GENERIC(egt_6_986), 16, 0xFFFFFFFF},
{EIF_GENERIC(egt_7_986), 17, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 18, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 19, 0xFFFFFFFF},
{EIF_GENERIC(egt_8_986), 20, 0xFFFFFFFF},
{EIF_GENERIC(egt_9_986), 21, 0xFFFFFFFF},
{EIF_GENERIC(egt_10_986), 22, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 23, 0xFFFFFFFF},
{EIF_GENERIC(egt_11_986), 24, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 25, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 26, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 27, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x07B3 /*985*/), 28, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01C7 /*227*/), 29, 0xFFFFFFFF},
{EIF_GENERIC(egt_12_986), 30, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01B5 /*218*/), 13723, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01B5 /*218*/), 13724, 0xFFFFFFFF},
{EIF_GENERIC(egt_13_986), 13725, 0xFFFFFFFF},
{EIF_GENERIC(egt_14_986), 13726, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01B5 /*218*/), 13728, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01B5 /*218*/), 13729, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01B5 /*218*/), 13730, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01B5 /*218*/), 13731, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01B5 /*218*/), 13732, 0xFFFFFFFF},
{EIF_GENERIC(egt_15_986), 13727, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01B5 /*218*/), 13733, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01B5 /*218*/), 13734, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01B5 /*218*/), 13735, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x019D /*206*/), 13738, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 13744, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 13745, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 13746, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 13743, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 13742, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x019D /*206*/), 13736, 4},
{EIF_NON_GENERIC(0x01B5 /*218*/), 13737, 0},
{EIF_NON_GENERIC(0x01B5 /*218*/), 13739, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01B5 /*218*/), 13740, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01B5 /*218*/), 13741, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01B5 /*218*/), 13747, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01B5 /*218*/), 13748, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01B5 /*218*/), 13749, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01B5 /*218*/), 13750, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01B5 /*218*/), 13751, 0xFFFFFFFF},
};
/*
 * Registers slices of desc_986 with the Eiffel runtime descriptor table
 * for dynamic type 985 (TIME_VALUE).  Each IDSC call associates a range
 * of descriptors with an origin-class id (0, 1, 435, 432, 425, 428).
 * NOTE(review): IDSC semantics come from eif_macros.h -- generated code,
 * do not edit by hand.
 */
void Init986(void)
{
IDSC(desc_986, 0, 985);
IDSC(desc_986 + 1, 1, 985);
IDSC(desc_986 + 32, 435, 985);
IDSC(desc_986 + 36, 432, 985);
IDSC(desc_986 + 42, 425, 985);
IDSC(desc_986 + 51, 428, 985);
}
#ifdef __cplusplus
}
#endif
|
// Foundation 4.3.1 "orbit" content-slider plugin (minified build).
// Defines the slider controller (markup building, slide navigation, touch
// swipe, bullets, slide counter), a progress timer, slide/fade animation
// helpers, and registers Foundation.libs.orbit with its default settings.
// NOTE(review): generated/minified artifact -- statements are split across
// physical lines, so no comments can be placed inside; edit the unminified
// foundation.orbit.js source instead of this file.
!function(a,b,c,d){"use strict";var e=function(){},f=function(e,f){if(e.hasClass(f.slides_container_class))return this;var k,m,n,o,q,r,j=this,l=e,p=0,s=!1;l.children().first().addClass(f.active_slide_class),j.update_slide_number=function(b){f.slide_number&&(m.find("span:first").text(parseInt(b)+1),m.find("span:last").text(l.children().length)),f.bullets&&(n.children().removeClass(f.bullets_active_class),a(n.children().get(b)).addClass(f.bullets_active_class))},j.build_markup=function(){l.wrap('<div class="'+f.container_class+'"></div>'),k=l.parent(),l.addClass(f.slides_container_class),f.navigation_arrows&&(k.append(a("<a>").addClass(f.prev_class).append("<span>")),k.append(a("<a>").addClass(f.next_class).append("<span>"))),f.timer&&(o=a("<div>").addClass(f.timer_container_class),o.append("<span>"),o.append(a("<div>").addClass(f.timer_progress_class)),o.addClass(f.timer_paused_class),k.append(o)),f.slide_number&&(m=a("<div>").addClass(f.slide_number_class),m.append("<span></span> of <span></span>"),k.append(m)),f.bullets&&(n=a("<ol>").addClass(f.bullets_container_class),k.append(n),l.children().each(function(b){var d=a("<li>").attr("data-orbit-slide",b);n.append(d)})),f.stack_on_small&&k.addClass(f.stack_on_small_class),j.update_slide_number(0)},j._goto=function(b,c){if(b===p)return!1;"object"==typeof r&&r.restart();var d=l.children(),e="next";s=!0,p>b&&(e="prev"),b>=d.length?b=0:0>b&&(b=d.length-1);var g=a(d.get(p)),h=a(d.get(b));g.css("zIndex",2),h.css("zIndex",4).addClass("active"),l.trigger("orbit:before-slide-change"),f.before_slide_change();var i=function(){var a=function(){p=b,s=!1,c===!0&&(r=j.create_timer(),r.start()),j.update_slide_number(p),l.trigger("orbit:after-slide-change",[{slide_number:p,total_slides:d.length}]),f.after_slide_change(p,d.length)};l.height()!=h.height()?l.animate({height:h.height()},250,"linear",a):a()};if(1===d.length)return i(),!1;var 
k=function(){"next"===e&&q.next(g,h,i),"prev"===e&&q.prev(g,h,i)};h.height()>l.height()?l.animate({height:h.height()},250,"linear",k):k()},j.next=function(a){a.stopImmediatePropagation(),a.preventDefault(),j._goto(p+1)},j.prev=function(a){a.stopImmediatePropagation(),a.preventDefault(),j._goto(p-1)},j.link_custom=function(b){b.preventDefault();var c=a(this).attr("data-orbit-link");if("string"==typeof c&&""!=(c=a.trim(c))){var d=k.find("[data-orbit-slide="+c+"]");-1!=d.index()&&j._goto(d.index())}},j.link_bullet=function(){var c=a(this).attr("data-orbit-slide");"string"==typeof c&&""!=(c=a.trim(c))&&j._goto(c)},j.timer_callback=function(){j._goto(p+1,!0)},j.compute_dimensions=function(){var b=a(l.children().get(p)),c=b.height();f.variable_height||l.children().each(function(){a(this).height()>c&&(c=a(this).height())}),l.height(c)},j.create_timer=function(){var a=new g(k.find("."+f.timer_container_class),f,j.timer_callback);return a},j.stop_timer=function(){"object"==typeof r&&r.stop()},j.toggle_timer=function(){var a=k.find("."+f.timer_container_class);a.hasClass(f.timer_paused_class)?("undefined"==typeof r&&(r=j.create_timer()),r.start()):"object"==typeof r&&r.stop()},j.init=function(){j.build_markup(),f.timer&&(r=j.create_timer(),r.start()),q=new i(l),"slide"===f.animation&&(q=new h(l)),k.on("click","."+f.next_class,j.next),k.on("click","."+f.prev_class,j.prev),k.on("click","[data-orbit-slide]",j.link_bullet),k.on("click",j.toggle_timer),k.on("touchstart.fndtn.orbit",function(a){a.touches||(a=a.originalEvent);var b={start_page_x:a.touches[0].pageX,start_page_y:a.touches[0].pageY,start_time:(new Date).getTime(),delta_x:0,is_scrolling:d};k.data("swipe-transition",b),a.stopPropagation()}).on("touchmove.fndtn.orbit",function(a){if(a.touches||(a=a.originalEvent),!(a.touches.length>1||a.scale&&1!==a.scale)){var b=k.data("swipe-transition");if("undefined"==typeof b&&(b={}),b.delta_x=a.touches[0].pageX-b.start_page_x,"undefined"==typeof 
b.is_scrolling&&(b.is_scrolling=!!(b.is_scrolling||Math.abs(b.delta_x)<Math.abs(a.touches[0].pageY-b.start_page_y))),!b.is_scrolling&&!b.active){a.preventDefault();var c=b.delta_x<0?p+1:p-1;b.active=!0,j._goto(c)}}}).on("touchend.fndtn.orbit",function(a){k.data("swipe-transition",{}),a.stopPropagation()}).on("mouseenter.fndtn.orbit",function(){f.timer&&f.pause_on_hover&&j.stop_timer()}).on("mouseleave.fndtn.orbit",function(){f.timer&&f.resume_on_mouseout&&r.start()}),a(c).on("click","[data-orbit-link]",j.link_custom),a(b).on("resize",j.compute_dimensions),a(b).on("load",j.compute_dimensions),l.trigger("orbit:ready")},j.init()},g=function(a,b,c){var g,h,d=this,e=b.timer_speed,f=a.find("."+b.timer_progress_class),i=-1;this.update_progress=function(a){var b=f.clone();b.attr("style",""),b.css("width",a+"%"),f.replaceWith(b),f=b},this.restart=function(){clearTimeout(h),a.addClass(b.timer_paused_class),i=-1,d.update_progress(0)},this.start=function(){return a.hasClass(b.timer_paused_class)?(i=-1===i?e:i,a.removeClass(b.timer_paused_class),g=(new Date).getTime(),f.animate({width:"100%"},i,"linear"),h=setTimeout(function(){d.restart(),c()},i),a.trigger("orbit:timer-started"),void 0):!0},this.stop=function(){if(a.hasClass(b.timer_paused_class))return!0;clearTimeout(h),a.addClass(b.timer_paused_class);var c=(new Date).getTime();i-=c-g;var f=100-100*(i/e);d.update_progress(f),a.trigger("orbit:timer-stopped")}},h=function(){var c=400,d=1===a("html[dir=rtl]").length,e=d?"marginRight":"marginLeft";this.next=function(a,b,d){b.animate({margin:"0%"},c,"linear",function(){a.css(e,"100%"),d()})},this.prev=function(a,b,d){b.css(e,"-100%"),b.animate({margin:"0%"},c,"linear",function(){a.css(e,"100%"),d()})}},i=function(){var 
b=250;this.next=function(a,c,d){c.css({marginLeft:"0%",opacity:"0.01"}),c.animate({opacity:"1"},b,"linear",function(){a.css("marginLeft","100%"),d()})},this.prev=function(a,c,d){c.css({marginLeft:"0%",opacity:"0.01"}),c.animate({opacity:"1"},b,"linear",function(){a.css("marginLeft","100%"),d()})}};Foundation.libs=Foundation.libs||{},Foundation.libs.orbit={name:"orbit",version:"4.3.1",settings:{animation:"slide",timer_speed:1e4,pause_on_hover:!0,resume_on_mouseout:!1,animation_speed:500,stack_on_small:!1,navigation_arrows:!0,slide_number:!0,container_class:"orbit-container",stack_on_small_class:"orbit-stack-on-small",next_class:"orbit-next",prev_class:"orbit-prev",timer_container_class:"orbit-timer",timer_paused_class:"paused",timer_progress_class:"orbit-progress",slides_container_class:"orbit-slides-container",bullets_container_class:"orbit-bullets",bullets_active_class:"active",slide_number_class:"orbit-slide-number",caption_class:"orbit-caption",active_slide_class:"active",orbit_transition_class:"orbit-transitioning",bullets:!0,timer:!0,variable_height:!1,before_slide_change:e,after_slide_change:e},init:function(b,c){var e=this;if(Foundation.inherit(e,"data_options"),"object"==typeof c&&a.extend(!0,e.settings,c),a(b).is("[data-orbit]")){var g=a(b),h=e.data_options(g);new f(g,a.extend({},e.settings,h))}a("[data-orbit]",b).each(function(b,c){var d=a(c),g=e.data_options(d);new f(d,a.extend({},e.settings,g))})}}}(Foundation.zj,this,this.document);
|
/**
 * Populates frmTbx with its demo widgets: a header box plus six
 * caption-label / TextBox2 pairs showing different skins and options.
 * Generated-UI rewrite: repeated label and textbox construction is
 * factored into local helpers that emit identical configurations.
 */
function addWidgetsfrmTbx() {
    // Caption label shown above each demo textbox; only id and text vary.
    function sectionLabel(id, text) {
        return new kony.ui.Label({
            "id": id,
            "isVisible": true,
            "skin": "lblSub",
            "text": text
        }, {
            "containerWeight": 100,
            "contentAlignment": constants.CONTENT_ALIGN_MIDDLE_LEFT,
            "hExpand": true,
            "margin": [2, 5, 2, 5],
            "marginInPixel": false,
            "padding": [0, 0, 0, 0],
            "paddingInPixel": false,
            "vExpand": false,
            "widgetAlignment": constants.WIDGET_ALIGN_CENTER
        }, {
            "renderAsAnchor": false,
            "textCopyable": false
        });
    }
    // Demo textbox; skin/focusSkin share one name, and opts may override
    // placeholder, margin, and widgetAlignment.  "in" checks are used so
    // falsy (e.g. numeric 0) constant values still override correctly.
    function demoTextBox(id, skinName, opts) {
        opts = opts || {};
        var basic = {
            "autoCapitalize": constants.TEXTBOX_AUTO_CAPITALIZE_NONE,
            "focusSkin": skinName,
            "id": id,
            "isVisible": true,
            "keyBoardStyle": constants.TEXTBOX_KEY_BOARD_STYLE_DEFAULT,
            "secureTextEntry": false,
            "skin": skinName,
            "textInputMode": constants.TEXTBOX_INPUT_MODE_ANY
        };
        if ("placeholder" in opts) {
            basic["placeholder"] = opts["placeholder"];
        }
        var margin = ("margin" in opts) ? opts["margin"] : [3, 0, 3, 0];
        var align = ("widgetAlignment" in opts) ? opts["widgetAlignment"] : constants.WIDGET_ALIGN_CENTER;
        return new kony.ui.TextBox2(basic, {
            "containerHeightMode": constants.TEXTBOX_DEFAULT_PLATFORM_HEIGHT,
            "containerHeightReference": constants.CONTAINER_HEIGHT_BY_FORM_REFERENCE,
            "containerWeight": 100,
            "contentAlignment": constants.CONTENT_ALIGN_MIDDLE_LEFT,
            "hExpand": true,
            "margin": margin,
            "marginInPixel": false,
            "padding": [1, 3, 0, 3],
            "paddingInPixel": false,
            "widgetAlignment": align
        }, {
            "autoComplete": false,
            "autoCorrect": false
        });
    }
    // Header bar with the form title label.
    var hBoxTextBox = new kony.ui.Box({
        "focusSkin": "headBox",
        "id": "hBoxTextBox",
        "isVisible": true,
        "orientation": constants.BOX_LAYOUT_HORIZONTAL,
        "position": constants.BOX_POSITION_AS_NORMAL,
        "skin": "headBox"
    }, {
        "containerWeight": 100,
        "layoutAlignment": constants.BOX_LAYOUT_ALIGN_FROM_LEFT,
        "layoutType": constants.CONTAINER_LAYOUT_BOX,
        "marginInPixel": false,
        "padding": [0, 0, 0, 0],
        "paddingInPixel": false,
        "percent": true,
        "vExpand": false,
        "widgetAlignment": constants.WIDGET_ALIGN_TOP_LEFT
    }, {});
    var lblTextBox = new kony.ui.Label({
        "id": "lblTextBox",
        "isVisible": true,
        "skin": "headLabel",
        "text": "TextBox"
    }, {
        "containerWeight": 100,
        "contentAlignment": constants.CONTENT_ALIGN_CENTER,
        "hExpand": true,
        "marginInPixel": false,
        "padding": [1, 2, 1, 2],
        "paddingInPixel": false,
        "vExpand": false,
        "widgetAlignment": constants.WIDGET_ALIGN_CENTER
    }, {
        "renderAsAnchor": false,
        "textCopyable": false
    });
    hBoxTextBox.add(lblTextBox);
    // Demo sections: caption + textbox variant.
    var label192735980024525 = sectionLabel("label192735980024525", "Default TextBox");
    var textfield2192735980025983 = demoTextBox("textfield2192735980025983", "txtBxSkin", {});
    var label192735980024515 = sectionLabel("label192735980024515", "TextBox with placeholder");
    var textfield211798972521729 = demoTextBox("textfield211798972521729", "txtBxSkin", {
        "placeholder": "Enter Text",
        "widgetAlignment": constants.WIDGET_ALIGN_TOP_LEFT
    });
    var label192735980024550 = sectionLabel("label192735980024550", "TextBox with background color");
    var textfield2192735980024559 = demoTextBox("textfield2192735980024559", "txxBckColor", {
        "placeholder": "Enter Text"
    });
    var label192735980024575 = sectionLabel("label192735980024575", "TextBox with VerticalGradient skin");
    var textfield2192735980024586 = demoTextBox("textfield2192735980024586", "txtVerticalGradiant", {
        "placeholder": "Enter Text"
    });
    var label192735980024608 = sectionLabel("label192735980024608", "TextBox with VerticalSplit skin");
    var textfield2192735980024620 = demoTextBox("textfield2192735980024620", "txtVertiSplit", {
        "placeholder": "Enter Text"
    });
    var label192735980024647 = sectionLabel("label192735980024647", "TextBox with background image");
    var textfield2192735980024664 = demoTextBox("textfield2192735980024664", "txtImg", {
        "placeholder": "Enter Text",
        "margin": [3, 0, 3, 3]
    });
    frmTbx.add(hBoxTextBox, label192735980024525, textfield2192735980025983, label192735980024515, textfield211798972521729, label192735980024550, textfield2192735980024559, label192735980024575, textfield2192735980024586, label192735980024608, textfield2192735980024620, label192735980024647, textfield2192735980024664);
}
// Creates the global frmTbx form (TextBox demo screen).  Widget creation is
// deferred to addWidgetsfrmTbx via the "addWidgets" hook; the Kony runtime
// invokes it when the form is first built.  Generated code.
function frmTbxGlobals() {
    frmTbx = new kony.ui.Form2({
        "addWidgets": addWidgetsfrmTbx,
        "enabledForIdleTimeout": false,
        "id": "frmTbx",
        "needAppMenu": true,
        "skin": "frm",
        "title": "TextBox"
    }, {
        // Layout: allow both orientations, box layout, percentage padding.
        "displayOrientation": constants.FORM_DISPLAY_ORIENTATION_BOTH,
        "layoutType": constants.CONTAINER_LAYOUT_BOX,
        "paddingInPixel": false
    }, {
        // Slide in from the left on entry; no exit transition.
        "inTransitionConfig": {
            "formTransition": "leftCenter"
        },
        "outTransitionConfig": {
            "formTransition": "none"
        },
        "retainScrollPosition": false
    });
    // Platform-specific unique id used by the Kony tooling.
    frmTbx.info = {
        "kuid": "frmTbxSPAAndroid"
    };
};
|
/* @generated */
// prettier-ignore
if (Intl.DisplayNames && typeof Intl.DisplayNames.__addLocaleData === 'function') {
Intl.DisplayNames.__addLocaleData({"data":{"types":{"language":{"long":{"aa":"afar","ab":"abjasio","ace":"acehnés","ach":"acoli","ada":"adangme","ady":"adigeo","ae":"avéstico","af":"afrikáans","afh":"afrihili","agq":"aghem","ain":"ainu","ak":"akan","akk":"acadio","ale":"aleutiano","alt":"altái del sur","am":"amárico","an":"aragonés","ang":"inglés antiguo","anp":"angika","ar":"árabe","ar-001":"árabe estándar moderno","arc":"arameo","arn":"mapuche","arp":"arapaho","ars":"árabe de Néyed","arw":"arahuaco","as":"asamés","asa":"asu","ast":"asturiano","av":"avar","awa":"avadhi","ay":"aimara","az":"azerbaiyano","ba":"baskir","bal":"baluchi","ban":"balinés","bas":"basaa","bax":"bamún","bbj":"ghomala","be":"bielorruso","bej":"beja","bem":"bemba","bez":"bena","bfd":"bafut","bg":"búlgaro","bgn":"baluchi occidental","bho":"bhojpuri","bi":"bislama","bik":"bicol","bin":"bini","bkm":"kom","bla":"siksiká","bm":"bambara","bn":"bengalí","bo":"tibetano","br":"bretón","bra":"braj","brx":"bodo","bs":"bosnio","bss":"akoose","bua":"buriato","bug":"buginés","bum":"bulu","byn":"blin","byv":"medumba","ca":"catalán","cad":"caddo","car":"caribe","cay":"cayuga","cch":"atsam","ccp":"chakma","ce":"checheno","ceb":"cebuano","cgg":"chiga","ch":"chamorro","chb":"chibcha","chg":"chagatái","chk":"trukés","chm":"marí","chn":"jerga chinuk","cho":"choctaw","chp":"chipewyan","chr":"cheroqui","chy":"cheyene","ckb":"kurdo sorani","co":"corso","cop":"copto","cr":"cree","crh":"tártaro de Crimea","crs":"criollo seychelense","cs":"checo","csb":"casubio","cu":"eslavo eclesiástico","cv":"chuvasio","cy":"galés","da":"danés","dak":"dakota","dar":"dargva","dav":"taita","de":"alemán","de-AT":"alemán austríaco","de-CH":"alto alemán suizo","del":"delaware","den":"slave","dgr":"dogrib","din":"dinka","dje":"zarma","doi":"dogri","dsb":"bajo sorbio","dua":"duala","dum":"neerlandés medio","dv":"divehi","dyo":"jola-fonyi","dyu":"diula","dz":"dzongkha","dzg":"dazaga","ebu":"embu","ee":"ewé","efi":"efik","egy":"egipcio 
antiguo","eka":"ekajuk","el":"griego","elx":"elamita","en":"inglés","en-AU":"inglés australiano","en-CA":"inglés canadiense","en-GB":"inglés británico","en-US":"inglés estadounidense","enm":"inglés medio","eo":"esperanto","es":"español","es-419":"español latinoamericano","es-ES":"español de España","es-MX":"español de México","et":"estonio","eu":"vasco","ewo":"ewondo","fa":"persa","fa-AF":"darí","fan":"fang","fat":"fanti","ff":"fula","fi":"finés","fil":"filipino","fj":"fiyiano","fo":"feroés","fon":"fon","fr":"francés","fr-CA":"francés canadiense","fr-CH":"francés suizo","frc":"francés cajún","frm":"francés medio","fro":"francés antiguo","frr":"frisón septentrional","frs":"frisón oriental","fur":"friulano","fy":"frisón occidental","ga":"irlandés","gaa":"ga","gag":"gagauzo","gan":"chino gan","gay":"gayo","gba":"gbaya","gd":"gaélico escocés","gez":"geez","gil":"gilbertés","gl":"gallego","gmh":"alto alemán medio","gn":"guaraní","goh":"alemán de la alta edad antigua","gon":"gondi","gor":"gorontalo","got":"gótico","grb":"grebo","grc":"griego antiguo","gsw":"alemán suizo","gu":"gujarati","guz":"gusii","gv":"manés","gwi":"kutchin","ha":"hausa","hai":"haida","hak":"chino hakka","haw":"hawaiano","he":"hebreo","hi":"hindi","hil":"hiligaynon","hit":"hitita","hmn":"hmong","ho":"hiri motu","hr":"croata","hsb":"alto sorbio","hsn":"chino xiang","ht":"haitiano","hu":"húngaro","hup":"hupa","hy":"armenio","hz":"herero","ia":"interlingua","iba":"iban","ibb":"ibibio","id":"indonesio","ie":"interlingue","ig":"igbo","ii":"yi de Sichuán","ik":"inupiaq","ilo":"ilocano","inh":"ingush","io":"ido","is":"islandés","it":"italiano","iu":"inuktitut","ja":"japonés","jbo":"lojban","jgo":"ngomba","jmc":"machame","jpr":"judeo-persa","jrb":"judeo-árabe","jv":"javanés","ka":"georgiano","kaa":"karakalpako","kab":"cabila","kac":"kachin","kaj":"jju","kam":"kamba","kaw":"kawi","kbd":"cabardiano","kbl":"kanembu","kcg":"tyap","kde":"makonde","kea":"criollo 
caboverdiano","kfo":"koro","kg":"kongo","kgp":"kgp","kha":"khasi","kho":"kotanés","khq":"koyra chiini","ki":"kikuyu","kj":"kuanyama","kk":"kazajo","kkj":"kako","kl":"groenlandés","kln":"kalenjin","km":"jemer","kmb":"kimbundu","kn":"canarés","ko":"coreano","koi":"komi permio","kok":"konkaní","kos":"kosraeano","kpe":"kpelle","kr":"kanuri","krc":"karachái-bálkaro","krl":"carelio","kru":"kurukh","ks":"cachemiro","ksb":"shambala","ksf":"bafia","ksh":"kölsch","ku":"kurdo","kum":"kumyk","kut":"kutenai","kv":"komi","kw":"córnico","ky":"kirguís","la":"latín","lad":"ladino","lag":"langi","lah":"lahnda","lam":"lamba","lb":"luxemburgués","lez":"lezgiano","lg":"ganda","li":"limburgués","lij":"lij","lkt":"lakota","ln":"lingala","lo":"laosiano","lol":"mongo","lou":"criollo de Luisiana","loz":"lozi","lrc":"lorí septentrional","lt":"lituano","lu":"luba-katanga","lua":"luba-lulua","lui":"luiseño","lun":"lunda","luo":"luo","lus":"mizo","luy":"luyia","lv":"letón","mad":"madurés","maf":"mafa","mag":"magahi","mai":"maithili","mak":"macasar","man":"mandingo","mas":"masái","mde":"maba","mdf":"moksha","mdr":"mandar","men":"mende","mer":"meru","mfe":"criollo mauriciano","mg":"malgache","mga":"irlandés medio","mgh":"makhuwa-meetto","mgo":"meta’","mh":"marshalés","mi":"maorí","mic":"micmac","min":"minangkabau","mk":"macedonio","ml":"malayalam","mn":"mongol","mnc":"manchú","mni":"manipuri","moh":"mohawk","mos":"mossi","mr":"maratí","ms":"malayo","mt":"maltés","mua":"mundang","mul":"varios idiomas","mus":"creek","mwl":"mirandés","mwr":"marwari","my":"birmano","mye":"myene","myv":"erzya","mzn":"mazandaraní","na":"nauruano","nan":"chino min nan","nap":"napolitano","naq":"nama","nb":"noruego bokmal","nd":"ndebele septentrional","nds":"bajo alemán","nds-NL":"bajo sajón","ne":"nepalí","new":"newari","ng":"ndonga","nia":"nias","niu":"niueano","nl":"neerlandés","nl-BE":"flamenco","nmg":"kwasio","nn":"noruego nynorsk","nnh":"ngiemboon","no":"noruego","nog":"nogai","non":"nórdico 
antiguo","nqo":"n’ko","nr":"ndebele del sur","nso":"sotho septentrional","nus":"nuer","nv":"navajo","nwc":"newari clásico","ny":"nyanja","nym":"nyamwezi","nyn":"nyankole","nyo":"nyoro","nzi":"nzima","oc":"occitano","oj":"ojibwa","om":"oromo","or":"oriya","os":"osético","osa":"osage","ota":"turco otomano","pa":"panyabí","pag":"pangasinán","pal":"pahlavi","pam":"pampanga","pap":"papiamento","pau":"palauano","pcm":"pidgin de Nigeria","peo":"persa antiguo","phn":"fenicio","pi":"pali","pl":"polaco","pon":"pohnpeiano","prg":"prusiano antiguo","pro":"provenzal antiguo","ps":"pastún","pt":"portugués","pt-BR":"portugués de Brasil","pt-PT":"portugués de Portugal","qu":"quechua","quc":"quiché","raj":"rajasthani","rap":"rapanui","rar":"rarotongano","rm":"retorrománico","rn":"kirundi","ro":"rumano","ro-MD":"moldavo","rof":"rombo","rom":"romaní","ru":"ruso","rup":"arrumano","rw":"kinyarwanda","rwk":"rwa","sa":"sánscrito","sad":"sandawe","sah":"sakha","sam":"arameo samaritano","saq":"samburu","sas":"sasak","sat":"santali","sba":"ngambay","sbp":"sangu","sc":"sardo","scn":"siciliano","sco":"escocés","sd":"sindhi","sdh":"kurdo meridional","se":"sami septentrional","see":"seneca","seh":"sena","sel":"selkup","ses":"koyraboro senni","sg":"sango","sga":"irlandés antiguo","sh":"serbocroata","shi":"tashelhit","shn":"shan","shu":"árabe (Chad)","si":"cingalés","sid":"sidamo","sk":"eslovaco","sl":"esloveno","sm":"samoano","sma":"sami del sur","smj":"sami lule","smn":"sami inari","sms":"sami skolt","sn":"shona","snk":"soninké","so":"somalí","sog":"sogdiano","sq":"albanés","sr":"serbio","srn":"sranan tongo","srr":"serer","ss":"siswati","ssy":"saho","st":"sesotho del sur","su":"sundanés","suk":"sukuma","sus":"susu","sux":"sumerio","sv":"sueco","sw":"swahili","sw-CD":"swahili (Congo)","swb":"comorense","syc":"siríaco 
clásico","syr":"siríaco","ta":"tamil","te":"telugu","tem":"temne","teo":"teso","ter":"tereno","tet":"tetun","tg":"tayiko","th":"tailandés","ti":"tigriña","tig":"tigré","tiv":"tiv","tk":"turcomano","tkl":"tokelauano","tl":"tagalo","tlh":"klingon","tli":"tlingit","tmh":"tamashek","tn":"setsuana","to":"tongano","tog":"tonga del Nyasa","tpi":"tok pisin","tr":"turco","trv":"taroko","ts":"tsonga","tsi":"tsimshiano","tt":"tártaro","tum":"tumbuka","tvl":"tuvaluano","tw":"twi","twq":"tasawaq","ty":"tahitiano","tyv":"tuvano","tzm":"tamazight del Atlas Central","udm":"udmurt","ug":"uigur","uga":"ugarítico","uk":"ucraniano","umb":"umbundu","und":"lengua desconocida","ur":"urdu","uz":"uzbeko","vai":"vai","ve":"venda","vi":"vietnamita","vo":"volapük","vot":"vótico","vun":"vunjo","wa":"valón","wae":"walser","wal":"walamo","war":"waray","was":"washo","wbp":"warlpiri","wo":"wolof","wuu":"wu","xal":"calmuco","xh":"xhosa","xog":"soga","yao":"yao","yap":"yapés","yav":"yangben","ybb":"yemba","yi":"yidis","yo":"yoruba","yue":"cantonés","za":"zhuang","zap":"zapoteco","zbl":"símbolos Bliss","zen":"zenaga","zgh":"tamazight estándar marroquí","zh":"chino","zh-Hans":"chino simplificado","zh-Hant":"chino tradicional","zu":"zulú","zun":"zuni","zxx":"sin contenido lingüístico","zza":"zazaki"},"short":{"az":"azerí","en-GB":"inglés británico","en-US":"inglés estadounidense"},"narrow":{}},"region":{"long":{"142":"Asia","143":"Asia central","145":"Asia del Oeste","150":"Europa","151":"Europa del Este","154":"Europa del Norte","155":"Europa del Oeste","202":"África subsahariana","419":"Latinoamérica","001":"Mundo","002":"África","003":"América del Norte","005":"Sudamérica","009":"Oceanía","011":"África del Oeste","013":"Centroamérica","014":"África del Este","015":"África del Norte","017":"África central","018":"África del Sur","019":"América","021":"Norteamérica","029":"Caribe","030":"Asia del Este","034":"Asia del Sur","035":"Asia sudoriental","039":"Europa del 
Sur","053":"Australasia","054":"Melanesia","057":"Región de Micronesia","061":"Polinesia","AC":"Isla Ascensión","AD":"Andorra","AE":"Emiratos Árabes Unidos","AF":"Afganistán","AG":"Antigua y Barbuda","AI":"Anguila","AL":"Albania","AM":"Armenia","AO":"Angola","AQ":"Antártida","AR":"Argentina","AS":"Samoa Americana","AT":"Austria","AU":"Australia","AW":"Aruba","AX":"Islas Åland","AZ":"Azerbaiyán","BA":"Bosnia-Herzegovina","BB":"Barbados","BD":"Bangladés","BE":"Bélgica","BF":"Burkina Faso","BG":"Bulgaria","BH":"Baréin","BI":"Burundi","BJ":"Benín","BL":"San Bartolomé","BM":"Bermudas","BN":"Brunéi","BO":"Bolivia","BQ":"Caribe neerlandés","BR":"Brasil","BS":"Bahamas","BT":"Bután","BV":"Isla Bouvet","BW":"Botsuana","BY":"Bielorrusia","BZ":"Belice","CA":"Canadá","CC":"Islas Cocos","CD":"República Democrática del Congo","CF":"República Centroafricana","CG":"República del Congo","CH":"Suiza","CI":"Costa de Marfil","CK":"Islas Cook","CL":"Chile","CM":"Camerún","CN":"China","CO":"Colombia","CP":"Isla Clipperton","CR":"Costa Rica","CU":"Cuba","CV":"Cabo Verde","CW":"Curazao","CX":"Isla de Navidad","CY":"Chipre","CZ":"Chequia","DE":"Alemania","DG":"Diego García","DJ":"Yibuti","DK":"Dinamarca","DM":"Dominica","DO":"República Dominicana","DZ":"Argelia","EA":"Ceuta y Melilla","EC":"Ecuador","EE":"Estonia","EG":"Egipto","EH":"Sáhara Occidental","ER":"Eritrea","ES":"España","ET":"Etiopía","EU":"Unión Europea","EZ":"Eurozona","FI":"Finlandia","FJ":"Fiyi","FK":"Islas Malvinas","FM":"Micronesia","FO":"Islas Feroe","FR":"Francia","GA":"Gabón","GB":"Reino Unido","GD":"Granada","GE":"Georgia","GF":"Guayana Francesa","GG":"Guernesey","GH":"Ghana","GI":"Gibraltar","GL":"Groenlandia","GM":"Gambia","GN":"Guinea","GP":"Guadalupe","GQ":"Guinea Ecuatorial","GR":"Grecia","GS":"Islas Georgia del Sur y Sandwich del Sur","GT":"Guatemala","GU":"Guam","GW":"Guinea-Bisáu","GY":"Guyana","HK":"RAE de Hong Kong (China)","HM":"Islas Heard y 
McDonald","HN":"Honduras","HR":"Croacia","HT":"Haití","HU":"Hungría","IC":"Islas Canarias","ID":"Indonesia","IE":"Irlanda","IL":"Israel","IM":"Isla de Man","IN":"India","IO":"Territorio Británico del Océano Índico","IQ":"Irak","IR":"Irán","IS":"Islandia","IT":"Italia","JE":"Jersey","JM":"Jamaica","JO":"Jordania","JP":"Japón","KE":"Kenia","KG":"Kirguistán","KH":"Camboya","KI":"Kiribati","KM":"Comoras","KN":"San Cristóbal y Nieves","KP":"Corea del Norte","KR":"Corea del Sur","KW":"Kuwait","KY":"Islas Caimán","KZ":"Kazajistán","LA":"Laos","LB":"Líbano","LC":"Santa Lucía","LI":"Liechtenstein","LK":"Sri Lanka","LR":"Liberia","LS":"Lesoto","LT":"Lituania","LU":"Luxemburgo","LV":"Letonia","LY":"Libia","MA":"Marruecos","MC":"Mónaco","MD":"Moldavia","ME":"Montenegro","MF":"San Martín","MG":"Madagascar","MH":"Islas Marshall","MK":"Macedonia del Norte","ML":"Mali","MM":"Myanmar (Birmania)","MN":"Mongolia","MO":"RAE de Macao (China)","MP":"Islas Marianas del Norte","MQ":"Martinica","MR":"Mauritania","MS":"Montserrat","MT":"Malta","MU":"Mauricio","MV":"Maldivas","MW":"Malaui","MX":"México","MY":"Malasia","MZ":"Mozambique","NA":"Namibia","NC":"Nueva Caledonia","NE":"Níger","NF":"Isla Norfolk","NG":"Nigeria","NI":"Nicaragua","NL":"Países Bajos","NO":"Noruega","NP":"Nepal","NR":"Nauru","NU":"Niue","NZ":"Nueva Zelanda","OM":"Omán","PA":"Panamá","PE":"Perú","PF":"Polinesia Francesa","PG":"Papúa Nueva Guinea","PH":"Filipinas","PK":"Pakistán","PL":"Polonia","PM":"San Pedro y Miquelón","PN":"Islas Pitcairn","PR":"Puerto Rico","PS":"Territorios Palestinos","PT":"Portugal","PW":"Palaos","PY":"Paraguay","QA":"Catar","QO":"Islas Ultramarinas","RE":"Reunión","RO":"Rumanía","RS":"Serbia","RU":"Rusia","RW":"Ruanda","SA":"Arabia Saudí","SB":"Islas Salomón","SC":"Seychelles","SD":"Sudán","SE":"Suecia","SG":"Singapur","SH":"Santa Elena","SI":"Eslovenia","SJ":"Svalbard y Jan Mayen","SK":"Eslovaquia","SL":"Sierra Leona","SM":"San Marino","SN":"Senegal","SO":"Somalia","SR":"Surinam","SS":"Sudán del 
Sur","ST":"Santo Tomé y Príncipe","SV":"El Salvador","SX":"Sint Maarten","SY":"Siria","SZ":"Esuatini","TA":"Tristán da Cunha","TC":"Islas Turcas y Caicos","TD":"Chad","TF":"Territorios Australes Franceses","TG":"Togo","TH":"Tailandia","TJ":"Tayikistán","TK":"Tokelau","TL":"Timor-Leste","TM":"Turkmenistán","TN":"Túnez","TO":"Tonga","TR":"Turquía","TT":"Trinidad y Tobago","TV":"Tuvalu","TW":"Taiwán","TZ":"Tanzania","UA":"Ucrania","UG":"Uganda","UM":"Islas menores alejadas de EE. UU.","UN":"Naciones Unidas","US":"Estados Unidos","UY":"Uruguay","UZ":"Uzbekistán","VA":"Ciudad del Vaticano","VC":"San Vicente y las Granadinas","VE":"Venezuela","VG":"Islas Vírgenes Británicas","VI":"Islas Vírgenes de EE. UU.","VN":"Vietnam","VU":"Vanuatu","WF":"Wallis y Futuna","WS":"Samoa","XA":"Pseudoacentos","XB":"Pseudobidi","XK":"Kosovo","YE":"Yemen","YT":"Mayotte","ZA":"Sudáfrica","ZM":"Zambia","ZW":"Zimbabue","ZZ":"Región desconocida"},"short":{"GB":"R. U.","HK":"Hong Kong","MO":"Macao","PS":"Palestina","US":"EE. 
UU."},"narrow":{}},"script":{"long":{"Adlm":"Adlm","Aghb":"Aghb","Ahom":"Ahom","Arab":"árabe","Aran":"nastaliq","Armi":"Armi","Armn":"armenio","Avst":"avéstico","Bali":"balinés","Bamu":"Bamu","Bass":"Bass","Batk":"batak","Beng":"bengalí","Bhks":"Bhks","Blis":"símbolos blis","Bopo":"bopomofo","Brah":"brahmi","Brai":"braille","Bugi":"buginés","Buhd":"buhid","Cakm":"Cakm","Cans":"silabarios aborígenes canadienses unificados","Cari":"cario","Cham":"cham","Cher":"cherokee","Chrs":"Chrs","Cirt":"cirth","Copt":"copto","Cprt":"chipriota","Cyrl":"cirílico","Cyrs":"cirílico del antiguo eslavo eclesiástico","Deva":"devanagari","Diak":"Diak","Dogr":"Dogr","Dsrt":"deseret","Dupl":"Dupl","Egyd":"egipcio demótico","Egyh":"egipcio hierático","Egyp":"jeroglíficos egipcios","Elba":"Elba","Elym":"Elym","Ethi":"etiópico","Geok":"georgiano eclesiástico","Geor":"georgiano","Glag":"glagolítico","Gong":"Gong","Gonm":"Gonm","Goth":"gótico","Gran":"Gran","Grek":"griego","Gujr":"gujarati","Guru":"gurmuji","Hanb":"han con bopomofo","Hang":"hangul","Hani":"han","Hano":"hanunoo","Hans":"simplificado","Hant":"tradicional","Hatr":"Hatr","Hebr":"hebreo","Hira":"hiragana","Hluw":"Hluw","Hmng":"pahawh hmong","Hmnp":"Hmnp","Hrkt":"katakana o hiragana","Hung":"húngaro antiguo","Inds":"Indio (harappan)","Ital":"antigua bastardilla","Jamo":"jamo","Java":"javanés","Jpan":"japonés","Kali":"kayah li","Kana":"katakana","Khar":"kharosthi","Khmr":"jemer","Khoj":"Khoj","Kits":"Kits","Knda":"canarés","Kore":"coreano","Kthi":"Kthi","Lana":"lanna","Laoo":"lao","Latf":"latino fraktur","Latg":"latino gaélico","Latn":"latín","Lepc":"lepcha","Limb":"limbu","Lina":"lineal A","Linb":"lineal B","Lisu":"Lisu","Lyci":"licio","Lydi":"lidio","Mahj":"Mahj","Maka":"Maka","Mand":"mandeo","Mani":"Mani","Marc":"Marc","Maya":"jeroglíficos 
mayas","Medf":"Medf","Mend":"Mend","Merc":"Merc","Mero":"meroítico","Mlym":"malayalam","Modi":"Modi","Mong":"mongol","Moon":"moon","Mroo":"Mroo","Mtei":"manipuri","Mult":"Mult","Mymr":"birmano","Nand":"Nand","Narb":"Narb","Nbat":"Nbat","Newa":"Newa","Nkoo":"n’ko","Nshu":"Nshu","Ogam":"ogham","Olck":"ol chiki","Orkh":"orkhon","Orya":"oriya","Osge":"Osge","Osma":"osmaniya","Palm":"Palm","Pauc":"Pauc","Perm":"permiano antiguo","Phag":"phags-pa","Phli":"Phli","Phlp":"Phlp","Phnx":"fenicio","Plrd":"Pollard Miao","Prti":"Prti","Qaag":"zawgyi","Rjng":"rejang","Rohg":"Rohg","Roro":"rongo-rongo","Runr":"rúnico","Samr":"Samr","Sara":"sarati","Sarb":"Sarb","Saur":"saurashtra","Sgnw":"SignWriting","Shaw":"shaviano","Shrd":"Shrd","Sidd":"Sidd","Sind":"Sind","Sinh":"cingalés","Sogd":"Sogd","Sogo":"Sogo","Sora":"Sora","Soyo":"Soyo","Sund":"sundanés","Sylo":"syloti nagri","Syrc":"siriaco","Syre":"siriaco estrangelo","Syrj":"siriaco occidental","Syrn":"siriaco oriental","Tagb":"tagbanúa","Takr":"Takr","Tale":"tai le","Talu":"nuevo tai lue","Taml":"tamil","Tang":"Tang","Tavt":"Tavt","Telu":"telugu","Teng":"tengwar","Tfng":"tifinagh","Tglg":"tagalo","Thaa":"thaana","Thai":"tailandés","Tibt":"tibetano","Tirh":"Tirh","Ugar":"ugarítico","Vaii":"vai","Visp":"lenguaje visible","Wara":"Wara","Wcho":"Wcho","Xpeo":"persa antiguo","Xsux":"cuneiforme sumerio-acadio","Yezi":"Yezi","Yiii":"yi","Zanb":"Zanb","Zinh":"heredado","Zmth":"notación matemática","Zsye":"emojis","Zsym":"símbolos","Zxxx":"no escrito","Zyyy":"común","Zzzz":"alfabeto desconocido"},"short":{},"narrow":{}},"currency":{"long":{"ADP":"peseta andorrana","AED":"dírham de los Emiratos Árabes Unidos","AFA":"afgani (1927–2002)","AFN":"afgani","ALK":"ALK","ALL":"lek","AMD":"dram","ANG":"florín de las Antillas Neerlandesas","AOA":"kuanza","AOK":"kwanza angoleño (1977–1990)","AON":"nuevo kwanza angoleño (1990–2000)","AOR":"kwanza reajustado angoleño (1995–1999)","ARA":"austral argentino","ARL":"ARL","ARM":"ARM","ARP":"peso argentino 
(1983–1985)","ARS":"peso argentino","ATS":"chelín austriaco","AUD":"dólar australiano","AWG":"florín arubeño","AZM":"manat azerí (1993–2006)","AZN":"manat azerbaiyano","BAD":"dinar bosnio","BAM":"marco convertible de Bosnia y Herzegovina","BAN":"BAN","BBD":"dólar barbadense","BDT":"taka","BEC":"franco belga (convertible)","BEF":"franco belga","BEL":"franco belga (financiero)","BGL":"lev fuerte búlgaro","BGM":"BGM","BGN":"lev búlgaro","BGO":"BGO","BHD":"dinar bahreiní","BIF":"franco burundés","BMD":"dólar de Bermudas","BND":"dólar bruneano","BOB":"boliviano","BOL":"BOL","BOP":"peso boliviano","BOV":"MVDOL boliviano","BRB":"nuevo cruceiro brasileño (1967–1986)","BRC":"cruzado brasileño","BRE":"cruceiro brasileño (1990–1993)","BRL":"real brasileño","BRN":"nuevo cruzado brasileño","BRR":"cruceiro brasileño","BRZ":"BRZ","BSD":"dólar bahameño","BTN":"gultrum","BUK":"kyat birmano","BWP":"pula","BYB":"nuevo rublo bielorruso (1994–1999)","BYN":"rublo bielorruso","BYR":"rublo bielorruso (2000–2016)","BZD":"dólar beliceño","CAD":"dólar canadiense","CDF":"franco congoleño","CHE":"euro WIR","CHF":"franco suizo","CHW":"franco WIR","CLE":"CLE","CLF":"unidad de fomento chilena","CLP":"peso chileno","CNH":"yuan chino (extracontinental)","CNX":"CNX","CNY":"yuan","COP":"peso colombiano","COU":"unidad de valor real colombiana","CRC":"colón costarricense","CSD":"antiguo dinar serbio","CSK":"corona fuerte checoslovaca","CUC":"peso cubano convertible","CUP":"peso cubano","CVE":"escudo de Cabo Verde","CYP":"libra chipriota","CZK":"corona checa","DDM":"ostmark de Alemania del Este","DEM":"marco alemán","DJF":"franco yibutiano","DKK":"corona danesa","DOP":"peso dominicano","DZD":"dinar argelino","ECS":"sucre ecuatoriano","ECV":"unidad de valor constante (UVC) ecuatoriana","EEK":"corona estonia","EGP":"libra egipcia","ERN":"nakfa","ESA":"peseta española (cuenta A)","ESB":"peseta española (cuenta convertible)","ESP":"peseta española","ETB":"bir","EUR":"euro","FIM":"marco 
finlandés","FJD":"dólar fiyiano","FKP":"libra malvinense","FRF":"franco francés","GBP":"libra esterlina","GEK":"kupon larit georgiano","GEL":"lari","GHC":"cedi ghanés (1979–2007)","GHS":"cedi","GIP":"libra gibraltareña","GMD":"dalasi","GNF":"franco guineano","GNS":"syli guineano","GQE":"ekuele de Guinea Ecuatorial","GRD":"dracma griego","GTQ":"quetzal guatemalteco","GWE":"escudo de Guinea Portuguesa","GWP":"peso de Guinea-Bissáu","GYD":"dólar guyanés","HKD":"dólar hongkonés","HNL":"lempira hondureño","HRD":"dinar croata","HRK":"kuna","HTG":"gourde haitiano","HUF":"forinto húngaro","IDR":"rupia indonesia","IEP":"libra irlandesa","ILP":"libra israelí","ILR":"ILR","ILS":"nuevo séquel israelí","INR":"rupia india","IQD":"dinar iraquí","IRR":"rial iraní","ISJ":"ISJ","ISK":"corona islandesa","ITL":"lira italiana","JMD":"dólar jamaicano","JOD":"dinar jordano","JPY":"yen","KES":"chelín keniano","KGS":"som","KHR":"riel","KMF":"franco comorense","KPW":"won norcoreano","KRH":"KRH","KRO":"KRO","KRW":"won surcoreano","KWD":"dinar kuwaití","KYD":"dólar de las Islas Caimán","KZT":"tenge kazako","LAK":"kip","LBP":"libra libanesa","LKR":"rupia esrilanquesa","LRD":"dólar liberiano","LSL":"loti lesothense","LTL":"litas lituano","LTT":"talonas lituano","LUC":"franco convertible luxemburgués","LUF":"franco luxemburgués","LUL":"franco financiero luxemburgués","LVL":"lats letón","LVR":"rublo letón","LYD":"dinar libio","MAD":"dírham marroquí","MAF":"franco marroquí","MCF":"MCF","MDC":"MDC","MDL":"leu moldavo","MGA":"ariari","MGF":"franco malgache","MKD":"dinar macedonio","MKN":"MKN","MLF":"franco malí","MMK":"kiat","MNT":"tugrik","MOP":"pataca de Macao","MRO":"uguiya (1973–2017)","MRU":"uguiya","MTL":"lira maltesa","MTP":"libra maltesa","MUR":"rupia mauriciana","MVP":"MVP","MVR":"rufiya","MWK":"kwacha malauí","MXN":"peso mexicano","MXP":"peso de plata mexicano (1861–1992)","MXV":"unidad de inversión (UDI) mexicana","MYR":"ringit","MZE":"escudo mozambiqueño","MZM":"antiguo metical 
mozambiqueño","MZN":"metical","NAD":"dólar namibio","NGN":"naira","NIC":"córdoba nicaragüense (1988–1991)","NIO":"córdoba nicaragüense","NLG":"florín neerlandés","NOK":"corona noruega","NPR":"rupia nepalí","NZD":"dólar neozelandés","OMR":"rial omaní","PAB":"balboa panameño","PEI":"inti peruano","PEN":"sol peruano","PES":"sol peruano (1863–1965)","PGK":"kina","PHP":"peso filipino","PKR":"rupia pakistaní","PLN":"esloti","PLZ":"zloty polaco (1950–1995)","PTE":"escudo portugués","PYG":"guaraní paraguayo","QAR":"rial catarí","RHD":"dólar rodesiano","ROL":"antiguo leu rumano","RON":"leu rumano","RSD":"dinar serbio","RUB":"rublo ruso","RUR":"rublo ruso (1991–1998)","RWF":"franco ruandés","SAR":"rial saudí","SBD":"dólar salomonense","SCR":"rupia seychellense","SDD":"dinar sudanés","SDG":"libra sudanesa","SDP":"libra sudanesa antigua","SEK":"corona sueca","SGD":"dólar singapurense","SHP":"libra de Santa Elena","SIT":"tólar esloveno","SKK":"corona eslovaca","SLL":"leona","SOS":"chelín somalí","SRD":"dólar surinamés","SRG":"florín surinamés","SSP":"libra sursudanesa","STD":"dobra (1977–2017)","STN":"dobra","SUR":"rublo soviético","SVC":"colón salvadoreño","SYP":"libra siria","SZL":"lilangeni","THB":"baht tailandes","TJR":"rublo tayiko","TJS":"somoni tayiko","TMM":"manat turcomano (1993–2009)","TMT":"manat turcomano","TND":"dinar tunecino","TOP":"paanga","TPE":"escudo timorense","TRL":"lira turca (1922–2005)","TRY":"lira turca","TTD":"dólar de Trinidad y Tobago","TWD":"nuevo dólar taiwanés","TZS":"chelín tanzano","UAH":"grivna","UAK":"karbovanet ucraniano","UGS":"chelín ugandés (1966–1987)","UGX":"chelín ugandés","USD":"dólar estadounidense","USN":"dólar estadounidense (día siguiente)","USS":"dólar estadounidense (mismo día)","UYI":"peso uruguayo en unidades indexadas","UYP":"peso uruguayo (1975–1993)","UYU":"peso uruguayo","UYW":"unidad previsional uruguayo","UZS":"som uzbeko","VEB":"bolívar venezolano (1871–2008)","VEF":"bolívar venezolano (2008–2018)","VES":"bolívar 
venezolano","VND":"dong","VNN":"VNN","VUV":"vatu","WST":"tala","XAF":"franco CFA de África Central","XAG":"plata","XAU":"oro","XBA":"unidad compuesta europea","XBB":"unidad monetaria europea","XBC":"unidad de cuenta europea (XBC)","XBD":"unidad de cuenta europea (XBD)","XCD":"dólar del Caribe Oriental","XDR":"derechos especiales de giro","XEU":"unidad de moneda europea","XFO":"franco oro francés","XFU":"franco UIC francés","XOF":"franco CFA de África Occidental","XPD":"paladio","XPF":"franco CFP","XPT":"platino","XRE":"fondos RINET","XSU":"XSU","XTS":"código reservado para pruebas","XUA":"XUA","XXX":"moneda desconocida","YDD":"dinar yemení","YER":"rial yemení","YUD":"dinar fuerte yugoslavo","YUM":"super dinar yugoslavo","YUN":"dinar convertible yugoslavo","YUR":"YUR","ZAL":"rand sudafricano (financiero)","ZAR":"rand","ZMK":"kwacha zambiano (1968–2012)","ZMW":"kuacha zambiano","ZRN":"nuevo zaire zaireño","ZRZ":"zaire zaireño","ZWD":"dólar de Zimbabue","ZWL":"dólar zimbabuense","ZWR":"ZWR"},"short":{},"narrow":{}}},"patterns":{"locale":"{0} ({1})"}},"locale":"es-SV"}
)
}
|
#include "db.h"
#include "daemon/log.h"
#include "lightningd/lightningd.h"
#include <ccan/tal/str/str.h>
#include <ccan/tal/tal.h>
#include <inttypes.h>
/* On-disk filename of the wallet database, relative to the daemon's
 * working directory. */
#define DB_FILE "lightningd.sqlite3"

/* Do not reorder or remove elements from this array, it is used to
 * migrate existing databases from a previous state, based on the
 * string indices: an existing database stores how many entries it has
 * already applied (its schema "version"), and db_migrate() replays only
 * the entries past that index.  The array is NULL-terminated. */
char *dbmigrations[] = {
    "CREATE TABLE version (version INTEGER)",
    "INSERT INTO version VALUES (1)",
    "CREATE TABLE outputs ( \
prev_out_tx CHAR(64), \
prev_out_index INTEGER, \
value INTEGER, \
type INTEGER, \
status INTEGER, \
keyindex INTEGER, \
PRIMARY KEY (prev_out_tx, prev_out_index) \
);",
    "CREATE TABLE vars (name VARCHAR(32), val VARCHAR(255), PRIMARY KEY (name));",
    NULL,
};
/**
 * db_exec - Execute a printf-formatted SQL statement that returns no rows.
 * @caller: name of the calling function, embedded in any error message.
 * @db: database handle.
 * @fmt: printf-style format for the SQL text.
 *
 * Returns true on success.  On failure, records a formatted error in
 * db->err and marks the current transaction as aborted
 * (db->in_transaction is cleared).  NOTE(review): values are spliced
 * into the SQL text via printf formatting, not bound parameters, so
 * callers must only pass trusted/internal strings.
 */
bool PRINTF_FMT(3, 4)
db_exec(const char *caller, struct db *db, const char *fmt, ...)
{
    va_list ap;
    char *cmd, *errmsg;
    int err;
    /* Once a statement inside a transaction has failed, refuse to run
     * anything further until the transaction is rolled back. */
    if (db->in_transaction && db->err)
        return false;
    va_start(ap, fmt);
    cmd = tal_vfmt(db, fmt, ap);
    va_end(ap);
    err = sqlite3_exec(db->sql, cmd, NULL, NULL, &errmsg);
    if (err != SQLITE_OK) {
        db->in_transaction = false;
        /* Free any previously recorded error before overwriting. */
        tal_free(db->err);
        db->err = tal_fmt(db, "%s:%s:%s:%s", caller,
                  sqlite3_errstr(err), cmd, errmsg);
        /* errmsg comes from sqlite's allocator, not tal. */
        sqlite3_free(errmsg);
        tal_free(cmd);
        return false;
    }
    tal_free(cmd);
    return true;
}
/**
 * db_query - Prepare a printf-formatted SQL query.
 * @caller: name of the calling function, embedded in any error message.
 * @db: database handle.
 * @fmt: printf-style format for the SQL text.
 *
 * Returns a prepared statement (caller must sqlite3_finalize it), or
 * NULL on failure, in which case db->err is set and the current
 * transaction is marked aborted.  NOTE(review): like db_exec, this
 * splices values into the SQL text rather than binding parameters, so
 * only trusted/internal strings may be passed.
 */
sqlite3_stmt *PRINTF_FMT(3, 4)
db_query(const char *caller, struct db *db, const char *fmt, ...)
{
    va_list ap;
    char *query;
    sqlite3_stmt *stmt;
    int err;

    /* Once a statement inside a transaction has failed, refuse to run
     * anything further until the transaction is rolled back. */
    if (db->in_transaction && db->err)
        return NULL;

    va_start(ap, fmt);
    query = tal_vfmt(db, fmt, ap);
    va_end(ap);

    err = sqlite3_prepare_v2(db->sql, query, -1, &stmt, NULL);
    if (err != SQLITE_OK) {
        db->in_transaction = false;
        /* Free the previous error before overwriting, matching
         * db_exec (the old code leaked it onto the db context). */
        tal_free(db->err);
        db->err = tal_fmt(db, "%s:%s:%s:%s", caller,
                  sqlite3_errstr(err), query, sqlite3_errmsg(db->sql));
    }
    /* sqlite keeps its own copy of the SQL; the formatted string was
     * previously never freed and accumulated on the db context for the
     * lifetime of the database. */
    tal_free(query);
    /* On error sqlite3_prepare_v2 sets stmt to NULL, which is exactly
     * what we want to return. */
    return stmt;
}
/**
* db_clear_error - Clear any errors from previous queries
*/
static void db_clear_error(struct db *db)
{
db->err = tal_free(db->err);
}
/* tal destructor: close the underlying sqlite3 handle when the struct
 * db allocation is freed. */
static void close_db(struct db *db)
{
    sqlite3_close(db->sql);
}
/**
 * db_begin_transaction - Start a new transaction.
 *
 * Transactions may not nest; callers must have committed or rolled
 * back any previous one.  Returns true if BEGIN succeeded, in which
 * case db->in_transaction is set.
 */
bool db_begin_transaction(struct db *db)
{
    assert(!db->in_transaction);
    /* Clear any errors from previous transactions and
     * non-transactional queries */
    db_clear_error(db);
    db->in_transaction = db_exec(__func__, db, "BEGIN TRANSACTION;");
    return db->in_transaction;
}
/**
 * db_commit_transaction - COMMIT the currently open transaction.
 *
 * Returns true on success.  Either way we are no longer inside a
 * transaction afterwards.
 */
bool db_commit_transaction(struct db *db)
{
    bool ok;

    assert(db->in_transaction);
    ok = db_exec(__func__, db, "COMMIT;");
    db->in_transaction = false;
    return ok;
}
/**
 * db_rollback_transaction - Abort the currently open transaction.
 *
 * Returns true if ROLLBACK executed cleanly.  Either way we are no
 * longer inside a transaction afterwards.
 */
bool db_rollback_transaction(struct db *db)
{
    bool ok;

    assert(db->in_transaction);
    ok = db_exec(__func__, db, "ROLLBACK;");
    db->in_transaction = false;
    return ok;
}
/**
 * db_open - Open or create a sqlite3 database
 * @ctx: tal context the resulting struct db hangs off.
 * @filename: path of the database file; created read-write if missing.
 *
 * Calls fatal() (does not return) if the sqlite headers we were
 * compiled against disagree with the runtime library, or if the file
 * cannot be opened.  The returned db starts outside any transaction
 * with no recorded error, and closes its sqlite handle when freed.
 */
static struct db *db_open(const tal_t *ctx, char *filename)
{
    int err;
    struct db *db;
    sqlite3 *sql;

    /* Header/library skew can cause subtle breakage; refuse to run.
     * (Fixed typo "mistmatch" and use %d: both values are int.) */
    if (SQLITE_VERSION_NUMBER != sqlite3_libversion_number())
        fatal("SQLITE version mismatch: compiled %d, now %d",
              SQLITE_VERSION_NUMBER, sqlite3_libversion_number());

    int flags = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE;
    err = sqlite3_open_v2(filename, &sql, flags, NULL);
    if (err != SQLITE_OK) {
        fatal("failed to open database %s: %s", filename,
              sqlite3_errstr(err));
    }

    db = tal(ctx, struct db);
    /* tal_dup_arr with extra=0... wait, extra is the final arg: +0
     * extra bytes; strlen+0 would drop the NUL, but the original
     * passed (strlen, 0) meaning strlen chars plus 0 extra -- keep
     * byte-for-byte behavior. */
    db->filename = tal_dup_arr(db, char, filename, strlen(filename), 0);
    db->sql = sql;
    db->in_transaction = false;
    db->err = NULL;
    /* Ensure the sqlite handle is closed when db is freed. */
    tal_add_destructor(db, close_db);
    return db;
}
/**
 * db_get_version - Determine the current DB schema version
 *
 * Will attempt to determine the current schema version of the
 * database @db by querying the `version` table. If the table does not
 * exist it'll return schema version -1, so that migration 0 is
 * applied, which should create the `version` table.
 */
static int db_get_version(struct db *db)
{
    int err;
    /* NOTE(review): res is u64 but initialized to -1 and returned as
     * int; the truncation back to int yields -1 again, so this works,
     * but a plain int would be clearer. */
    u64 res = -1;
    sqlite3_stmt *stmt =
        db_query(__func__, db, "SELECT version FROM version LIMIT 1");
    /* NULL means the query could not even be prepared (e.g. the table
     * does not exist yet on a fresh database). */
    if (!stmt)
        return -1;
    err = sqlite3_step(stmt);
    if (err != SQLITE_ROW) {
        sqlite3_finalize(stmt);
        return -1;
    } else {
        res = sqlite3_column_int64(stmt, 0);
        sqlite3_finalize(stmt);
        return res;
    }
}
/**
 * db_migration_count - Count how many migrations are available
 *
 * Returns the maximum migration index, i.e., the version number of an
 * up-to-date database schema.  Since dbmigrations is NULL-terminated,
 * this is one less than the number of entries.
 */
static int db_migration_count(void)
{
    int n;
    for (n = 0; dbmigrations[n] != NULL; n++)
        ;
    return n - 1;
}
/**
 * db_migrate - Apply all remaining migrations from the current version
 *
 * Reads the schema version currently stored in @db and, inside a
 * single transaction, applies every migration with a higher index,
 * then records the new version.  Returns true on success; on any
 * failure the transaction is rolled back and false is returned.
 */
static bool db_migrate(struct db *db)
{
    /* Attempt to read the version from the database */
    int current = db_get_version(db);
    int available = db_migration_count();

    if (!db_begin_transaction(db)) {
        /* No need to rollback, we didn't even start... */
        return false;
    }

    while (++current <= available) {
        if (!db_exec(__func__, db, "%s", dbmigrations[current]))
            goto fail;
    }

    /* Finally update the version number in the version table.  This
     * result was previously ignored; check it like the migrations
     * above so a failure here doesn't reach the commit path with the
     * error flag set. */
    if (!db_exec(__func__, db, "UPDATE version SET version=%d;", available))
        goto fail;

    if (!db_commit_transaction(db)) {
        goto fail;
    }
    return true;
fail:
    /* NOTE(review): db_exec clears db->in_transaction on failure while
     * db_rollback_transaction asserts it is still set -- that
     * interplay predates this change and is worth auditing. */
    db_rollback_transaction(db);
    return false;
}
struct db *db_setup(const tal_t *ctx)
{
struct db *db = db_open(ctx, DB_FILE);
if (!db) {
return db;
}
if (!db_migrate(db)) {
return tal_free(db);
}
return db;
}
/**
 * db_get_intvar - Fetch an integer variable from the vars table.
 * @db: database handle.
 * @varname: name of the variable (internal/trusted string: it is
 *           spliced into the SQL text, not bound -- do not pass
 *           untrusted input).
 * @defval: value returned when the variable is absent or unreadable.
 */
s64 db_get_intvar(struct db *db, char *varname, s64 defval)
{
    int err;
    s64 res = defval;
    const unsigned char *stringvar;
    sqlite3_stmt *stmt =
        db_query(__func__, db,
             "SELECT val FROM vars WHERE name='%s' LIMIT 1", varname);

    if (!stmt)
        return defval;

    err = sqlite3_step(stmt);
    if (err == SQLITE_ROW) {
        stringvar = sqlite3_column_text(stmt, 0);
        /* sqlite3_column_text returns NULL for a NULL value (or on
         * OOM); atol(NULL) would crash, so keep the default then. */
        if (stringvar != NULL)
            res = atol((const char *)stringvar);
    }
    sqlite3_finalize(stmt);
    return res;
}
/**
 * db_set_intvar - Store an integer variable in the vars table.
 *
 * Upsert emulation: try an UPDATE first; if no row was touched,
 * INSERT a fresh one.  Returns true on success.
 */
bool db_set_intvar(struct db *db, char *varname, s64 val)
{
    /* Attempt to update an existing row first. */
    db_exec(__func__, db,
        "UPDATE vars SET val='%" PRId64 "' WHERE name='%s';", val,
        varname);
    if (sqlite3_changes(db->sql) > 0)
        return true;

    /* Nothing there yet: create the variable. */
    return db_exec(__func__, db,
               "INSERT INTO vars (name, val) VALUES ('%s', '%" PRId64
               "');",
               varname, val);
}
|
import os
from services.base import BaseService
class Facebook(BaseService):
    """Facebook Graph API wrapper built on top of BaseService."""

    def exchange_token(self, code, callback_url):
        """
        Retrieve the access token given after acquiring authorization code
        Detailed archived documentation can be found at https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow
        :param code: A one-time use code that may be exchanged for a bearer token
        :type code: str
        :param callback_url: Callback URL from application domain
        :type callback_url: str
        :return: Base service result object containing response data
        :rtype: BaseServiceResult
        """
        return self.get(
            route="oauth/access_token",
            parse_json=True,
            params={
                "client_id": FACEBOOK_CLIENT_ID,
                "client_secret": FACEBOOK_CLIENT_SECRET,
                "code": code,
                "redirect_uri": callback_url,
            },
        )

    def get_user_post(self, access_token, user_id="me"):
        """
        Fetch individual user post by default
        :param access_token: Access token acquired from Facebook OAuth process
        :type access_token: str
        :param user_id: Facebook user ID, defaults to 'me'
        :type user_id: str, optional
        :return: Base service result object containing response data
        :rtype: BaseServiceResult
        """
        # Hoisted out of the nested helper so it is not re-executed per post.
        import datetime

        def extract_post_data(post_data):
            """
            Helper function to extract post data
            :param post_data: Received post data information
            :type post_data: dict
            :return: Processed post data information
            :rtype: dict
            """
            return {
                "message": post_data["message"],
                # Facebook timestamps are fixed-offset UTC; convert to epoch.
                "time": datetime.datetime.strptime(
                    post_data["created_time"], "%Y-%m-%dT%H:%M:%S+0000"
                )
                .replace(tzinfo=datetime.timezone.utc)
                .timestamp(),
                "id": post_data["id"],
            }

        response = self.get(
            route=f"{user_id}/feed", params={"access_token": access_token}
        )
        # Keep only posts that actually carry a message body.
        posts = [post for post in response.data["data"] if "message" in post]
        response.data = [extract_post_data(post) for post in posts]
        return response

    def get_user_profile(self, access_token, user_id="me"):
        """
        Fetch individual user profile by default
        :param access_token: Access token acquired to access Facebook Graph API
        :type access_token: str
        :param user_id: Facebook user ID, defaults to 'me'
        :type user_id: str, optional
        :return: Base service result object containing response data
        :rtype: BaseServiceResult
        """
        response = self.get(route=f"{user_id}", params={"access_token": access_token})
        # Error code 190 means an invalid/expired token; surface it as 401.
        # isinstance() instead of type()==; .get() avoids a KeyError on
        # 400 responses whose body lacks the expected "error" shape.
        if (
            response.status_code == 400
            and isinstance(response.data, dict)
            and response.data.get("error", {}).get("code") == 190
        ):
            response.status_code = 401
        return response

    def extract_user_profile(self, data):
        """
        Extract user profile data
        :param data: User profile data
        :type data: dict
        :return: Processed profile data
        :rtype: dict
        """
        return {"id": data["id"], "name": data["name"]}
# Graph API base URL, pinned to API version 6.0.
FACEBOOK_API_BASE_URL = "https://graph.facebook.com/v6.0"
# OAuth app credentials supplied through environment variables.
FACEBOOK_CLIENT_ID = os.getenv("FACEBOOK_CLIENT_ID")
FACEBOOK_CLIENT_SECRET = os.getenv("FACEBOOK_CLIENT_SECRET")
# Shared singleton instance used by the rest of the application.
FacebookService = Facebook("facebook", FACEBOOK_API_BASE_URL)
|
from django import forms
class URLForm(forms.Form):
    """Form for submitting a URL to shorten, with an optional custom name."""

    # PEP 8: no spaces around '=' in keyword arguments.
    longurl = forms.URLField(max_length=250)
    custom_name = forms.CharField(max_length=30, required=False)
|
const Device = require('../device-miio');
const { withLightEffect } = require('../utils');
module.exports = class extends Device {
static model = 'yeelink.light.strip1';
static name = 'Yeelight Lightstrip';
static image = 'https://static.home.mi.com/app/image/get/file/developer_1551948702kcy7aei4.png';
constructor(opts) {
super(opts);
this._propertiesToMonitor = ['power', 'bright', 'ct', 'hue', 'saturation'];
}
getPower() {
const { power } = this.properties;
if (power === 'on') return true;
if (power === 'off') return false;
return undefined;
}
getBrightness() {
const brightness = parseInt(this.properties.bright, 10);
if (brightness > 0) return brightness;
return undefined;
}
getColorTemperature() {
const colorTemperature = parseInt(this.properties.ct, 10);
if (colorTemperature > 0) return colorTemperature;
return undefined;
}
getColorHue() {
const colorHue = parseInt(this.properties.hue, 10);
if (colorHue >= 0) return colorHue;
return undefined;
}
getColorSaturation() {
const colorSaturation = parseInt(this.properties.saturation, 10);
if (colorSaturation >= 0) return colorSaturation;
return undefined;
}
setPower(v) {
return this.miioCall('set_power', withLightEffect(v ? 'on' : 'off'));
}
setBrightness(v) {
return this.miioCall('set_bright', withLightEffect(v));
}
setColorTemperature(v) {
return this.miioCall('set_ct_abx', withLightEffect(v));
}
setColorHSV(v) {
this._miioCall('set_hsv', withLightEffect([v.hue, v.saturation]));
}
};
|
import http.server, requests, json
# Cache of speedrun.com user profiles keyed by runner id, filled lazily.
username_cache = {}
# Static files this server is allowed to serve.
whitelist = ("current.html", "runner64.png", "flags.min.css", "flags.png")
# File-extension -> MIME type map for whitelisted static files.
mimes = {
    "html": "text/html",
    "css": "text/css",
    "png": "image/png",
    # BUGFIX: was "application/jsonn" (typo).
    "json": "application/json"
}
PAGE_404 = """<!DOCTYPE html>
<html>
	<head>
		<meta charset="utf-8">
		<title>404 Not Found</title>
	</head>
	<body>
		nuthin to see here bruh
	</body>
</html>
"""
# speedrun.com asks API consumers to identify themselves.
HEADERS = {"User-Agent": "Jobicade's Magnificent PB/WR grabber"}
# Hard-coded game/category/user ids for the tracked leaderboard.
GAME_ID = "4pd0n31e"
CATEGORY_ID = "wk6pexd1"
USER_ID = "48g0ln2x"
BASE = "http://www.speedrun.com/api/v1/"
PB_QUERY = BASE + "users/" + USER_ID + "/personal-bests"
WR_QUERY = BASE + "leaderboards/" + GAME_ID + "/category/" + CATEGORY_ID
class RecordRequest(http.server.BaseHTTPRequestHandler):
    """HTTP handler serving a small PB/WR status page backed by speedrun.com."""

    @staticmethod
    def json(url, path=None, **kwargs):
        """GET *url*, decode the JSON body, and optionally drill down.

        :param path: optional sequence of keys/indices walked into the
            decoded document, e.g. ("data", 0).
        """
        root = requests.get(url, **kwargs, headers=HEADERS).json()
        if path:
            for key in path:
                root = root[key]
        return root

    def do_GET(self):
        # Strip the leading slash so paths can match the whitelist entries.
        if self.path[0] == "/":
            self.path = self.path[1:]
        headers_out = {}
        code = 200
        redirects = ("current", "current.html", "index", "index.html")
        if self.path in redirects:
            # Canonicalize the aliases to the site root with a permanent redirect.
            code = 301
            headers_out["Location"] = "/"
            self.path = "current.html"
        # This is the correct path, don't complain about it
        if self.path == "": self.path = "current.html"
        if self.path == "data":
            headers_out["Content-Type"] = "application/json; charset=utf-8"
            # Grab responses from speedrun.com...
            pb = RecordRequest.json(PB_QUERY, ("data", 0), params={"game": GAME_ID, "category": CATEGORY_ID})
            wr = RecordRequest.json(WR_QUERY, ("data", "runs", 0, "run"), params={"top": 1, "timing": "realtime_noloads"})
            wr_runner = wr["players"][0]
            # Cache runner profiles in the module-level dict so the same
            # user is not re-fetched on every request.
            if not wr_runner["id"] in username_cache:
                username_cache[wr_runner["id"]] = RecordRequest.json(wr_runner["uri"], ("data",))
            # Flatten the PB payload: keep the run object, carrying its placement.
            pb["run"]["place"] = pb["place"]
            pb = pb["run"]
            wr["player"] = username_cache[wr_runner["id"]]
            response = json.dumps({"pb": pb, "wr": wr}).encode("utf-8")
        elif self.path in whitelist:
            # Static file: MIME type chosen from the file extension.
            headers_out["Content-Type"] = mimes[self.path[self.path.rfind(".")+1:]]
            with open(self.path, "rb") as f:
                response = f.read()
        else:
            code = 404
            response = PAGE_404.encode("utf-8")
            headers_out["Content-Type"] = "text/html; charset=utf-8"
        # NOTE(review): len() yields an int; send_header %-formats it to str.
        headers_out["Content-Length"] = len(response)
        self.send_response(code)
        for key in headers_out:
            self.send_header(key, headers_out[key])
        self.end_headers()
        self.wfile.write(response)
# Bind to localhost on port 80 (privileged on most systems) and serve forever.
httpd = http.server.HTTPServer(('127.0.0.1', 80), RecordRequest)
sa = httpd.socket.getsockname()
print("Serving HTTP on", sa[0], "port", sa[1], "...")
httpd.serve_forever()
|
// Auto-generated Doxygen search-index entry; do not edit by hand.
var searchData=
[
  ['quit_2909',['quit',['../classgui__framework_1_1_base_composite.html#a438c1f5c9ef171bc01b2ac420fee985cadbd73c2b545209688ed794c0d5413d5a',1,'gui_framework::BaseComposite']]]
];
|
import os
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf # Version 1.0.0 (some previous versions are used in past commits)
from sklearn import metrics
import random
from random import randint
import argparse
import logging
import time
import operator
import imutils
import cv2
import numpy as np
import math
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
from itertools import chain, count
from sklearn.neighbors import NearestNeighbors
from collections import defaultdict
import winsound
import darknet.json as dk
import facerec.recognize as fr
# import deepface.deepface as df
import security
## Input management
CAMERA = [] # Default value, if no camera is given, switch to video mode
VIDEO = "utilities/test_vid.mp4"
REAL_FPS = 6
PROC_FPS = 3 # Proc is surely < Real
SKIP_FRAME = round(REAL_FPS/PROC_FPS) - 1
# 5th is face camera. Remove to use ceiling cams cropped by FREG.
# CAMERA = [0]
# CAMERA = [0, 1]
# CAMERA = [cv2.CAP_DSHOW + 0] # Using directshow to fix black bar
# CAMERA = ["rtsp://167.205.66.187:554/onvif1"]
# CAMERA = [ "rtsp://167.205.66.147:554/onvif1",
# "rtsp://167.205.66.148:554/onvif1",
# "rtsp://167.205.66.149:554/onvif1",
# "rtsp://167.205.66.150:554/onvif1",
# cv2.CAP_DSHOW + 0 ]
CAMERA = [ "rtsp://192.168.0.108:554/onvif1",
"rtsp://192.168.0.107:554/onvif1",
"rtsp://192.168.0.104:554/onvif1",
"rtsp://192.168.0.110:554/onvif1",
cv2.CAP_DSHOW + 0 ]
FPSLIM = 3 # Set to 0 for unlimited
# Size of the images, act as a boundary
IMAGE = [1024,576]
SUBIM = [512,288]
ROTATE = [0, 0, 0, 0, 270]
# ROTATE = [180, 180, 180, 180, 90]
# Face camera, the fifth camera on the list
FCAMDS = 1 # Face camera downscale
# FCAMCP = [0.2, 1-0.5, 0.2, 1-0.2] # Crop fraction from top, bottom, left, right
FCAMCP = [0.35, 1-0.25, 0.2, 1-0.2] # Crop fraction from top, bottom, left, right
FCOFF = SUBIM # Center location of face camera
## System-wide parameters
# Disable/Enable the actual systems and not just visual change
SYS_OPOSE = True
SYS_ACT = SYS_OPOSE and True
SYS_DARK = False
SYS_FACEREC = True
# OPSIZE = "256x144"
# OPSIZE = "512x288"
# OPSIZE = "768x432"
OPSIZE = "1024x576"
# OPSIZE = "1280x720"
# OPSIZE = "1536x864"
# GPU fraction limit
LSGPU = 0./6.0
OPGPU = 0./6.0
# LSGPU = 0./6.0
# OPGPU = 1/6.0
FREG = [0,25,0,25]
# FREG = [0,50,0,50]
## LSTM Parameters
# N_STEPS = 8
N_STEPS = 5
# DATASET_PATH = "data/"
# DATASET_PATH = "data/Overlap_fixed/"
# DATASET_PATH = "data/Overlap_fixed4/"
# DATASET_PATH = "data/Overlap_fixed4_separated/"
# DATASET_PATH = "data/2a_Amplify/"
# DATASET_PATH = "data/Direct2a/"
# DATASET_PATH = "data/Direct2a/Normalize/"
# DATASET_PATH = "data/Direct2a/NormalizePoint/"
DATASET_PATH = "data/Direct2a/NormalizeOnce/"
# DATASET_PATH = "data/Test/5/"
LAYER = 2 # 1: Default [36,36] # 2: Simpler [36]
## Preprocessing schemes, only applies right before the poses loaded to LSTM.
# No effect to the original pose data.
# Group A, main preprocessing:
# 1: Amplify - Poses emulated as if there's a big border between sub-images
# 2: Normalize - Individual pose returned to origin
# 3: NormalizeOnce - Every pose in a gesture will be relative to the first in the gesture
# 4: NormalizePoint - Every point in a gesture will be relative to the first point in the gesture
# 5: Reverse - Poses in 4 sub-images emulated as if happening in a single image
# Other: No preprocessing
POSEAMP = 1000 # [Amplify] Value added if a pose is over the sub-image boundary
# Group B, idle management:
# 1: Null - Unmoving gestures (average) are forced to be all null
# 2: Null - Unmoving gestures (key point [neck, or nose]) are forced to be all null
# Other: No preprocessing
IDLETH = int(IMAGE[0]/80) # Max distance (in coord) a gesture forced to be idling
PREPROC = [3,2]
## Label id selection schemes
# No effect to the original pose data. Based on the index:
# 0: Weighted - Positive poses receive boosted confidence (lowering false "suspicious").
# 1: Grouped - Big gesture (DR, UR, DL, UL, ND) will be groups, averaged, max obtained.
# Labels in losing groups will be totally ignored (zero)
# After: Max confidence
LABSEL = [True,False]
# Label weight for weighted label scheme, multiplied to the base confidence
LABWEI = np.array([1,1,1,1, 0,0,0,0, 0,0,0,0, 0,0,0,0, 0]) * 0.2 + 1
# LABWEI = np.array([1,1,1,1, 0,0,0,0, 0,0,0,0, 0,0,0,0]) * 0.2 + 1
LABGRO = [ [0,4,8,12],
[1,5,9,13],
[2,6,10,14],
[3,7,11,15],
[16]]
LABELS = [
"jalan_DR", "jalan_UR", "jalan_DL", "jalan_UL",
"barang2_DR", "barang2_UR", "barang2_DL", "barang2_UL",
"barang1l_DR", "barang1l_UR", "barang1l_DL", "barang1l_UL",
"barang1r_DR", "barang1r_UR", "barang1r_DL", "barang1r_UL",
"diam_ND"
]
# LABELS = [
# "jalan_NE", "jalan_NW", "jalan_SE", "jalan_SW",
# "menyapu_NE", "menyapu_NW", "menyapu_SE", "menyapu_SW",
# "barang_NE", "barang_NW", "barang_SE", "barang_SW",
# "diam_NE", "diam_NW", "diam_SE", "diam_SW"
# ]
# LABELS = ["normal", "anomaly"]
## Security Parameters
N_HIST = 10
FRPARAM = 0.3 # Individual frame parameter, depending on the post processing used.
HISTH = 0.5 # Historical threshold for final trigger.
## Postprocessing schemes, historical level calculation
# Before: N_HIST frames collected, each having percentage of positive detections vs. all detections
# 0: Count threshold - Percentage of frames above PARAM threshold vs. all frames.
# 1: Average - Average all frames (no PARAM required)
# 2: Percentile - Calculate the PARAM percentile from all frames
# After: Check against historical threshold
POSTPROC = 2
# Alarms & indicators
ALDUR = 2 # Alarm duration in seconds (using the file duration if it's shorter)
ALAUTH = 4 # Authorized state duration, if there's any known face
ALSND = "utilities/alarm.wav" # Alarm sound directory
## Utilities
# Prevent face blinking, hold prev result if new result is empty
HFACE = 0
# Prescale & Pratical face_reg region
FPSCALE = 1 # The face image prescale divisor
FUP = 2 # Facerec model upsample
# Cropping ceiling cams for face recog region
# FREG = [0, 200, 250, 800] # Face region, for single SW camera [y1, y2, x1, x2], 1024x576 single image
# FREG = [288+0, 288+100, 512+125, 512+340] # Face region, for SW camera in 2x2 [y1, y2, x1, x2], 1024x576 four images
# FREG = [0, 576, 0, 1024]
# FREG = [350, 510, 400, 600]
FREG = [210, 360, 425, 590]
# Exit zone [y1, y2, x1, x2]
EX = [288,375,701,800]
EXR = 3 # Radius (square) from pose point to be used as color reference
EXTH = 0.2 # Threshold in distance fraction
# Masking areas to NOT be detected by openpose.
# Used to hide noisy area unpassable by human. (Masks are not shown during preview)
# The mask is a polygon, specify the vertices location.
DOMASK = 1
DRAWMASK = 0 # Preview the masking or keep it hidden
# PMASK = [ np.array([[610,520],[770,430],[960,576],[660,576]], np.int32), # SW
# np.array([[185,430],[255,470],[70,570],[0,575],[0,530]], np.int32), # SE
# np.array([[760,200],[880,288],[1024,134],[985,44]], np.int32), # NW
# np.array([[260,190],[50,50],[136,53],[327,157]], np.int32) # NE
# ]
# PMASK = [ np.array([[290,200],[0,0],[512,0],[350,180]], np.int32), # NE
# np.array([[650,200],[800,288],[1024,288],[1024,0],[985,44]], np.int32), # NW
# np.array([[185,430],[255,470],[70,570],[0,575],[0,300]], np.int32), # SE
# np.array([[610,520],[700,420],[770,380],[960,576],[660,576]], np.int32), # SW
# np.array([[950,400],[1024,400],[1024,500]], np.int32)] # SW
# PMASK = [ np.array([[290,200],[0,0],[512,0],[350,180]], np.int32), # NE
# np.array([[650,200],[800,288],[1024,288],[1024,0],[985,44]], np.int32), # NW
# np.array([[275,400],[190,400],[200,480],[270,460]], np.int32), # SE
# np.array([[185,430],[255,470],[70,570],[0,575],[0,300]], np.int32), # SE
# np.array([[900,576],[700,420],[640,400],[512,576]], np.int32), # SW
# np.array([[950,400],[1024,400],[1024,500]], np.int32)] # SW
# PMASK = [ np.array([[0,0],[1024,0],[1024,576],[0,576]], np.int32) ]
PMASK = [ np.array([[579,500],[580,575],[760,574],[756,473],[724,443]], np.int32),
np.array([[384,339],[329,401],[154,343],[225,288],[386,287]], np.int32),
np.array([[960,478],[905,573],[1023,574],[1024,466]], np.int32),
np.array([[360,285],[393,229],[509,190],[511,365],[475,315]], np.int32),
np.array([[635,338],[706,374],[514,449],[516,364]], np.int32)]
DUMMY = False
SKX = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34]
SKY = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35]
class mainhuman_activity:
# Pre-processing for every image
def preprocess(raws, rots):
imgs = []
for img, rot in zip(raws, rots):
img = cv2.resize(img, dsize=(SUBIM[0], SUBIM[1]), interpolation=cv2.INTER_CUBIC) # 16:9
# img = cv2.resize(img, dsize=(1024, 576), interpolation=cv2.INTER_CUBIC) # 16:9
# img = cv2.resize(img, dsize=(512, 288), interpolation=cv2.INTER_CUBIC) # 16:9
# img = cv2.resize(img, dsize=(256, 144), interpolation=cv2.INTER_CUBIC) # 16:9
# img = cv2.resize(img, dsize=(464, 288), interpolation=cv2.INTER_CUBIC) # 16:10
# img = cv2.resize(img, dsize=(640, 480), interpolation=cv2.INTER_CUBIC) # 4:3
# img = cv2.resize(img, dsize=(320, 240), interpolation=cv2.INTER_CUBIC) # 4:3
# img = cv2.resize(img, dsize=(160, 120), interpolation=cv2.INTER_CUBIC) # 4:3
img = imutils.rotate_bound(img, rot)
imgs.append(img)
if len(imgs) == 1:
image = imgs[0]
if len(imgs) >= 2: # Two images side-by-side
image = np.hstack((imgs[0], imgs[1]))
if len(imgs) >= 4: # Four images boxed
image2 = np.hstack((imgs[2], imgs[3]))
image = np.vstack((image, image2))
return imgs, image
def __init__(self, camera=CAMERA):
self.fps = 1
frame_time = 0
hisfps = [] # Historical FPS data
self.alprev = 0 # Prev alarm time
self.altrig = 0 # Alarm triggered, -1 authorized, 0 neutral, 1 triggered
freg = []
if len(camera) > 0:
from webcamvideostream import WebcamVideoStream
cams = [WebcamVideoStream(src=cam, resolution=(1280,720)).start() for cam in camera]
imgs = []
for i, cam in enumerate(cams):
# cam.set(cv2.CAP_PROP_BUFFERSIZE, 1) # Internal buffer will now store only x frames
img = cam.read()
# If no image is acquired
if (img is None):
# Black image
imgs.append(np.zeros((100,100,3), np.uint8))
elif (img.size == 0):
imgs.append(np.zeros((100,100,3), np.uint8))
else:
imgs.append(img)
# TEST, 4 camera simulation
# for i in range(3):
# imgs.append(img)
imgs, image = mainhuman_activity.preprocess(imgs, ROTATE)
# Face camera, not rendered on main image
if len(imgs) == 5:
im_h, im_w = imgs[4].shape[:2]
imf = imgs[4][round(im_h*FCAMCP[0]): round(im_h*FCAMCP[1]), round(im_w*FCAMCP[2]): round(im_w*FCAMCP[3])] # Crop
im_h, im_w = imf.shape[:2]
imf = cv2.resize(imf, dsize=(round(im_w/FCAMDS), round(im_h/FCAMDS)), interpolation=cv2.INTER_CUBIC) # Downsample
im_h, im_w = imf.shape[:2]
ky = 0 if im_h % 2 == 0 else 1
kx = 0 if im_w % 2 == 0 else 1
freg = [round(FCOFF[1]-im_h/2), round(FCOFF[1]+im_h/2)+ky, round(FCOFF[0]-im_w/2), round(FCOFF[0]+im_w/2)+kx]
else:
freg = FREG # Use cropped ceiling cams for face
else:
cams = []
print("No camera given, trying to use video instead.")
cap = cv2.VideoCapture(VIDEO, cv2.CAP_FFMPEG)
time.sleep(1)
if cap.isOpened() is False:
print("Error opening video stream or file")
return None
frame = 0
frame_skipped = 0
ret_val, image = cap.read()
freg = FREG # Use ceiling cams for face
self.im_h, self.im_w = image.shape[:2]
# print(h, w, c, h2, w2, c2)
###print("\n######################## Facerec")
if SYS_FACEREC:
facer = fr.face_recog(face_dir="./facerec/face/")
###print("\n######################## Darknet")
if SYS_DARK:
dark = dk.darknet_recog()
###print(dark.performDetect(image))
###print("\n######################## LSTM")
if SYS_ACT:
act = activity_human()
act.test()
###print("\n######################## Openpose")
if SYS_OPOSE:
opose = openpose_human(image)
# print("\n######################## Deepface")
# dface = df.face_recog()
# print(dface.run(image))
hold_face = 0
act_labs = []
act_confs = []
act_locs = []
sec_hist = []
sec_auths = {}
if DUMMY:
# Dummy pose
dimg = cv2.imread("images/TestPose.jpg")
doff_x = 0
doff_y = 30
rimg = cv2.imread("images/Background.png")
# For FPS calculation
ptime = time.time()
# Main loop
while True:
imgs = []
if len(camera) > 0:
for i, cam in enumerate(cams):
# Grab the frames AND do the heavy preprocessing for each camera
# ret_val, img = cam.read()
# For better synchronization on multi-cam setup:
# Grab the frames first without doing the heavy stuffs (decode, demosaic, etc)
# ret_val = cam.grab()
# The FIFO nature of the buffer means we can't get the latest frame
# Thus skip the earlier frames. Delay stats: 7s 8fps +artifact >>> 2s 3fps
# for i in range(5):
# ret_val = cam.grab()
# Multi-threading using WebcamVideoStream
img = cam.read()
###print(cam.grabbed)
# If no image is acquired
if (img is None):
# Black image
imgs.append(np.zeros((100,100,3), np.uint8))
elif (img.size == 0):
imgs.append(np.zeros((100,100,3), np.uint8))
else:
imgs.append(img)
# for i, cam in enumerate(cams):
# # Decode the captured frames
# ret_val, img = cam.retrieve()
# imgs.append(img)
# Skip frame if there's nothing
if (imgs is [None]):
continue
# # TEST, 4 camera simulation
# for i in range(3):
# imgs.append(img)
imgs, image = mainhuman_activity.preprocess(imgs, ROTATE)
# Face camera, not seen on main image
if len(imgs) == 5:
im_h, im_w = imgs[4].shape[:2]
imf = imgs[4][round(im_h*FCAMCP[0]): round(im_h*FCAMCP[1]), round(im_w*FCAMCP[2]): round(im_w*FCAMCP[3])] # Crop
im_h, im_w = imf.shape[:2]
imf = cv2.resize(imf, dsize=(round(im_w/FCAMDS), round(im_h/FCAMDS)), interpolation=cv2.INTER_CUBIC) # Downsample
else:
# Video mode
ret_val, image = cap.read()
# Skip frames to get realtime data representation
if frame_skipped < SKIP_FRAME:
frame += 1
frame_skipped += 1
continue
frame += 1
frame_skipped = 0
# Special smaller image for face recognition, reduces memory
if len(imgs) == 5:
imface = imf # Use face camera
else:
# Use cropped ceiling cams
imface = image[freg[0]:freg[1], freg[2]:freg[3]]
# Special masked image for openpose, reduce environment noise.
# Draw a polygon mask around unwanted area, for 4 cam mode
impose = image.copy()
if DOMASK:
for pmask in PMASK:
cv2.fillPoly(impose, [pmask], color=(200,200,288))
# cv2.fillPoly(impose, [pmask], color=(0,0,0))
# Dummy image
if DUMMY:
impose[0:IMAGE[1], 0:IMAGE[0]] = rimg
if (doff_x >= 0) and (doff_y >= 0) and (doff_x+dimg.shape[1] < IMAGE[0]) and (doff_y+dimg.shape[0] < IMAGE[1]):
impose[doff_y:doff_y+dimg.shape[0], doff_x:doff_x+dimg.shape[1]] = dimg
impose[doff_y+288:doff_y+dimg.shape[0]+288, 1024-(doff_x+dimg.shape[1]):1024-doff_x] = cv2.flip(dimg.copy(), 1)
else:
doff_x = 0
doff_y = 30
doff_x += int(round((1024-dimg.shape[1])/(3*4)))
# doff_y += int(round((576-dimg.shape[0])/(3*4)))
###print("\n######################## Openpose")
if SYS_OPOSE:
human_keypoints, human_ids, humans = opose.runopenpose(impose)
# print(humans, human_keypoints)
else:
human_keypoints = {0: [np.zeros(36)]}
human_ids = {0: 0}
humans = []
###print("\n######################## Darknet")
if SYS_DARK:
dobj = dark.performDetect(image)
###print(dobj)
else:
dobj = []
###print("\n######################## Facerec")
if SYS_FACEREC:
face_locs_tp, face_names_tp = facer.runinference(imface, tolerance=0.4, prescale=1/FPSCALE, upsample=FUP)
###print(face_locs_tp, face_names_tp)
else:
face_locs_tp = []
face_names_tp = []
# Prevent face blinking, apply the result if the new result is not empty.
if face_locs_tp or hold_face <= 0:
face_locs = face_locs_tp # Apply the results
face_names = face_names_tp
hold_face = HFACE # Reset counter
else:
hold_face -= 1
# print("\n######################## LSTM")
act_labs = []
act_confs = []
act_locs = []
if SYS_ACT:
for key, human_keypoint in human_keypoints.items():
###print(key, human_keypoint)
if(len(human_keypoint)==N_STEPS):
act.runinference(human_keypoint)
act_labs.append(act.action)
act_confs.append(act.conf)
loc = openpose_human.average([human_keypoint[N_STEPS-1]])
# loc here is produced with format [[x,y]], so must be passing [0]
act_locs.append(loc[0])
###print("\n######################## Maths")
sec_lv, sec_flv, sec_auths = self.sec_calc(sec_hist, image, act_labs, act_confs, human_keypoints, dobj, imface, face_names, face_locs, sec_auths)
###print(sec_lv)
self.alert(sec_lv, len(sec_auths))
###print("\n######################## Display")
# Main drawing procedure
if DRAWMASK:
# Draw openpose mask & face region
self.display_all(impose, imface, sec_lv, sec_auths, humans, human_ids, act_labs, act_confs, act_locs, dobj, face_locs, face_names, freg)
else:
self.display_all(image, imface, sec_lv, sec_auths, humans, human_ids, act_labs, act_confs, act_locs, dobj, face_locs, face_names, freg)
# Frame management stuffs, counted before frame limited
frame_time = time.time() - ptime
# FPS limiter
if FPSLIM > 0:
time.sleep(max(1./FPSLIM - (frame_time), 0))
# FPS display & log, counted after frame limited
self.fps = 1.0 / (time.time() - ptime)
hisfps.append(self.fps)
ptime = time.time()
if cv2.waitKey(1) == 27:
break
cv2.destroyAllWindows()
# Output FPS history
fh = open("fps.txt", "w")
for fps in hisfps:
fh.write("%.3f \n" % fps)
fh.close()
def alert(self, sec_lv, sec_nauth):
if self.altrig == 0: # From neutral
# Alert & indicator about level below threshold
if sec_lv < HISTH:
winsound.PlaySound(None, winsound.SND_ASYNC)
winsound.PlaySound(ALSND, winsound.SND_ASYNC | winsound.SND_ALIAS)
self.altrig = 1 # To alert
self.alprev = time.time()
elif self.altrig == 1: # From alert
if time.time() > self.alprev + ALDUR:
self.altrig = 0 # To neutral
winsound.PlaySound(None, winsound.SND_ASYNC)
elif self.altrig == -1: # From cooldown period
if time.time() > self.alprev + ALAUTH:
self.altrig = 0 # To neutral
elif self.altrig == -2: # From authorized
# If none authorized
if sec_nauth == 0:
self.altrig = -1 # To cooldown period
self.alprev = time.time()
# Check authorization, nullify any security result if there's any authorized personnel
if sec_nauth > 0:
winsound.PlaySound(None, winsound.SND_ASYNC)
self.altrig = -2
def sec_calc(self, hist, image, act_labs, act_confs, human_keypoints, dobj, imface, face_names, face_locs, sec_auths, exth=EXTH):
# Pass components used for security level calculations
# TODO: implement threshold, constants, etc as variables
sec = security.Frame(act_labs, act_confs, dobj, face_names)
sec.calc()
# Add to historical record
# Base calculations from N latest data
hist.append(sec)
if (len(hist) > N_HIST):
# Remove the last, only the view changed, no copy created
hist.pop(0)
all_hist = len(hist)
# Calculation
lvs = []
for s in hist:
lvs.append(s.level)
print("%.3f " % s.level, end="")
print("| | ", end ="")
lvs = np.array(lvs)
if all_hist >= N_HIST:
if POSTPROC == 0: # Count if
sec_lv = len(lvs[lvs >= FRPARAM])/N_HIST
elif POSTPROC == 1: # Average
sec_lv = sum(lvs)/N_HIST
elif POSTPROC == 2: # Percentile
sec_lv = np.percentile(lvs, FRPARAM*100)
else:
sec_lv = 1.0
# print("%d/%d %.2f | " % (all_neg, all_hist, sec_lv), end="")
print("%.2f | " % (sec_lv), end="")
# Print latest labels & confidence
for act, conf in zip(act_labs, act_confs):
print("%s[%.2f]," % (act, conf), end="")
print()
# Authorized exiting
# Only check if there's no new face
if len(sec_auths) > 0 and len(face_names) == 0:
##print(human_keypoints)
for id, keys in human_keypoints.items(): # loc = (x,y)
###print(keys[-1], len(keys))
# Get the last pose, only if the sequence is longer than 1 (has detected before)
if len(keys) > 1:
pose = keys[-1]
(x, y) = (int(pose[2]), int(pose[3]-5)) # pose[2],pose[3] = (x,y) of body center (chest)
if (EX[2] <= x <= EX[3]) and (EX[0] <= y <= EX[1]):
# Get surrounding colors, by radius EXR
###print(loc[1]-EXR, loc[1]+EXR, loc[0]-EXR, loc[0]+EXR)
color = np.mean(image[y-EXR:y+EXR, x-EXR:x+EXR], axis=(0,1))
frac = {}
# Check against every detected authorized
for auth in sec_auths:
(b1, g1, r1) = sec_auths[auth]
(b2, g2, r2) = color
dist = math.sqrt((b2-b1)**2+(g2-g1)**2+(r2-r1)**2)
frac[auth] = (dist/441.67) # frac = dist/sqrt(255^2*3)
# Get the one with smallest distance
minkey = min(frac, key=frac.get)
if frac[minkey] <= EXTH: # Check to threshold
sec_auths.pop(minkey)
# Authorization, just need one positive to trigger
sec_flv = 0
for name, (top, right, bottom, left) in zip(face_names, face_locs):
if name != "Unknown":
sec_flv += 1
# Get color from the bottom row of imface
color = np.mean(imface[-1,left:right].copy(), axis=0)
sec_auths[name] = color # Designate that color to the person
# Percentage
return sec_lv, sec_flv, sec_auths
# Authorization, just need one positive to trigger
sec_flv = 0
for name, (top, right, bottom, left) in zip(face_names, face_locs):
if name != "Unknown":
sec_flv += 1
# Get color from the bottom row of imface
color = np.mean(imface[-1,left:right].copy(), axis=0)
sec_auths[name] = color # Designate that color to the person
# Percentage
return sec_lv, sec_flv, sec_auths
    def display_all(self, image, imface, sec_lv, sec_auths, humans, human_ids, act_labs, act_confs, act_locs, objs, face_locs, face_names, freg=[]):
        """Draw every overlay (face crop, poses, boxes, security bar, labels)
        onto *image* and show the frame in the 'Bedssys' window.

        NOTE(review): the default `freg=[]` is a mutable default argument and
        an empty freg would make the first slice-assign raise IndexError —
        callers always pass a 4-element region; confirm before relying on it.
        """
        vt = 10 # Vertical positioning
        # Face camera display
        image[freg[0]:freg[1], freg[2]:freg[3]] = imface # Insert to the center
        # Face region display
        if freg != []:
            cv2.rectangle(image, (freg[2], freg[0]), (freg[3], freg[1]), color=(64,64,64), thickness=1)
        # Exit region display
        cv2.rectangle(image, (EX[2], EX[0]), (EX[3], EX[1]), color=(64,64,64), thickness=1)
        # Openpose display
        image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
        # Security level bar: green at 1.0 shading to red at 0.0.
        color = (0, int(255 * sec_lv), int(255 * (1 - sec_lv)))
        cv2.rectangle(image, (10, vt), (self.im_w-10,vt+10), (255, 255, 255), thickness=1)
        cv2.rectangle(image, (10, vt), (int(round((self.im_w-20)*sec_lv)+10), vt+10), color, cv2.FILLED)
        # Red tick marking the HISTH trigger threshold on the bar.
        cv2.rectangle(image, (int(round((self.im_w-20)*HISTH)+10-1), vt-5), (int(round((self.im_w-20)*HISTH)+10)+1,vt+10+5), (0, 0, 255), cv2.FILLED)
        vt += 30
        # Visual safety level indicator: colored frame border by alarm state.
        if self.altrig == 1: # Alert
            cv2.rectangle(image, (0, 0), (self.im_w, self.im_h), (0, 0, 255), thickness=8)
        elif self.altrig <= -1: # Authorized or cooldown
            cv2.rectangle(image, (0, 0), (self.im_w, self.im_h), (0, 255, 0), thickness=8)
        # Authorized names inside
        ht = 10 # For horizontal
        nvt = IMAGE[1]-10 # vt from bottom
        cv2.putText(image,
                    "Auth: %2d |" % len(sec_auths),
                    (ht, nvt), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 255, 0), 2)
        ht += 85 # For horizontal
        for name in sec_auths:
            # Swatch filled with the person's reference color, initial on top
            # in the inverted color for contrast.
            b, g, r = sec_auths[name]
            cv2.rectangle(image, (ht-2, nvt-15), (ht+15-2, nvt+4), (b,g,r), thickness=-1)
            cv2.rectangle(image, (ht-2, nvt-15), (ht+15-2, nvt+4), (255,255,255), thickness=1)
            cv2.putText(image,
                        name[0], (ht, nvt),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (255-b, 255-g, 255-r), 1)
            ht += 15
        # Extra stats
        cv2.putText(image,
                    "SECURITY: %.0f%%" % (sec_lv*100),
                    (10, vt), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 255, 0), 2)
        vt += 20
        cv2.putText(image,
                    "FPS: %.2f" % self.fps,
                    (10, vt), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 255, 0), 2)
        vt += 20
        # LSTM display: per-person activity label at the person's location.
        for (act_lab, act_conf, act_loc, id_val) in zip(act_labs, act_confs, act_locs, human_ids.values()):
            cv2.putText(image,
                        " %d: %s %.2f" % (id_val, act_lab, act_conf),
                        (int(round(act_loc[0])), int(round(act_loc[1]))), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (0, 255, 0), 2)
        # Darknet display: obj = (label, confidence, bounds).
        for obj in objs:
            label = obj[0]
            dconf = obj[1]
            bounds = obj[2]
            image, color = openpose_human.draw_box(image, 1, bounds, label, dconf)
        # Facerec display: map face-crop coords back into the full frame.
        for (top, right, bottom, left), face in zip(face_locs, face_names):
            label = face
            # bounds = [4*left, 4*top, 4*(right-left), 4*(bottom-top)]
            bounds = [freg[2]+FPSCALE*left, freg[0]+FPSCALE*top, FPSCALE*(right-left), FPSCALE*(bottom-top)]
            image, color = openpose_human.draw_box(image, 0, bounds, label, loc=1)
        cv2.imshow('Bedssys', image)
class openpose_human:
# def __init__(self, camera=0,resize='0x0',resize_out_ratio=4.0,model='mobilenet_thin',show_process=False):
    def __init__(self, image, resize=OPSIZE, model='mobilenet_v2_small'):
        """Build the TF-pose estimator sized to *resize* and record frame geometry.

        :param image: a sample frame, used only to capture width/height
        :param resize: 'WxH' string parsed by model_wh
        :param model: tf-pose model name passed to get_graph_path
        """
        # Dedicated DEBUG logger with its own stream handler.
        self.logger = logging.getLogger('TfPoseEstimator-WebCam')
        self.logger.setLevel(logging.DEBUG)
        self.ch = logging.StreamHandler()
        self.ch.setLevel(logging.DEBUG)
        self.formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
        self.ch.setFormatter(self.formatter)
        self.logger.addHandler(self.ch)
        self.w, self.h = model_wh(resize)
        if self.w > 0 and self.h > 0:
            # Cap TensorFlow's GPU usage to the configured fraction.
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=OPGPU) # Allocate GPU fraction
            self.e = TfPoseEstimator(get_graph_path(model), target_size=(self.w, self.h), tf_config=tf.ConfigProto(gpu_options=gpu_options))
        else:
            # Invalid resize string: fall back to the estimator default size.
            self.e = TfPoseEstimator(get_graph_path(model), target_size=(432, 368))
        self.im_h, self.im_w = image.shape[:2]
        self.videostep = 0
        # Tracked pose history: person id -> list of 36-element keypoint vectors.
        self.human_keypoint = {0: [np.zeros(36)]}
        self.human_ids = {0: 0}
def runopenpose(self, image, resize_out_ratio=4.0):
    """Run pose inference on one frame and update the tracked keypoint buffers.

    Returns (human_keypoint, human_ids, humans): the per-slot keypoint
    histories, their display ids, and the raw inference result.
    """
    # ret_val, image = cam.read()
    ##self.logger.debug('image process+')
    humans = self.e.inference(image, resize_to_default=(self.w > 0 and self.h > 0), upsample_size=resize_out_ratio)
    skeletoncount = 0
    skels = np.array([np.zeros(36)])
    for human in humans:
        if skeletoncount == 0:  # First skeleton replaces the placeholder row
            skels = np.array([openpose_human.write_coco_json(human, self.im_w,self.im_h)])
        else:  # Append the rest
            skels = np.vstack([skels, np.array(openpose_human.write_coco_json(human, self.im_w,self.im_h))])
        skeletoncount = skeletoncount + 1
    # if skeletoncount == 1: # Just assume it's the same person if there's only one
    #     self.human_keypoint[0].append(skels)
    if skeletoncount > 0:
        # Match new skeletons against tracked slots and update histories.
        self.human_keypoint, self.human_ids = openpose_human.push(self.human_keypoint, self.human_ids, skels)
    else:
        # No human actually detected (humans is empty, thus skcount = 0): reset tracking.
        self.human_keypoint = {0: [np.zeros(36)]}
        self.human_ids = {0: 0}
    tf.reset_default_graph()  # Reset the graph
    # self.logger.debug('finished+')
    return (self.human_keypoint, self.human_ids, humans)
# Basically, human_keypoint store a string of poses, length N_STEPS, and tracked.
# Humans is the result of a single inference, formatting still raw.
def draw_box(image, coord_type, bounds, text='', conf=1, loc=0, thickness=3):
    """Draw a labelled, confidence-coloured rectangle on ``image``.

    coord_type 0 means bounds[0:2] is the top-left corner; otherwise it is the
    box centre. ``loc`` selects caption placement: 0 above, 1 below, anything
    else suppresses the caption. Returns (image, color).
    """
    box_w = int(bounds[2])
    box_h = int(bounds[3])
    if coord_type == 0:
        # (x, y) already names the top-left corner.
        left = int(bounds[0])
        top = int(bounds[1])
    else:
        # (x, y) is the centre: shift to the top-left corner.
        left = int(bounds[0] - bounds[2] / 2)
        top = int(bounds[1] - bounds[3] / 2)
    # Colour fades from red (low confidence) to green (high confidence).
    strength = conf ** 2
    color = (int(255 * (1 - strength)), int(255 * strength), 0)
    cv2.rectangle(image, (left, top), (left + box_w, top + box_h), color, thickness)
    caption = "%s %.2f" % (text, conf)
    if loc == 0:
        cv2.putText(image, caption, (left, top - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
    elif loc == 1:
        cv2.putText(image, caption, (left, top + box_h + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
    return image, color
def write_coco_json(human, image_w, image_h):
    """Flatten a tf-pose ``human`` into [x0, y0, ..., x17, y17] pixel coords.

    Undetected COCO joints are encoded as (0, 0); detected joints are scaled
    from normalized [0, 1] coordinates to pixels, rounded to 3 decimals.
    """
    keypoints = []
    # Fix: original line read `coco_ids = coco_ids = [...]` (duplicated target).
    coco_ids = list(range(18))  # COCO joint ids 0..17
    for coco_id in coco_ids:
        if coco_id not in human.body_parts.keys():
            keypoints.extend([0, 0])
            continue
        body_part = human.body_parts[coco_id]
        keypoints.extend([round(body_part.x * image_w, 3), round(body_part.y * image_h, 3)])
    return keypoints
def push(traces, ids, new_skels, THRESHOLD = 100, TRACE_SIZE = N_STEPS):
    ###print("##### Multi-human")
    """Add the keypoints from a new frame into the buffer.

    Matches each new skeleton to its nearest tracked slot (within THRESHOLD
    pixels of the representative point); unmatched tracked slots are dropped,
    unmatched new skeletons get fresh slots. Histories are capped at
    TRACE_SIZE frames. Mutates and returns (traces, ids).

    NOTE(review): nesting reconstructed from a flattened source — in
    particular whether `ids[...] = id` belongs only to the new-slot branch;
    confirm against the original.
    """
    # dists, neighbors = openpose_human.nearest_neighbors(traces, new_skels)
    dists, neighbors = openpose_human.point(traces, new_skels)
    keygen = []
    # New skeletons which aren't close to a previously observed skeleton:
    unslotted = []
    # Previously observed skeletons which aren't close to a new one:
    for each in traces.keys():
        keygen.append(each)
    unseen = set(keygen)
    for skel, dist, neighbor in zip(new_skels, dists, neighbors):
        ###print(dist, neighbor)
        if dist <= THRESHOLD:
            if neighbor in traces:
                traces[neighbor].append(skel)
            else:
                id = randint(0,100)  # Only used for naming
                traces[neighbor] = []
                traces[neighbor].append(skel)
                ids[neighbor] = id
            # Cap the history length at TRACE_SIZE (drop the oldest frame).
            if len(traces[neighbor]) > TRACE_SIZE:
                traces[neighbor].pop(0)
            unseen.discard(neighbor)
        else:
            unslotted.append(skel)
    # Drop tracked slots that did not match any new skeleton.
    for i in unseen:
        del traces[i]
        del ids[i]
    # Indices we didn't match, and the rest of the numbers are fair game
    availible_slots = chain(sorted(unseen), count(len(traces)))
    for slot, skel in zip(availible_slots, unslotted):
        id = randint(0,100)  # Only used for naming
        if slot in traces:
            traces[slot].append(skel)
        else:
            traces[slot] = []
            traces[slot].append(skel)
            ids[slot] = id
    return traces, ids
def point(traces, skels, TRACE_IDX=None):
    """Match new skeletons to tracked slots by distance of representative points.

    Reduces each skeleton to a single average (x, y) point, then runs a
    1-nearest-neighbour search of current points against the latest tracked
    points. Returns (distances, neighbor_slot_indices), both flat arrays with
    one entry per row of ``skels``.

    Fix: TRACE_IDX used a mutable list as default argument; it is now a None
    sentinel expanded to the same full index list [0..35].
    """
    if TRACE_IDX is None:
        TRACE_IDX = list(range(36))
    if not traces:  # First pass: every skeleton maps to its own slot at distance 0
        return np.zeros(len(skels)), np.arange(len(skels))
    prev = np.array([  # Pull the most recent location of each skeleton, [-1] means get 1 data from behind
        coords[-1][TRACE_IDX] for _, coords in sorted(traces.items())])
    curr = skels[:, TRACE_IDX]
    # Determine representative point, may use various method such as median, average, etc
    prev_point = openpose_human.average(prev)
    curr_point = openpose_human.average(curr)
    # N is typically small (< 40) so brute force is fast
    nn_model = NearestNeighbors(n_neighbors=1, algorithm='brute')
    nn_model.fit(prev_point)
    dist, nn = nn_model.kneighbors(curr_point, return_distance=True)
    return dist.flatten(), nn.flatten()
def average(skels):
    """Reduce each skeleton row to one representative (x, y): the mean of its
    non-zero coordinates (zeros mean "joint not detected" and are excluded
    from the count). Returns an (n, 2) array.

    NOTE(review): SKX/SKY are module-level index selectors for the x and y
    coordinate positions — presumably skel[0::2] and skel[1::2]; confirm.
    """
    avg_skels = np.empty((0, 2))
    for skel in skels:
        # Remember that a point might not be detected, giving zero. Count the non-zero.
        # Below line is equivalent to COUNTIF(not-zero).
        # Count non-zeros
        nzero_x = sum(1 if (x != 0) else 0 for x in skel[SKX])
        nzero_y = sum(1 if (x != 0) else 0 for x in skel[SKY])
        # Guard against division by zero when no joint was detected at all.
        if (nzero_x == 0):
            nzero_x = 1
        if (nzero_y == 0):
            nzero_y = 1
        x = sum(skel[SKX]) / nzero_x
        y = sum(skel[SKY]) / nzero_y
        avg_skels = np.vstack((avg_skels, np.array([x, y])))
    return avg_skels
def nearest_neighbors(traces, skels, TRACE_IDX=None):
    """Match new skeletons to tracked slots by 1-NN over full 36-dim keypoints.

    Unlike ``point`` this compares the raw keypoint vectors instead of the
    averaged representative points. Returns (distances, neighbor_indices).

    Fix: TRACE_IDX used a mutable list as default argument; it is now a None
    sentinel expanded to the same full index list [0..35].
    """
    if TRACE_IDX is None:
        TRACE_IDX = list(range(36))
    if not traces:  # First pass: identity mapping at distance 0
        return np.zeros(len(skels)), np.arange(len(skels))
    prev = np.array([  # Pull the most recent location of each skeleton
        coords[-1][TRACE_IDX] for _, coords in sorted(traces.items())])
    curr = skels[:, TRACE_IDX]
    # N is typically small (< 40) so brute force is fast
    nn_model = NearestNeighbors(n_neighbors=1, algorithm='brute')
    nn_model.fit(prev)
    dist, nn = nn_model.kneighbors(curr, return_distance=True)
    return dist.flatten(), nn.flatten()
class activity_human:
    """LSTM action classifier over N_STEPS-long sequences of 36-dim skeletons."""
    # Latest inference result: label, confidence, and display location.
    action = "null"
    conf = 0
    loc = []
    # LABELS = [
    #     "JUMPING",
    #     "JUMPING_JACKS",
    #     # "BOXING",
    #     "WAVING_2HANDS",
    #     "WAVING_1HAND",
    #     "CLAPPING_HANDS"
    # ]
def __init__(self):
    """Build the TF1 LSTM graph, open a session, and restore a trained model.

    NOTE(review): indentation reconstructed from a flattened source; LABELS,
    LSGPU and DATASET_PATH are module-level configuration — confirm.
    """
    self.LABELS = LABELS
    self.n_input = 36
    self.n_hidden = 36  # Hidden layer num of features
    # n_classes = 6
    n_classes = len(self.LABELS)
    # N_STEPS = 32
    #updated for learning-rate decay
    # calculated as: decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
    decaying_learning_rate = True
    learning_rate = 0.0025  #used if decaying_learning_rate set to False
    init_learning_rate = 0.005
    decay_rate = 0.96  #the base of the exponential in the decay
    decay_steps = 100000  #used in decay every 60000 steps with a base of 0.96
    global_step = tf.Variable(0, trainable=False)
    lambda_loss_amount = 0.0015
    # training_iters = training_data_count *300 # Loop 300 times on the dataset, ie 300 epochs
    # training_iters = training_data_count *60
    # training_iters = training_data_count *120
    # training_iters = training_data_count *1
    batch_size = 512
    display_iter = batch_size*8  # To show test set accuracy during training
    #### Build the network
    # Graph input/output
    self.x = tf.placeholder(tf.float32, [None, N_STEPS, self.n_input])
    self.y = tf.placeholder(tf.float32, [None, n_classes])
    # Graph weights
    weights = {
        'hidden': tf.Variable(tf.random_normal([self.n_input, self.n_hidden])), # Hidden layer weights
        'out': tf.Variable(tf.random_normal([self.n_hidden, n_classes], mean=1.0))
    }
    biases = {
        'hidden': tf.Variable(tf.random_normal([self.n_hidden])),
        'out': tf.Variable(tf.random_normal([n_classes]))
    }
    self.pred = activity_human.LSTM_RNN(self, self.x, weights, biases)
    # Loss, optimizer and evaluation
    l2 = lambda_loss_amount * sum(
        tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()
    )  # L2 loss prevents this overkill neural network to overfit the data
    self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.pred)) + l2  # Softmax loss
    if decaying_learning_rate:
        learning_rate = tf.train.exponential_decay(init_learning_rate, global_step*batch_size, decay_steps, decay_rate, staircase=True)
    #decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps) #exponentially decayed learning rate
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost,global_step=global_step)  # Adam Optimizer
    test_losses = []
    test_accuracies = []
    train_losses = []
    train_accuracies = []
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=LSGPU)  # Allocate GPU fraction
    self.sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True, gpu_options=gpu_options))
    # self.sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
    # self.sess = tf.self.session(config=tf.ConfigProto(log_device_placement=True))
    init = tf.global_variables_initializer()
    self.sess.run(init)
    # training_iters = training_data_count *30
    #create saver before training
    saver = tf.train.Saver()
    # saver = tf.train.Saver(var_list={'wh':weights['hidden'], 'wo':weights['out'], 'bh':biases['hidden'], 'bo':biases['out']})
    # tf.reset_default_graph()
    load = True
    train = False
    update = False
    #check if you want to retrain or import a saved model
    if load:
        saver.restore(self.sess, DATASET_PATH + "model.ckpt")
        ###print("Model restored.")
    correct_pred = tf.equal(tf.argmax(self.pred,1), tf.argmax(self.y,1))
    self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Run the classifier on one keypoint sequence.
def runinference(self, human_keypoint):
    """Classify a tracked keypoint history; sets self.action and self.conf.

    NOTE(review): LABSEL/LABWEI/LABGRO are module-level post-processing
    switches (weighting, label grouping) — confirm their shapes.
    """
    time_start = time.time()
    ##### Inferencing
    # X_infer_path = "utilities/something/something.txt"
    # X_infer_path = DATASET_PATH + "X_test.txt"
    # X_val = load_X(X_infer_path)
    X_test = activity_human.load_XLive(human_keypoint)
    # print("##### Raw")
    # for arr in human_keypoint:
    #     for i in arr:
    #         print(i, end=", ")
    #     print()
    # print("##### Preprocessed")
    # if len(X_test) > 0:
    #     for arr in X_test[0]:
    #         for i in arr:
    #             print(i, end=", ")
    #         print()
    self.preds = self.sess.run(
        [self.pred],
        feed_dict={
            self.x: X_test
        }
    )
    # Special selection (temporary confidence)
    tconf = self.preds[0][0].copy()
    if LABSEL[0]:
        # Weighted: scale per-class scores by the configured weights.
        tconf *= LABWEI
    if LABSEL[1]:
        # Grouped: keep only the label group with the best average score.
        avgs = [sum(tconf[gr])/len(gr) for gr in LABGRO]
        imax = avgs.index(max(avgs))
        # The losing group is nullified
        zeros = np.zeros(len(tconf))
        zeros[LABGRO[imax]] = 1
        tconf *= zeros
    # Basic selection: argmax over the (possibly reweighted) scores.
    # NOTE: `id` shadows the builtin; kept as-is.
    id, self.conf = max(enumerate(tconf), key=operator.itemgetter(1))
    self.action = self.LABELS[id]
    ###print(tconf)
    ###print(self.preds, self.action)
    time_stop = time.time()
    ###print("TOTAL TIME: {}".format(time_stop - time_start))
def load_X(X_path):
    """Load a CSV file of skeleton rows and reshape into N_STEPS-long blocks.

    Returns a float32 array of shape (num_blocks, N_STEPS, num_features).
    Fix: file is now closed via a context manager (the original leaked the
    handle if np.array raised), and a redundant pass-through list
    comprehension was removed.
    """
    with open(X_path, 'r') as file:
        X_ = np.array(
            [row.split(',') for row in file],
            dtype=np.float32
        )
    blocks = int(len(X_) / N_STEPS)
    return np.array(np.split(X_, blocks))
# Load the networks inputs
def load_XLive(keypoints):
    """Convert live keypoints into the model input tensor and preprocess.

    PREPROC is a module-level pair of switches: PREPROC[1] selects idle
    detection, PREPROC[0] selects a coordinate transform.
    """
    # print(keypoints)
    # print(len(keypoints), ":", [len(row) for row in keypoints])
    X_ = np.array(keypoints,dtype=np.float32)
    blocks = int(len(X_) / N_STEPS)
    X_ = np.array(np.split(X_,blocks))
    # Idle check & forcing it if it is.
    if PREPROC[1] == 1:
        X_[0] = activity_human.idlenull(X_[0])
    elif PREPROC[1] == 2:
        X_[0] = activity_human.idlenull2(X_[0])
    # Preprocessing before the data is used for inference
    # The data is: [ [ [point x 36] x N_STEPS] ], so one too many layer
    if PREPROC[0] == 1:
        # Poses emulated as if there's a big border between sub-images
        X_[0] = activity_human.amplify(X_[0])
    elif PREPROC[0] == 2:
        # Individual pose returned to origin
        X_[0] = activity_human.normalize(X_[0])
    elif PREPROC[0] == 3:
        # Every pose in a gesture will be relative to the first in the gesture
        X_[0] = activity_human.normalizeonce(X_[0])
    elif PREPROC[0] == 4:
        # Every pose in a gesture will be relative to the first in the gesture
        X_[0] = activity_human.normalizepoint(X_[0])
    elif PREPROC[0] == 5:
        # Poses in 4 sub-images emulated as if happening in a single image
        X_[0] = activity_human.reverse(X_[0])
    return X_
def idlenull2(skels):
    """Force an unmoving gesture to a canonical idle pose (variant 2).

    Movement is measured from the neck (falling back to the nose) across
    frames; below IDLETH the whole sequence is replaced by a fixed pose that
    the model reliably classifies as idle.

    NOTE(review): n = 5 looks like it assumes 5 frames rather than len(skels)
    — confirm against N_STEPS.
    """
    diff_x = 0
    diff_y = 0
    n = 5
    for i, skel in enumerate(skels):
        # Calculate the midpoint representation,
        # using primary points.
        # Try pose part #1, neck
        ax = skel[2]
        ay = skel[3]
        if ax==0 and ay==0:
            # Then try pose part #0, nose
            ax = skel[0]
            ay = skel[1]
        # Calculate then sum overall movement
        if i != 0:
            diff_x += abs(ax - px)
            diff_y += abs(ay - py)
        px = ax
        py = ay
    # Average the diff and calculate the distance
    diff_x /= n-1
    diff_y /= n-1
    diff = math.sqrt(diff_x**2 + diff_y**2)
    if diff < IDLETH:
        # All to zero, tested that it's guaranteed to be inferenced as idle (tho low confidence).
        skels = np.array(N_STEPS * [[478,62,476,78,492,80,494,108,494,132,458,76,442,100,440,128,478,128,474,158,476,188,454,126,442,158,426,194,480,60,476,60,484,62,474,60]], dtype=np.float32)
    return skels
def idlenull(skels):
    """Force an unmoving gesture to a canonical idle pose (variant 1).

    Movement is measured from the average of all detected joints per frame;
    below IDLETH the sequence is replaced by a fixed idle pose.
    """
    diff_x = 0
    diff_y = 0
    n = 5
    for i, skel in enumerate(skels):
        # Calculate the midpoint representation, using average
        # (Exact copy from average function)
        # Remember that a point might not be detected, giving zero. Count the non-zero.
        # Below line is equivalent to COUNTIF(not-zero).
        x = skel[SKX]
        y = skel[SKY]
        # Count non-zeros
        nzero_x = sum(1 if (k != 0) else 0 for k in x)
        nzero_y = sum(1 if (k != 0) else 0 for k in y)
        # A frame with no detections at all does not count toward the average.
        if (nzero_x == 0 and nzero_y == 0):
            n -= 1
        if (nzero_x == 0):
            nzero_x = 1
        if (nzero_y == 0):
            nzero_y = 1
        ax = sum(x) / nzero_x
        ay = sum(y) / nzero_y
        # Calculate then sum overall movement
        if i != 0:
            diff_x += abs(ax - px)
            diff_y += abs(ay - py)
        px = ax
        py = ay
    # Average the diff and calculate the distance
    diff_x /= n-1
    diff_y /= n-1
    diff = math.sqrt(diff_x**2 + diff_y**2)
    if diff < IDLETH:
        # All to zero, tested that it's guaranteed to be inferenced as idle (tho low confidence).
        skels = np.array(N_STEPS * [[478,62,476,78,492,80,494,108,494,132,458,76,442,100,440,128,478,128,474,158,476,188,454,126,442,158,426,194,480,60,476,60,484,62,474,60]], dtype=np.float32)
    return skels
def normalizepoint(skels):
    """Shift the whole gesture so the first frame's coordinates become the origin.

    Every frame is translated by the first frame's raw (x, y) vectors;
    undetected joints (zeros) stay zero via the multiplier mask.
    """
    # Preprocess, move any pose to the origin, based on their average as midpoint ref.
    # Still using old normalization method instead of the one used in normalizeonce.
    for i, skel in enumerate(skels):
        # Calculate the midpoint representation, using average
        x = skel[SKX]
        y = skel[SKY]
        if (i == 0):
            # Reference vectors taken from the first frame.
            xo = x.copy()
            yo = y.copy()
        # Normalization process
        # Shifting first pose to origin, and the rest follow the same shift
        zero = [0 if (k == 0) else 1 for k in skel]  # As the multiplier, zero stays zero
        x -= xo
        y -= yo
        # Recombine, placed one after another
        skel[0::2] = x
        skel[1::2] = y
        skel *= zero
    return skels
def normalizeonce(skels):
    """Shift every frame by the average point of the first detected frame.

    The reference (ax, ay) is computed once, from the first frame that has at
    least one detected joint; all subsequent frames are translated by it.

    NOTE(review): nesting reconstructed from a flattened source — the zero-count
    guards are placed inside the else branch; confirm against the original.
    """
    first = False
    # Preprocess, move any pose to the origin, based on their average as midpoint ref.
    for skel in skels:
        # Calculate the midpoint representation, using average
        # Similar copy from average function
        x = skel[SKX]
        y = skel[SKY]
        if (first == False):
            # Remember that a point might not be detected, giving zero.
            # Count the non-zero.
            nzero_x = sum(1 if (k != 0) else 0 for k in x)
            nzero_y = sum(1 if (k != 0) else 0 for k in y)
            if (nzero_x == 0) and (nzero_y == 0):
                # Entirely empty frame: keep looking for a usable reference.
                first = False
            else:
                first = True
                if (nzero_x == 0):
                    nzero_x = 1
                if (nzero_y == 0):
                    nzero_y = 1
                ax = sum(x) / nzero_x
                ay = sum(y) / nzero_y
        if (first == True):
            # Normalization process
            # Shifting first pose to origin, and the rest follow the same shift
            zero = [0 if (k == 0) else 1 for k in skel]  # As the multiplier, zero stays zero
            x -= ax
            y -= ay
            # Recombine, placed one after another
            skel[0::2] = x
            skel[1::2] = y
            skel *= zero
    return skels
def normalize(skels):
    """Shift each frame independently so its own average point is the origin."""
    # Preprocess, move any pose to the origin, based on their average as midpoint ref.
    for skel in skels:
        # Calculate the midpoint representation, using average
        # Similar copy from average function
        x = skel[SKX]
        y = skel[SKY]
        # Remember that a point might not be detected, giving zero.
        # Count the non-zero.
        nzero_x = sum(1 if (k != 0) else 0 for k in x)
        nzero_y = sum(1 if (k != 0) else 0 for k in y)
        if (nzero_x == 0):
            nzero_x = 1
        if (nzero_y == 0):
            nzero_y = 1
        ax = sum(x) / nzero_x
        ay = sum(y) / nzero_y
        # Normalization process
        # Shifting every poses to origin
        zero = [0 if (k == 0) else 1 for k in skel]  # As the multiplier, zero stays zero
        x -= ax
        y -= ay
        # Recombine, placed one after another
        skel[0::2] = x
        skel[1::2] = y
        skel *= zero
    return skels
def amplify(skels):
    """Push poses in different screen quadrants further apart by POSEAMP.

    A frame whose average point lies past the SUBIM boundary on an axis gets
    POSEAMP added on that axis, emulating a large border between sub-images.
    """
    # Preprocess, move any pose in different quadrant way further from each other.
    for skel in skels:
        # Similar copy from average function
        x = skel[SKX]
        y = skel[SKY]
        # Remember that a point might not be detected, giving zero.
        # Count the non-zero.
        nzero_x = sum(1 if (k != 0) else 0 for k in x)
        nzero_y = sum(1 if (k != 0) else 0 for k in y)
        if (nzero_x == 0):
            nzero_x = 1
        if (nzero_y == 0):
            nzero_y = 1
        ax = sum(x) / nzero_x
        ay = sum(y) / nzero_y
        # Amplification process
        # Shifting constant, to be added to the skeletons
        sx = POSEAMP if (ax > SUBIM[0]) else 0
        sy = POSEAMP if (ay > SUBIM[1]) else 0
        zero = [0 if (x == 0) else 1 for x in skel]  # As the multiplier, zero stays zero
        x += sx
        y += sy
        # Recombine, placed one after another
        skel[0::2] = x
        skel[1::2] = y
        skel *= zero
    return skels
def reverse(skels):
    """Inverse of ``amplify``: fold poses from 4 sub-images into one quadrant.

    A frame whose average point lies past the SUBIM boundary is shifted back
    by the sub-image size so all poses appear to share a single image.
    """
    # Reverse of amplify
    # Preprocess, move any pose in different quadrant as if it's happening in single quadrant.
    for skel in skels:
        # Similar copy from average function
        x = skel[SKX]
        y = skel[SKY]
        # Remember that a point might not be detected, giving zero.
        # Count the non-zero.
        nzero_x = sum(1 if (k != 0) else 0 for k in x)
        nzero_y = sum(1 if (k != 0) else 0 for k in y)
        if (nzero_x == 0):
            nzero_x = 1
        if (nzero_y == 0):
            nzero_y = 1
        ax = sum(x) / nzero_x
        ay = sum(y) / nzero_y
        # Amplification process
        # Shifting constant, to be added to the skeletons
        sx = SUBIM[0] if (ax >= SUBIM[0]) else 0
        sy = SUBIM[1] if (ay >= SUBIM[1]) else 0
        zero = [0 if (x == 0) else 1 for x in skel]  # As the multiplier, zero stays zero
        x -= sx
        y -= sy
        # Recombine, placed one after another
        skel[0::2] = x
        skel[1::2] = y
        skel *= zero
    return skels
def load_y(y_path):
    """Load a label file (one 1-based class id per row) as 0-based int32.

    Fix: file is now closed via a context manager (the original leaked the
    handle if np.array raised), and a redundant pass-through list
    comprehension was removed.
    """
    with open(y_path, 'r') as file:
        # NOTE(review): the replace() call appears to be a no-op as written
        # (' ' -> ' '); it likely collapsed double spaces originally — confirm.
        y_ = np.array(
            [row.replace(' ', ' ').strip().split(' ') for row in file],
            dtype=np.int32
        )
    # for 0-based indexing
    return y_ - 1
def LSTM_RNN(self, _X, _weights, _biases):
    """Build the LSTM classifier graph: ReLU input projection, stacked LSTM,
    linear readout from the last timestep. Returns the logits tensor.
    """
    # model architecture based on "guillaume-chevalier" and "aymericdamien" under the MIT license.
    _X = tf.transpose(_X, [1, 0, 2])  # permute N_STEPS and batch_size
    _X = tf.reshape(_X, [-1, self.n_input])
    # Rectifies Linear Unit activation function used
    _X = tf.nn.relu(tf.matmul(_X, _weights['hidden']) + _biases['hidden'])
    # Split data because rnn cell needs a list of inputs for the RNN inner loop
    _X = tf.split(_X, N_STEPS, 0)
    if LAYER == 1:
        # Define two stacked LSTM cells (two recurrent layers deep) with tensorflow
        lstm_cell_1 = tf.contrib.rnn.BasicLSTMCell(self.n_hidden, forget_bias=1.0, state_is_tuple=True)
        lstm_cell_2 = tf.contrib.rnn.BasicLSTMCell(self.n_hidden, forget_bias=1.0, state_is_tuple=True)
        lstm_cells = tf.contrib.rnn.MultiRNNCell([lstm_cell_1, lstm_cell_2], state_is_tuple=True)
    elif LAYER == 2:
        # Single hidden layer
        lstm_cell_1 = tf.contrib.rnn.BasicLSTMCell(self.n_hidden, forget_bias=1.0, state_is_tuple=True)
        lstm_cells = tf.contrib.rnn.MultiRNNCell([lstm_cell_1], state_is_tuple=True)
    outputs, states = tf.contrib.rnn.static_rnn(lstm_cells, _X, dtype=tf.float32)
    # A single output is produced, in style of "many to one" classifier, refer to http://karpathy.github.io/2015/05/21/rnn-effectiveness/ for details
    lstm_last_output = outputs[-1]
    # Linear activation
    return tf.matmul(lstm_last_output, _weights['out']) + _biases['out']
def extract_batch_size(_train, _labels, _unsampled, batch_size):
    """Sample a batch of ``batch_size`` rows without replacement.

    Elements are chosen randomly from ``_train`` (with the matching rows of
    ``_labels``); the indices drawn are removed from the unsampled pool.
    Returns (batch_data, batch_labels, remaining_unsampled_indices).

    Fix: the original re-built ``list(_unsampled)`` on every loop iteration;
    the conversion is now hoisted out of the loop (same draw sequence, since
    random.choice indexes by position either way).
    """
    shape = list(_train.shape)
    shape[0] = batch_size
    batch_s = np.empty(shape)
    batch_labels = np.empty((batch_size, 1))
    # Copy once so the caller's sequence is not mutated and removal is cheap.
    _unsampled = list(_unsampled)
    for i in range(batch_size):
        # index = random sample from _unsampled (indices)
        index = random.choice(_unsampled)
        batch_s[i] = _train[index]
        batch_labels[i] = _labels[index]
        _unsampled.remove(index)
    return batch_s, batch_labels, _unsampled
def one_hot(y_):
    """One-hot encode a column vector of class indices.

    e.g. [[5], [0], [3]] -> rows of an identity matrix, so the result is a
    float array with a single 1.0 per row.
    """
    flat = y_.reshape(len(y_))
    width = int(np.max(flat)) + 1
    # Indexing the identity matrix by label picks the one-hot row per sample.
    return np.eye(width)[np.array(flat, dtype=np.int32)]
def test(self):
    """Evaluate the restored model on the saved train and test splits and
    print batch loss / accuracy for each (no training is performed)."""
    X_train_path = DATASET_PATH + "X_train.txt"
    X_test_path = DATASET_PATH + "X_test.txt"
    y_train_path = DATASET_PATH + "Y_train.txt"
    y_test_path = DATASET_PATH + "Y_test.txt"
    X_train = activity_human.load_X(X_train_path)
    X_test = activity_human.load_X(X_test_path)
    y_train = activity_human.load_y(y_train_path)
    y_test = activity_human.load_y(y_test_path)
    # only perform testing - on training set
    loss, acc = self.sess.run(
        [self.cost, self.accuracy],
        feed_dict={
            self.x: X_train,
            self.y: activity_human.one_hot(y_train)
        }
    )
    print()
    print("PERFORMANCE ON TRAIN SET: " + \
          "Batch Loss = {}".format(loss) + \
          ", Accuracy = {}".format(acc))
    # only perform testing - on test set
    loss, acc = self.sess.run(
        [self.cost, self.accuracy],
        feed_dict={
            self.x: X_test,
            self.y: activity_human.one_hot(y_test)
        }
    )
    print("PERFORMANCE ON TEST SET: " + \
          "Batch Loss = {}".format(loss) + \
          ", Accuracy = {}".format(acc))
    print()
if __name__ == '__main__':
    # Script entry point; mainhuman_activity() is defined elsewhere in this file.
    mainhuman_activity()
|
/*
* The MIT License (MIT)
*
* Copyright (c) 2019 Ha Thach (tinyusb.org)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
/*
* Copyright (c) 2022 hpmicro
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include "board_api.h"
#include "uf2.h"
#include "tusb.h"
/*--------------------------------------------------------------------*/
/* MACRO CONSTANT TYPEDEF PROTYPES */
/*--------------------------------------------------------------------*/
/* Indicator colours as (R, G, B) triples. */
uint8_t const RGB_USB_UNMOUNTED[] = { 0xff, 0x00, 0x00 }; /* Red */
uint8_t const RGB_USB_MOUNTED[] = { 0x00, 0xff, 0x00 }; /* Green */
uint8_t const RGB_WRITING[] = { 0xcc, 0x66, 0x00 }; /* Orange-ish; flashed while flashing firmware */
uint8_t const RGB_ENTER_BOOTLOADER[] = { 0x80, 0x00, 0xff }; /* Purple */
uint8_t const RGB_UNKNOWN[] = { 0x00, 0x00, 0x88 }; /* for debug */
uint8_t const RGB_OFF[] = { 0x00, 0x00, 0x00 };

/* Tick counter incremented by uf2_board_timer_handler(); drives blinking. */
static volatile uint32_t _timer_count;

/* Returns true when the device should stay in DFU (bootloader) mode. */
static bool check_dfu_mode(void);
/* Bootloader entry: jump to the application unless DFU mode is requested,
 * otherwise bring up flash + TinyUSB and service the device stack forever. */
int main(void)
{
    uf2_board_init();
    TU_LOG1("TinyUF2\r\n");

    /* if not DFU mode, jump to App */
    if (!check_dfu_mode()) {
        TU_LOG1("Jump to application @0x%x(0x%x)\r\n", BOARD_UF2_APP_START, *(volatile uint32_t *)BOARD_UF2_APP_START);
        uf2_board_app_jump();
        /* uf2_board_app_jump() is not expected to return; spin if it does. */
        while (1) {
        }
    }

    TU_LOG1("Start DFU mode\r\n");
    uf2_board_dfu_init();
    uf2_board_flash_init();
    uf2_init();
    tusb_init();
    indicator_set(STATE_USB_UNPLUGGED);

#if (CFG_TUSB_OS == OPT_OS_NONE)
    /* Bare-metal super-loop: service the TinyUSB device task. */
    while (1) {
        tud_task();
    }
#endif
}
/* return true if start DFU mode, else App mode */
static bool check_dfu_mode(void)
{
    /* Stay in the bootloader when the board requests it, or when the
     * application image fails validation. */
    return !uf2_board_enter_bootloader() || !uf2_board_app_valid();
}
/*--------------------------------------------------------------------*/
/* Device callbacks */
/*--------------------------------------------------------------------*/
/* Invoked when device is plugged and configured */
void tud_mount_cb(void)
{
    indicator_set(STATE_USB_PLUGGED);
}
/* Invoked when device is unplugged */
void tud_umount_cb(void)
{
    indicator_set(STATE_USB_UNPLUGGED);
}
/*--------------------------------------------------------------------*/
/* USB HID */
/*--------------------------------------------------------------------*/
/* Invoked when received GET_REPORT control request */
/* Application must fill buffer report's content and return its length. */
/* Return zero will cause the stack to STALL request */
uint16_t tud_hid_get_report_cb(uint8_t itf, uint8_t report_id, hid_report_type_t report_type, uint8_t *buffer, uint16_t reqlen)
{
    /* TODO not Implemented: always STALL the request. */
    (void) itf;
    (void) report_id;
    (void) report_type;
    (void) buffer;
    (void) reqlen;

    return 0;
}
/* Invoked when received SET_REPORT control request or */
/* received data on OUT endpoint (Report ID = 0, Type = 0) */
void tud_hid_set_report_cb(uint8_t itf, uint8_t report_id, hid_report_type_t report_type, uint8_t const *buffer, uint16_t bufsize)
{
    /* This example doesn't use multiple report and report ID; data is ignored. */
    (void) itf;
    (void) report_id;
    (void) report_type;
    (void) buffer;
    (void) bufsize;
}
/*--------------------------------------------------------------------*/
/* Indicator */
/*--------------------------------------------------------------------*/
/* Current indicator state and the RGB colour shown for it. */
static uint32_t _indicator_state = STATE_BOOTLOADER_STARTED;
static uint8_t _indicator_rgb[3];

/* Switch the indicator to a new state: select the colour and (re)configure
 * the board timer that drives refresh/blink in uf2_board_timer_handler(). */
void indicator_set(uint32_t state)
{
    _indicator_state = state;

    switch (state) {
    case STATE_USB_UNPLUGGED:
        uf2_board_timer_start(1);
        memcpy(_indicator_rgb, RGB_USB_UNMOUNTED, 3);
        uf2_board_pwm_rgb_write(_indicator_rgb);
        break;
    case STATE_USB_PLUGGED:
        uf2_board_timer_start(5);
        memcpy(_indicator_rgb, RGB_USB_MOUNTED, 3);
        uf2_board_pwm_rgb_write(_indicator_rgb);
        break;
    case STATE_WRITING_STARTED:
        /* No immediate write: the timer handler blinks RGB_WRITING. */
        uf2_board_timer_start(25);
        memcpy(_indicator_rgb, RGB_WRITING, 3);
        break;
    case STATE_WRITING_FINISHED:
        uf2_board_timer_stop();
        uf2_board_pwm_rgb_write(RGB_WRITING);
        break;
    default:
        break; /* nothing to do */
    }
}
/* Board timer tick callback: advances the tick counter and refreshes or
 * blinks the RGB indicator according to the current state. */
void uf2_board_timer_handler(void)
{
    bool is_on;

    _timer_count++;
    switch (_indicator_state) {
    case STATE_USB_UNPLUGGED:
    case STATE_USB_PLUGGED:
        /* Steady colour: just rewrite it every tick. */
        uf2_board_pwm_rgb_write(_indicator_rgb);
        break;
    case STATE_WRITING_STARTED:
        /* Fast toggle with both LED and RGB */
        is_on = _timer_count & 0x01;
        /* blink RGB if available */
        uf2_board_pwm_rgb_write(is_on ? _indicator_rgb : RGB_OFF);
        break;
    default:
        break; /* nothing to do */
    }
}
|
import random
random.seed(0)
import os
import re
from prjxray import util
# Number of CLB slice instances to generate in the design.
CLBN = 400
print('//Requested CLBs: %s' % str(CLBN))
def gen_slices():
    """Yield the site name of every SLICEL/SLICEM site on the part."""
    for _tile, site, _stype in util.gen_sites(['SLICEL', 'SLICEM']):
        yield site
# Scan-chain bus widths: 8 bits of din/dout per CLB instance.
DIN_N = CLBN * 8
DOUT_N = CLBN * 8
# Emit the top-level wrapper: shift registers serialize the wide din/dout
# buses through 4 pins (clk, stb, di, do).
print(
'''
module top(input clk, stb, di, output do);
localparam integer DIN_N = %d;
localparam integer DOUT_N = %d;
reg [DIN_N-1:0] din;
wire [DOUT_N-1:0] dout;
reg [DIN_N-1:0] din_shr;
reg [DOUT_N-1:0] dout_shr;
always @(posedge clk) begin
din_shr <= {din_shr, di};
dout_shr <= {dout_shr, din_shr[DIN_N-1]};
if (stb) begin
din <= din_shr;
dout_shr <= dout;
end
end
assign do = dout_shr[DOUT_N-1];
roi roi (
.clk(clk),
.din(din),
.dout(dout)
);
endmodule
''' % (DIN_N, DOUT_N))
# CSV log correlating each instance's module variant, site location and N.
f = open('params.csv', 'w')
f.write('module,loc,n\n')
slices = gen_slices()
print(
    'module roi(input clk, input [%d:0] din, output [%d:0] dout);' %
    (DIN_N - 1, DOUT_N - 1))
# Instantiate CLBN randomized CLB variants, one per slice site.
for i in range(CLBN):
    # Don't have an O6 example
    modules = ['clb_NOUTMUX_' + x for x in ['CY', 'F78', 'O5', 'XOR', 'B5Q']]
    module = random.choice(modules)

    # F78 has no mux at position D, so N is limited to 0..2 there.
    if module == 'clb_NOUTMUX_F78':
        n = random.randint(0, 2)
    else:
        n = random.randint(0, 3)
    #n = 0
    loc = next(slices)

    print(' %s' % module)
    print(' #(.LOC("%s"), .N(%d))' % (loc, n))
    print(
        ' clb_%d (.clk(clk), .din(din[ %d +: 8]), .dout(dout[ %d +: 8]));'
        % (i, 8 * i, 8 * i))
    # Record the chosen variant for later correlation with bitstream diffs.
    f.write('%s,%s,%s\n' % (module, loc, n))
f.close()
# Close the roi module.
print(
'''endmodule
// ---------------------------------------------------------------------
''')
print(
'''
module myLUT8 (input clk, input [7:0] din,
output lut8o, output lut7bo, output lut7ao,
//caro: XOR additional result (main output)
//carco: CLA result (carry module additional output)
output caro, output carco,
output bo5, output bo6,
//Note: b5ff_q requires the mux and will conflict with other wires
//Otherwise this FF drops out
output wire ff_q);
//output wire [3:0] n5ff_q);
parameter N=-1;
parameter LOC="SLICE_FIXME";
wire [3:0] caro_all;
assign caro = caro_all[N];
wire [3:0] carco_all;
assign carco = carco_all[N];
wire [3:0] lutno6;
assign bo6 = lutno6[N];
wire [3:0] lutno5;
assign bo5 = lutno5[N];
//Outputs does not have to be used, will stay without it
(* LOC=LOC, BEL="F8MUX", KEEP, DONT_TOUCH *)
MUXF8 mux8 (.O(lut8o), .I0(lut7bo), .I1(lut7ao), .S(din[6]));
(* LOC=LOC, BEL="F7BMUX", KEEP, DONT_TOUCH *)
MUXF7 mux7b (.O(lut7bo), .I0(lutno6[3]), .I1(lutno6[2]), .S(din[6]));
(* LOC=LOC, BEL="F7AMUX", KEEP, DONT_TOUCH *)
MUXF7 mux7a (.O(lut7ao), .I0(lutno6[1]), .I1(lutno6[0]), .S(din[6]));
(* LOC=LOC, BEL="D6LUT", KEEP, DONT_TOUCH *)
LUT6_2 #(
.INIT(64'h8000_DEAD_0000_0001)
) lutd (
.I0(din[0]),
.I1(din[1]),
.I2(din[2]),
.I3(din[3]),
.I4(din[4]),
.I5(din[5]),
.O5(lutno5[3]),
.O6(lutno6[3]));
(* LOC=LOC, BEL="C6LUT", KEEP, DONT_TOUCH *)
LUT6_2 #(
.INIT(64'h8000_BEEF_0000_0001)
) lutc (
.I0(din[0]),
.I1(din[1]),
.I2(din[2]),
.I3(din[3]),
.I4(din[4]),
.I5(din[5]),
.O5(lutno5[2]),
.O6(lutno6[2]));
(* LOC=LOC, BEL="B6LUT", KEEP, DONT_TOUCH *)
LUT6_2 #(
.INIT(64'h8000_CAFE_0000_0001)
) lutb (
.I0(din[0]),
.I1(din[1]),
.I2(din[2]),
.I3(din[3]),
.I4(din[4]),
.I5(din[5]),
.O5(lutno5[1]),
.O6(lutno6[1]));
(* LOC=LOC, BEL="A6LUT", KEEP, DONT_TOUCH *)
LUT6_2 #(
.INIT(64'h8000_1CE0_0000_0001)
) luta (
.I0(din[0]),
.I1(din[1]),
.I2(din[2]),
.I3(din[3]),
.I4(din[4]),
.I5(din[5]),
.O5(lutno5[0]),
.O6(lutno6[0]));
//Outputs do not have to be used, will stay without them
(* LOC=LOC, KEEP, DONT_TOUCH *)
CARRY4 carry4(.O(caro_all), .CO(carco_all), .DI(lutno5), .S(lutno6), .CYINIT(1'b0), .CI());
generate
if (N == 3) begin
(* LOC=LOC, BEL="D5FF", KEEP, DONT_TOUCH *)
FDPE d5ff (
.C(clk),
.Q(ff_q),
.CE(1'b1),
.PRE(1'b0),
.D(lutno5[3]));
end
if (N == 2) begin
(* LOC=LOC, BEL="C5FF", KEEP, DONT_TOUCH *)
FDPE c5ff (
.C(clk),
.Q(ff_q),
.CE(1'b1),
.PRE(1'b0),
.D(lutno5[2]));
end
if (N == 1) begin
(* LOC=LOC, BEL="B5FF", KEEP, DONT_TOUCH *)
FDPE b5ff (
.C(clk),
.Q(ff_q),
.CE(1'b1),
.PRE(1'b0),
.D(lutno5[1]));
end
if (N == 0) begin
(* LOC=LOC, BEL="A5FF", KEEP, DONT_TOUCH *)
FDPE a5ff (
.C(clk),
.Q(ff_q),
.CE(1'b1),
.PRE(1'b0),
.D(lutno5[0]));
end
endgenerate
endmodule
//******************************************************************************
//BOUTMUX tests
module clb_NOUTMUX_CY (input clk, input [7:0] din, output [7:0] dout);
parameter LOC="SLICE_FIXME";
parameter N=1;
myLUT8 #(.LOC(LOC), .N(N))
myLUT8(.clk(clk), .din(din), .lut8o(),
.caro(), .carco(dout[0]),
.bo5(), .bo6(),
.ff_q());
endmodule
//clb_NOUTMUX_F78: already have above as clb_LUT8
module clb_NOUTMUX_F78 (input clk, input [7:0] din, output [7:0] dout);
parameter LOC="SLICE_FIXME";
parameter N=1;
wire lut8o, lut7bo, lut7ao;
/*
D: N/A (no such mux position)
C: F7B:O
B: F8:O
A: F7A:O
*/
generate
if (N == 3) begin
//No muxes, so this is undefined
invalid_configuration invalid_configuration3();
end else if (N == 2) begin
assign dout[0] = lut7bo;
end else if (N == 1) begin
assign dout[0] = lut8o;
end else if (N == 0) begin
assign dout[0] = lut7ao;
end
endgenerate
myLUT8 #(.LOC(LOC), .N(N))
myLUT8(.clk(clk), .din(din),
.lut8o(lut8o), .lut7bo(lut7bo), .lut7ao(lut7ao),
.caro(), .carco(),
.bo5(), .bo6(),
.ff_q());
endmodule
//Routes the LUT O5 output (bo5) of the myLUT8 helper to dout[0].
module clb_NOUTMUX_O5 (input clk, input [7:0] din, output [7:0] dout);
    parameter LOC="SLICE_FIXME"; //slice placement placeholder
    parameter N=1;               //row within the slice (0=A .. 3=D)
    myLUT8 #(.LOC(LOC), .N(N))
        myLUT8(.clk(clk), .din(din), .lut8o(),
        .caro(), .carco(),
        .bo5(dout[0]), .bo6(),
        .ff_q());
endmodule
/*
//FIXME: need to force it to use both X and O6
module clb_NOUTMUX_O6 (input clk, input [7:0] din, output [7:0] dout);
parameter LOC="SLICE_FIXME";
parameter N=1;
myLUT8 #(.LOC(LOC), .N(N))
myLUT8(.clk(clk), .din(din), .lut8o(), .co(), .carco(), .bo5(), .bo6());
endmodule
*/
//Routes the CARRY4 sum output (caro, the carry-chain XOR) to dout[0].
module clb_NOUTMUX_XOR (input clk, input [7:0] din, output [7:0] dout);
    parameter LOC="SLICE_FIXME"; //slice placement placeholder
    parameter N=1;               //row within the slice (0=A .. 3=D)
    myLUT8 #(.LOC(LOC), .N(N))
        myLUT8(.clk(clk), .din(din), .lut8o(),
        .caro(dout[0]), .carco(),
        .bo5(), .bo6(),
        .ff_q());
endmodule
//Routes the 5FF register output (ff_q) of the myLUT8 helper to dout[0].
module clb_NOUTMUX_B5Q (input clk, input [7:0] din, output [7:0] dout);
    parameter LOC="SLICE_FIXME"; //slice placement placeholder
    parameter N=1;               //row within the slice (0=A .. 3=D)
    myLUT8 #(.LOC(LOC), .N(N))
        myLUT8(.clk(clk), .din(din),
        .lut8o(),
        .caro(), .carco(),
        .bo5(), .bo6(),
        .ff_q(dout[0]));
endmodule
''')
|
#pragma once
#include "FreqViz.h"
#include "KnobsComponent.h"
// Generic JUCE plugin editor templated on the processor type: hosts a
// frequency visualiser (FreqViz) and a knobs panel (KnobsComponent).
template <class Processor>
class PluginEditor : public AudioProcessorEditor
{
public:
    PluginEditor (Processor&);   // builds the UI around the given processor
    void resized() override;     // lays out freqViz and knobs

private:
    Processor& processor;        // backing processor (referenced, not owned)
    FreqViz freqViz;             // frequency-response display
    KnobsComponent knobs;        // parameter knob panel

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (PluginEditor)
};
|
// E2E check that switching between micro-frontends swaps both the mounted
// root elements and the injected <script> tags for each app.
describe('React script injection', () => {
    it('should inject appOne script', () => {
        cy.visit('http://localhost:8123');
        /**
         * App one should load
         */
        cy.contains('app-one').click();
        cy.get('div#app-one-root').should('exist');
        cy.get('script#appOne').should('exist');
        /**
         * App two should load
         */
        cy.contains('app-two').click();
        // app-one's root must be unmounted once app-two is active
        cy.get('div#app-one-root').should('not.exist');
        cy.get('div#app-two-root').should('exist');
        cy.get('script#appTwo').should('exist');
    });
});
|
const expect = require('chai').expect
const helper = require('../../../helpers/data/aggregated-data-helper')
const getCaseProgress = require('../../../../app/services/data/get-caseload-progress')
// Handles of the rows inserted by before(); used to clean up in after().
let inserts = []

// Expected aggregate values for a single seeded caseload-progress row;
// every assertion below compares against copies of this shape.
const baseCaseProgressRow = {
  communityLast16Weeks: 10,
  licenseLast16Weeks: 9,
  totalCases: 5,
  warrantsTotal: 240,
  overdueTerminationsTotal: 240,
  unpaidWorkTotal: 240
}
// Integration tests for get-caseload-progress: seeds caseload data for every
// org-unit level, then checks aggregation at each level of the hierarchy
// (offender-manager -> team -> ldu -> region -> hmpps).
describe('services/data/get-org-unit-caseload-progress', function () {
  before(function () {
    // Seed rows for all org units and remember them for cleanup in after().
    return helper.addCaseProgressDataForAllOrgUnits()
      .then(function (builtInserts) {
        inserts = builtInserts
      })
  })
  it('should retrieve current caseload progress for an offender manager', function () {
    return getCaseProgress(inserts.filter((item) => item.table === 'workload_owner')[0].id, 'offender-manager')
      .then(function (results) {
        const omResults = [
          helper.rowGenerator('Test_Forename Test_Surname', baseCaseProgressRow)
        ]
        expect(results).to.eql(omResults)
      })
  })
  it('should retrieve current caseload progress for all workload owners on a team with one OM', function () {
    const teamWithOneOffenderManager = [
      helper.rowGenerator('Test_Forename Test_Surname', baseCaseProgressRow)
    ]
    // index [1] is the single-OM team created by the helper
    return getCaseProgress(inserts.filter((item) => item.table === 'team')[1].id, 'team')
      .then(function (results) {
        expect(results.length).to.eql(1)
        expect(results).to.eql(teamWithOneOffenderManager)
      })
  })
  it('should retrieve current caseload progress for all workload owners on a team with mutiple OMs', function () {
    const teamWithMultipleOffenderManagers = [
      helper.rowGenerator('Test_Forename Test_Surname', baseCaseProgressRow),
      helper.rowGenerator('Test_Forename Test_Surname', baseCaseProgressRow)
    ]
    return getCaseProgress(inserts.filter((item) => item.table === 'team')[0].id, 'team')
      .then(function (results) {
        expect(results.length).to.eql(2)
        expect(results).to.eql(teamWithMultipleOffenderManagers)
      })
  })
  it('should retrieve current caseload progress for all teams in an LDU with multiple teams', function () {
    // rowGenerator's third argument multiplies the base figures (2 OMs here)
    const lduWithMultipleTeams = [
      helper.rowGenerator('Test Team', baseCaseProgressRow, 2),
      helper.rowGenerator('Test Team', baseCaseProgressRow)
    ]
    return getCaseProgress(inserts.filter((item) => item.table === 'ldu')[0].id, 'ldu')
      .then(function (results) {
        expect(results.length).to.eql(2)
        expect(results).to.eql(lduWithMultipleTeams)
      })
  })
  it('should retrieve current caseload progress for all teams in an LDU with one team', function () {
    const lduWithOneTeam = [
      helper.rowGenerator('Test Team', baseCaseProgressRow)
    ]
    return getCaseProgress(inserts.filter((item) => item.table === 'ldu')[1].id, 'ldu')
      .then(function (results) {
        expect(results.length).to.eql(1)
        expect(results).to.eql(lduWithOneTeam)
      })
  })
  it('should retrieve current caseload progress for all LDUs in a region', function () {
    const regionWithTwoLdus = [
      helper.rowGenerator('Test LDU', baseCaseProgressRow, 3),
      helper.rowGenerator('Test LDU', baseCaseProgressRow)
    ]
    return getCaseProgress(inserts.filter((item) => item.table === 'region')[0].id, 'region')
      .then(function (results) {
        expect(results.length).to.eql(2)
        expect(results).to.eql(regionWithTwoLdus)
      })
  })
  it('should retrieve current caseload progress for all regions in the system', function () {
    // Other tests may have seeded extra regions, so only check containment.
    return getCaseProgress(undefined, 'hmpps')
      .then(function (results) {
        expect(results.length).to.be.greaterThan(1)
        expect(results).to.deep.contain(helper.rowGenerator('Test Region', baseCaseProgressRow, 4))
        expect(results).to.deep.contain(helper.rowGenerator('Test Region', baseCaseProgressRow))
      })
  })
  after(function () {
    return helper.removeInsertedData(inserts)
  })
})
|
#!/usr/bin/env python2
# PLC4TRUCKSDuck (c) 2020 National Motor Freight Traffic Association
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import mmap
import pypruss # available only in python 2
import select
import signal
import socket
import struct
import sys
import threading
import time
# --- PRU firmware selection ---------------------------------------------
TARGET_PRU_FW = 'j17084truckduck.bin'
TARGET_PRU_NO = 1
# UDP ports: [0] receives frames to transmit, [1] gets received frames.
UDP_PORTS = (6969, 6970)

# --- Shared DDR memory window -------------------------------------------
DDR_START = 0x10000000  # 256MiB
DDR_VADDR = 0x4a300000
DDR_SIZE = pypruss.ddr_size()
DDR_END = DDR_START + DDR_SIZE

if TARGET_PRU_NO == 0:
    TARGET_PRU_INTERRUPT = pypruss.PRU0_ARM_INTERRUPT
    TARGET_PRU_PRE_SIZE = 0
else:
    TARGET_PRU_INTERRUPT = pypruss.PRU1_ARM_INTERRUPT
    # PRU1's shared region sits after PRU0's 8 KiB slice.
    TARGET_PRU_PRE_SIZE = 8192

SHARED_ADDR = DDR_VADDR + TARGET_PRU_PRE_SIZE
SHARED_OFFSET = SHARED_ADDR - DDR_START
# NOTE(review): SHARED_FILELEN = size + start looks odd for an mmap length;
# presumably intentional to cover the window addressed via DDR_START -- verify.
SHARED_FILELEN = DDR_SIZE + DDR_START

# --- Ring-buffer layout (must mirror j17084truckduck.c) ------------------
PAYLOAD_LEN = 255  # must match the same in j17084truckduck.c
FRAME_SIZE = 256  # must match the same in j17084truckduck.c
RX_RING_BUFFER_LEN = 16  # must match the same in j17084truckduck.c
TX_RING_BUFFER_LEN = 4  # must match the same in j17084truckduck.c
RING_BUFFER_CONSUME_OFFSET = 4  # must match the same in j17084truckduck.c
RING_BUFFER_FRAMES_OFFSET = 8  # must match the same in j17084truckduck.c
RX_RING_BUFFER_VADDR_OFFSET = 0  # must match the same in j17084truckduck.c
RX_RING_BUFFER_SIZE = 4104  # must match the same in j17084truckduck.c
TX_RING_BUFFER_VADDR_OFFSET = 4104  # must match the same in j17084truckduck.c
class PRU_read_thread(threading.Thread):
    """Drain frames from the PRU's RX ring buffer and forward each one as a
    UDP datagram to ('localhost', UDP_PORTS[1])."""

    def __init__(self, stopped, socket, ddr_mem):
        super(PRU_read_thread, self).__init__()
        self.ddr_mem = ddr_mem
        # Ring buffer struct layout: [produce u32][consume u32][frames...]
        self.struct_start = DDR_START + RX_RING_BUFFER_VADDR_OFFSET
        self.frames_base = self.struct_start + RING_BUFFER_FRAMES_OFFSET
        self.frames_ptr = self.frames_base
        self.calls = 0  # number of PRU interrupts serviced (debug counter)
        self.socket = socket
        self.stopped = stopped

    def kill_me(self):
        # Ask run() to exit at its next loop check.
        self.stopped.set()

    def join(self, timeout=None):
        super(PRU_read_thread, self).join(timeout)
        # Debug dump: print the raw RX ring buffer contents as hex rows.
        data = self.ddr_mem[DDR_START:DDR_START + RX_RING_BUFFER_SIZE]
        msg = map(lambda x: "{:02x}".format(ord(x)), data)
        for i in range(8, len(msg), FRAME_SIZE):
            print(",".join(msg[i:i + FRAME_SIZE]))

    def run(self):
        old_consume = 0
        while not self.stopped.is_set():
            # Block until the PRU signals that frames were produced.
            pypruss.wait_for_event(TARGET_PRU_NO)
            pypruss.clear_event(TARGET_PRU_NO, TARGET_PRU_INTERRUPT)
            self.calls += 1
            (produce, consume) = \
                struct.unpack("LL", self.ddr_mem[DDR_START:DDR_START +
                                                 RING_BUFFER_FRAMES_OFFSET])
            while consume != produce:
                # Each slot is a 1-byte length followed by the payload.
                length = struct.unpack("B", self.ddr_mem[self.frames_ptr])[0]
                frame = \
                    struct.unpack("B"*length,
                                  self.ddr_mem[self.frames_ptr+1:
                                               self.frames_ptr+1+length])
                self.socket.sendto(''.join(map(chr, frame)),
                                   ('localhost', UDP_PORTS[1]))
                consume = (consume + 1) % RX_RING_BUFFER_LEN
                self.frames_ptr = self.frames_base + \
                    (consume * FRAME_SIZE)
            if old_consume != consume:
                # Publish our consume index so the PRU can reuse slots.
                self.ddr_mem[DDR_START + RING_BUFFER_CONSUME_OFFSET:
                             DDR_START + RING_BUFFER_FRAMES_OFFSET] = \
                    struct.pack('L', consume)
            old_consume = consume
class PRU_write_thread(threading.Thread):
    """Receive frames via UDP on the shared socket and queue them into the
    PRU's TX ring buffer for transmission."""

    def __init__(self, stopped, socket, ddr_mem):
        super(PRU_write_thread, self).__init__()
        self.ddr_mem = ddr_mem
        # Ring buffer struct layout: [produce u32][consume u32][frames...]
        self.struct_start = DDR_START + TX_RING_BUFFER_VADDR_OFFSET
        self.frames_base = self.struct_start + RING_BUFFER_FRAMES_OFFSET
        self.frames_ptr = self.frames_base
        self.socket = socket
        self.stopped = stopped

    def kill_me(self):
        # Ask run() to exit at its next loop check.
        self.stopped.set()

    def join(self, timeout=None):
        super(PRU_write_thread, self).join(timeout)

    def run(self):
        while not self.stopped.is_set():
            # Poll with a timeout so the stop flag is checked periodically.
            ready = select.select([self.socket], [], [], 0.5)[0]
            if ready == []:
                continue
            frame = self.socket.recv(PAYLOAD_LEN)
            (produce, consume) = \
                struct.unpack('LL',
                              self.ddr_mem[self.struct_start:
                                           self.struct_start +
                                           RING_BUFFER_FRAMES_OFFSET])
            # Spin (with a short sleep) while the ring buffer is full.
            while (produce + 1) % TX_RING_BUFFER_LEN == consume:
                sys.stderr.write("buffer full, waiting\n")
                time.sleep(0.003)
                (produce, consume) = \
                    struct.unpack('LL',
                                  self.ddr_mem[self.struct_start:
                                               self.struct_start +
                                               RING_BUFFER_FRAMES_OFFSET])
            if len(frame) > PAYLOAD_LEN:
                # truncate at maximum payload length
                frame = frame[:PAYLOAD_LEN]
            # Slot layout: 1-byte length followed by the payload bytes.
            len_byte = struct.pack('B', len(frame))
            self.ddr_mem[self.frames_ptr] = len_byte
            frame_offset = self.frames_ptr + 1
            self.ddr_mem[frame_offset:frame_offset + len(frame)] = \
                frame
            produce = (produce + 1) % TX_RING_BUFFER_LEN
            self.frames_ptr = \
                self.frames_base + (produce * FRAME_SIZE)
            # Publish the new produce index (first u32 of the struct).
            self.ddr_mem[self.struct_start:self.struct_start +
                         RING_BUFFER_CONSUME_OFFSET] = \
                struct.pack('L', produce)
# --- Script entry: wire up the socket, shared memory, PRU and threads ----
pypruss.modprobe()

sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
try:
    sock.bind(('localhost', UDP_PORTS[0]))
except socket.error as e:
    # BUG FIX: was `except OSError`, which on Python 2 does NOT catch
    # socket.error (a subclass of IOError, not OSError), so a bind failure
    # crashed with an unhandled exception. socket.error also works on
    # Python 3, where it is an alias of OSError.
    print(e)
    sys.exit(-1)

# Map the DDR window shared with the PRU.
f = open("/dev/mem", "r+b")
shared_mem = mmap.mmap(f.fileno(), SHARED_FILELEN, offset=SHARED_OFFSET)

if TARGET_PRU_NO == 1:
    pypruss.init()
pypruss.open(TARGET_PRU_NO)
if TARGET_PRU_NO == 1:
    pypruss.pruintc_init()

stopped = threading.Event()
stopped.clear()
pru_stop_thread = PRU_read_thread(stopped, sock, shared_mem)
pru_send_thread = PRU_write_thread(stopped, sock, shared_mem)
pru_stop_thread.start()
pru_send_thread.start()
pypruss.exec_program(TARGET_PRU_NO, TARGET_PRU_FW)


def signal_handler(sig, frame):
    # SIGINT: ask both worker threads to stop, then wait for them.
    # (Parameters renamed from the original to avoid shadowing the
    # `signal` module.)
    pru_stop_thread.kill_me()
    pru_send_thread.kill_me()
    pru_stop_thread.join()
    pru_send_thread.join()


signal.signal(signal.SIGINT, signal_handler)

# Joining twice is harmless; these block until the handler stops the threads.
pru_stop_thread.join()
pru_send_thread.join()
pypruss.exit()
|
"use strict";
/**
* Kubernetes
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* The version of the OpenAPI document: v1.20.2
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.V1RuleWithOperations = void 0;
/**
* RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.
*/
// NOTE(review): this file is auto-generated by OpenAPI Generator (see the
// header comment) -- regenerate from the OpenAPI document rather than
// hand-editing the class shape.
class V1RuleWithOperations {
    // Serialization metadata; presumably consumed by the generated
    // ObjectSerializer elsewhere in the client -- verify before relying on it.
    static getAttributeTypeMap() {
        return V1RuleWithOperations.attributeTypeMap;
    }
}
exports.V1RuleWithOperations = V1RuleWithOperations;
V1RuleWithOperations.discriminator = undefined;
// Maps JS property names to wire names and declared types.
V1RuleWithOperations.attributeTypeMap = [
    {
        "name": "apiGroups",
        "baseName": "apiGroups",
        "type": "Array<string>"
    },
    {
        "name": "apiVersions",
        "baseName": "apiVersions",
        "type": "Array<string>"
    },
    {
        "name": "operations",
        "baseName": "operations",
        "type": "Array<string>"
    },
    {
        "name": "resources",
        "baseName": "resources",
        "type": "Array<string>"
    },
    {
        "name": "scope",
        "baseName": "scope",
        "type": "string"
    }
];
//# sourceMappingURL=v1RuleWithOperations.js.map
|
import aiolmdb
import asynctest
import gc
import os
import tempfile
import traceback
import shutil
import stat
import sys
class AiolmdbTestCase(asynctest.TestCase):
    """Base test case providing temporary LMDB environments, directories and
    files, all cleaned up (best effort) in tearDown."""

    def setUp(self):
        # Cleanup callbacks, run in registration order by tearDown.
        self.cleanups = []

    def tearDown(self):
        for cleanup in self.cleanups:
            try:
                cleanup()
            except Exception:
                # Best effort: report but never mask a test failure.
                traceback.print_exc()

    def create_dir(self, create=True):
        """Return a fresh temp directory path; with create=False the
        directory is removed so only the (unused) name is returned."""
        # FIX: removed an unused tempfile.TemporaryDirectory() that was
        # created and registered for cleanup but whose path was never used.
        path = tempfile.mkdtemp(prefix='lmdb_test')
        assert path is not None, 'tempfile.mkdtemp failed'
        if not create:
            os.rmdir(path)
        self.cleanups.append(lambda: shutil.rmtree(path, ignore_errors=True))
        if hasattr(path, 'decode'):
            # Python 2 compatibility: normalize bytes paths to text.
            path = path.decode(sys.getfilesystemencoding())
        return path

    def create_env(self, path=None, max_dbs=10, **kwargs):
        """Open an aiolmdb environment (in a fresh dir unless *path* is
        given) and return (path, env); the env is closed in tearDown."""
        if not path:
            path = self.create_dir()
        env = aiolmdb.open(path, max_dbs=max_dbs, **kwargs)
        self.cleanups.append(env.close)
        return path, env

    def create_file(self, create=True):
        """Return a fresh temp file path; with create=False the file is
        unlinked so only the name is returned. The LMDB '-lock' sibling is
        cleaned up too."""
        fd, path = tempfile.mkstemp(prefix='lmdb_test')
        assert path is not None, 'tempfile.mkstemp failed'
        os.close(fd)
        if not create:
            os.unlink(path)
        self.cleanups.append(lambda: os.path.exists(path) and os.unlink(path))
        pathlock = path + '-lock'
        self.cleanups.append(lambda: os.path.exists(
            pathlock) and os.unlink(pathlock))
        if hasattr(path, 'decode'):
            path = path.decode(sys.getfilesystemencoding())
        return path

    def path_mode(self, path):
        """Return the permission bits of *path*."""
        return stat.S_IMODE(os.stat(path).st_mode)
def debug_collect():
    """Force a garbage-collection pass, enabling leak debugging when the
    interpreter supports it."""
    can_debug = hasattr(gc, 'set_debug') and hasattr(gc, 'get_debug')
    if not can_debug:
        # PyPy doesn't collect objects with __del__ on the first attempt,
        # so run several passes.
        for _ in range(10):
            gc.collect()
        return
    saved_flags = gc.get_debug()
    gc.set_debug(gc.DEBUG_LEAK)
    gc.collect()
    gc.set_debug(saved_flags)
|
from __future__ import print_function, division
from math import log as _log
from .sympify import _sympify
from .cache import cacheit
from .singleton import S
from .expr import Expr
from .evalf import PrecisionExhausted
from .function import (_coeff_isneg, expand_complex, expand_multinomial,
expand_mul)
from .logic import fuzzy_bool, fuzzy_not, fuzzy_and
from .compatibility import as_int, range
from .evaluate import global_evaluate
from sympy.utilities.iterables import sift
from mpmath.libmp import sqrtrem as mpmath_sqrtrem
from math import sqrt as _sqrt
def isqrt(n):
    """Return the largest integer less than or equal to sqrt(n)."""
    # Below this cutoff, truncating the C double sqrt gives the exact
    # integer square root; above it, defer to the exact integer routine.
    if n >= 17984395633462800708566937239552:
        return integer_nthroot(int(n), 2)[0]
    return int(_sqrt(n))
def integer_nthroot(y, n):
    """
    Return a tuple containing x = floor(y**(1/n))
    and a boolean indicating whether the result is exact (that is,
    whether x**n == y).

    Examples
    ========

    >>> from sympy import integer_nthroot
    >>> integer_nthroot(16, 2)
    (4, True)
    >>> integer_nthroot(26, 2)
    (5, False)

    To simply determine if a number is a perfect square, the is_square
    function should be used:

    >>> from sympy.ntheory.primetest import is_square
    >>> is_square(26)
    False

    See Also
    ========
    sympy.ntheory.primetest.is_square
    integer_log
    """
    y, n = as_int(y), as_int(n)
    if y < 0:
        raise ValueError("y must be nonnegative")
    if n < 1:
        raise ValueError("n must be positive")
    # Trivial cases: 0**(1/n)=0, 1**(1/n)=1, first root, and n so large
    # that the root can only be 1.
    if y in (0, 1):
        return y, True
    if n == 1:
        return y, True
    if n == 2:
        # Exact integer square root via mpmath.
        x, rem = mpmath_sqrtrem(y)
        return int(x), not rem
    if n > y:
        return 1, False
    # Get initial estimate for Newton's method. Care must be taken to
    # avoid overflow
    try:
        guess = int(y**(1./n) + 0.5)
    except OverflowError:
        # y**(1/n) overflows a float: build the guess from the bit length.
        exp = _log(y, 2)/n
        if exp > 53:
            shift = int(exp - 53)
            guess = int(2.0**(exp - shift) + 1) << shift
        else:
            guess = int(2.0**exp)
    if guess > 2**50:
        # Newton iteration
        xprev, x = -1, guess
        while 1:
            t = x**(n - 1)
            xprev, x = x, ((n - 1)*x + y//t)//n
            if abs(x - xprev) < 2:
                break
    else:
        x = guess
    # Compensate: the estimate may be off by one in either direction.
    t = x**n
    while t < y:
        x += 1
        t = x**n
    while t > y:
        x -= 1
        t = x**n
    return int(x), t == y  # int converts long to int if possible
def integer_log(y, x):
    """Returns (e, bool) where e is the largest nonnegative integer
    such that |y| >= |x**e| and bool is True if y == x**e

    Examples
    ========

    >>> from sympy import integer_log
    >>> integer_log(125, 5)
    (3, True)
    >>> integer_log(17, 9)
    (1, False)
    >>> integer_log(4, -2)
    (2, True)
    >>> integer_log(-125,-5)
    (3, True)

    See Also
    ========
    integer_nthroot
    sympy.ntheory.primetest.is_square
    sympy.ntheory.factor_.multiplicity
    sympy.ntheory.factor_.perfect_power
    """
    if x == 1:
        raise ValueError('x cannot take value as 1')
    if y == 0:
        raise ValueError('y cannot take value as 0')

    if x in (-2, 2):
        # Fast path for base +/-2: the exponent is the bit length minus 1.
        x = int(x)
        y = as_int(y)
        e = y.bit_length() - 1
        return e, x**e == y
    if x < 0:
        # Reduce to the positive-base case; the exactness flag must also
        # respect the parity of the exponent vs. the sign of y.
        n, b = integer_log(y if y > 0 else -y, -x)
        return n, b and bool(n % 2 if y < 0 else not n % 2)

    x = as_int(x)
    y = as_int(y)
    r = e = 0
    # Repeated-squaring search: divide y by growing powers of x, tracking
    # whether any division ever left a remainder (r).
    while y >= x:
        d = x
        m = 1
        while y >= d:
            y, rem = divmod(y, d)
            r = r or rem
            e += m
        if y > d:
            d *= d
            m *= 2
    return e, r == 0 and y == 1
class Pow(Expr):
"""
Defines the expression x**y as "x raised to a power y"
Singleton definitions involving (0, 1, -1, oo, -oo, I, -I):
+--------------+---------+-----------------------------------------------+
| expr | value | reason |
+==============+=========+===============================================+
| z**0 | 1 | Although arguments over 0**0 exist, see [2]. |
+--------------+---------+-----------------------------------------------+
| z**1 | z | |
+--------------+---------+-----------------------------------------------+
| (-oo)**(-1) | 0 | |
+--------------+---------+-----------------------------------------------+
| (-1)**-1 | -1 | |
+--------------+---------+-----------------------------------------------+
| S.Zero**-1 | zoo | This is not strictly true, as 0**-1 may be |
| | | undefined, but is convenient in some contexts |
| | | where the base is assumed to be positive. |
+--------------+---------+-----------------------------------------------+
| 1**-1 | 1 | |
+--------------+---------+-----------------------------------------------+
| oo**-1 | 0 | |
+--------------+---------+-----------------------------------------------+
| 0**oo | 0 | Because for all complex numbers z near |
| | | 0, z**oo -> 0. |
+--------------+---------+-----------------------------------------------+
| 0**-oo | zoo | This is not strictly true, as 0**oo may be |
| | | oscillating between positive and negative |
| | | values or rotating in the complex plane. |
| | | It is convenient, however, when the base |
| | | is positive. |
+--------------+---------+-----------------------------------------------+
| 1**oo | nan | Because there are various cases where |
| 1**-oo | | lim(x(t),t)=1, lim(y(t),t)=oo (or -oo), |
| | | but lim( x(t)**y(t), t) != 1. See [3]. |
+--------------+---------+-----------------------------------------------+
| b**zoo | nan | Because b**z has no limit as z -> zoo |
+--------------+---------+-----------------------------------------------+
| (-1)**oo | nan | Because of oscillations in the limit. |
| (-1)**(-oo) | | |
+--------------+---------+-----------------------------------------------+
| oo**oo | oo | |
+--------------+---------+-----------------------------------------------+
| oo**-oo | 0 | |
+--------------+---------+-----------------------------------------------+
| (-oo)**oo | nan | |
| (-oo)**-oo | | |
+--------------+---------+-----------------------------------------------+
| oo**I | nan | oo**e could probably be best thought of as |
| (-oo)**I | | the limit of x**e for real x as x tends to |
| | | oo. If e is I, then the limit does not exist |
| | | and nan is used to indicate that. |
+--------------+---------+-----------------------------------------------+
| oo**(1+I) | zoo | If the real part of e is positive, then the |
| (-oo)**(1+I) | | limit of abs(x**e) is oo. So the limit value |
| | | is zoo. |
+--------------+---------+-----------------------------------------------+
| oo**(-1+I) | 0 | If the real part of e is negative, then the |
| -oo**(-1+I) | | limit is 0. |
+--------------+---------+-----------------------------------------------+
Because symbolic computations are more flexible that floating point
calculations and we prefer to never return an incorrect answer,
we choose not to conform to all IEEE 754 conventions. This helps
us avoid extra test-case code in the calculation of limits.
See Also
========
sympy.core.numbers.Infinity
sympy.core.numbers.NegativeInfinity
sympy.core.numbers.NaN
References
==========
.. [1] https://en.wikipedia.org/wiki/Exponentiation
.. [2] https://en.wikipedia.org/wiki/Exponentiation#Zero_to_the_power_of_zero
.. [3] https://en.wikipedia.org/wiki/Indeterminate_forms
"""
is_Pow = True
__slots__ = ['is_commutative']
@cacheit
def __new__(cls, b, e, evaluate=None):
    """Construct b**e, applying the singleton simplifications documented
    in the class docstring table when ``evaluate`` is true."""
    if evaluate is None:
        evaluate = global_evaluate[0]
    from sympy.functions.elementary.exponential import exp_polar

    b = _sympify(b)
    e = _sympify(e)
    if evaluate:
        if e is S.ComplexInfinity:
            return S.NaN
        if e is S.Zero:
            return S.One
        elif e is S.One:
            return b
        elif e == -1 and not b:
            return S.ComplexInfinity
        # Only perform autosimplification if exponent or base is a Symbol or number
        elif (b.is_Symbol or b.is_number) and (e.is_Symbol or e.is_number) and\
                e.is_integer and _coeff_isneg(b):
            # (-b)**even -> b**even; (-b)**odd -> -(b**odd)
            if e.is_even:
                b = -b
            elif e.is_odd:
                return -Pow(-b, e)
        if S.NaN in (b, e):  # XXX S.NaN**x -> S.NaN under assumption that x != 0
            return S.NaN
        elif b is S.One:
            if abs(e).is_infinite:
                return S.NaN
            return S.One
        else:
            # recognize base as E
            if not e.is_Atom and b is not S.Exp1 and not isinstance(b, exp_polar):
                from sympy import numer, denom, log, sign, im, factor_terms
                c, ex = factor_terms(e, sign=False).as_coeff_Mul()
                den = denom(ex)
                if isinstance(den, log) and den.args[0] == b:
                    return S.Exp1**(c*numer(ex))
                elif den.is_Add:
                    s = sign(im(b))
                    if s.is_Number and s and den == \
                            log(-factor_terms(b, sign=False)) + s*S.ImaginaryUnit*S.Pi:
                        return S.Exp1**(c*numer(ex))

            # Give the base a chance to simplify the power itself.
            obj = b._eval_power(e)
            if obj is not None:
                return obj
    obj = Expr.__new__(cls, b, e)
    obj = cls._exec_constructor_postprocessors(obj)
    if not isinstance(obj, Pow):
        return obj
    obj.is_commutative = (b.is_commutative and e.is_commutative)
    return obj
@property
def base(self):
    """The base ``b`` of ``b**e``."""
    return self._args[0]
@property
def exp(self):
    """The exponent ``e`` of ``b**e``."""
    return self._args[1]
@classmethod
def class_key(cls):
    """Sort key used for canonical ordering of expression types."""
    return 3, 2, cls.__name__
def _eval_refine(self, assumptions):
    """Simplify (-b)**e under the given assumptions: drop the sign for an
    even integer exponent, pull it out for an odd one."""
    from sympy.assumptions.ask import ask, Q
    b, e = self.as_base_exp()
    if ask(Q.integer(e), assumptions) and _coeff_isneg(b):
        if ask(Q.even(e), assumptions):
            return Pow(-b, e)
        elif ask(Q.odd(e), assumptions):
            return -Pow(-b, e)
def _eval_power(self, other):
    """Evaluate (b**e)**other, combining the exponents only when a branch
    factor ``s`` (+/-1 or a unit phase) can be determined so that the
    result s*b**(e*other) is valid on the principal branch."""
    from sympy import Abs, arg, exp, floor, im, log, re, sign
    b, e = self.as_base_exp()
    if b is S.NaN:
        return (b**e)**other  # let __new__ handle it

    s = None
    if other.is_integer:
        # Integer outer exponents always distribute.
        s = 1
    elif b.is_polar:  # e.g. exp_polar, besselj, var('p', polar=True)...
        s = 1
    elif e.is_extended_real is not None:
        # helper functions ===========================
        def _half(e):
            """Return True if the exponent has a literal 2 as the
            denominator, else None."""
            if getattr(e, 'q', None) == 2:
                return True
            n, d = e.as_numer_denom()
            if n.is_integer and d == 2:
                return True

        def _n2(e):
            """Return ``e`` evaluated to a Number with 2 significant
            digits, else None."""
            try:
                rv = e.evalf(2, strict=True)
                if rv.is_Number:
                    return rv
            except PrecisionExhausted:
                pass
        # ===================================================
        if e.is_extended_real:
            # we need _half(other) with constant floor or
            # floor(S.Half - e*arg(b)/2/pi) == 0

            # handle -1 as special case
            if e == -1:
                # floor arg. is 1/2 + arg(b)/2/pi
                if _half(other):
                    if b.is_negative is True:
                        return S.NegativeOne**other*Pow(-b, e*other)
                    if b.is_extended_real is False:
                        return Pow(b.conjugate()/Abs(b)**2, other)
            elif e.is_even:
                # b**even is insensitive to the sign/phase of b.
                if b.is_extended_real:
                    b = abs(b)
                if b.is_imaginary:
                    b = abs(im(b))*S.ImaginaryUnit

            if (abs(e) < 1) == True or e == 1:
                s = 1  # floor = 0
            elif b.is_extended_nonnegative:
                s = 1  # floor = 0
            elif re(b).is_extended_nonnegative and (abs(e) < 2) == True:
                s = 1  # floor = 0
            elif fuzzy_not(im(b).is_zero) and abs(e) == 2:
                s = 1  # floor = 0
            elif _half(other):
                s = exp(2*S.Pi*S.ImaginaryUnit*other*floor(
                    S.Half - e*arg(b)/(2*S.Pi)))
                if s.is_extended_real and _n2(sign(s) - s) == 0:
                    s = sign(s)
                else:
                    s = None
        else:
            # e.is_extended_real is False requires:
            #     _half(other) with constant floor or
            #     floor(S.Half - im(e*log(b))/2/pi) == 0
            try:
                s = exp(2*S.ImaginaryUnit*S.Pi*other*
                    floor(S.Half - im(e*log(b))/2/S.Pi))
                # be careful to test that s is -1 or 1 b/c sign(I) == I:
                # so check that s is real
                if s.is_extended_real and _n2(sign(s) - s) == 0:
                    s = sign(s)
                else:
                    s = None
            except PrecisionExhausted:
                s = None

    if s is not None:
        return s*Pow(b, e*other)
def _eval_Mod(self, q):
    """Evaluate ``self % q`` for positive integer exponents, using
    built-in modular exponentiation (and totient reduction for very
    large exponents) when base, exponent and modulus are all Integers."""
    if self.exp.is_integer and self.exp.is_positive:
        if q.is_integer and self.base % q == 0:
            return S.Zero
        '''
        For unevaluated Integer power, use built-in pow modular
        exponentiation, if powers are not too large wrt base.
        '''
        if self.base.is_Integer and self.exp.is_Integer and q.is_Integer:
            b, e, m = int(self.base), int(self.exp), int(q)
            # For very large powers, use totient reduction if e >= lg(m).
            # Bound on m, is for safe factorization memory wise ie m^(1/4).
            # For pollard-rho to be faster than built-in pow lg(e) > m^(1/4)
            # check is added.
            mb = m.bit_length()
            if mb <= 80 and e >= mb and e.bit_length()**4 >= m:
                from sympy.ntheory import totient
                phi = totient(m)
                return pow(b, phi + e%phi, m)
            else:
                return pow(b, e, m)
def _eval_is_even(self):
    """b**e is even exactly when b is, for positive integer e."""
    e = self.exp
    if not (e.is_integer and e.is_positive):
        return None
    return self.base.is_even
def _eval_is_negative(self):
    """Finite-sense negativity: extended-negative plus finiteness."""
    extended = Pow._eval_is_extended_negative(self)
    return self.is_finite if extended is True else extended
def _eval_is_positive(self):
    """Finite-sense positivity: extended-positive plus finiteness."""
    extended = Pow._eval_is_extended_positive(self)
    return self.is_finite if extended is True else extended
def _eval_is_extended_positive(self):
    """Three-valued positivity of b**e, by cases on the sign/kind of b."""
    from sympy import log
    if self.base == self.exp:
        # x**x with x >= 0 is positive (0**0 == 1 by convention here).
        if self.base.is_extended_nonnegative:
            return True
    elif self.base.is_positive:
        if self.exp.is_extended_real:
            return True
    elif self.base.is_extended_negative:
        if self.exp.is_even:
            return True
        if self.exp.is_odd:
            return False
    elif self.base.is_zero:
        # 0**e > 0 only for e == 0 (which gives 1).
        if self.exp.is_extended_real:
            return self.exp.is_zero
    elif self.base.is_extended_nonpositive:
        if self.exp.is_odd:
            return False
    elif self.base.is_imaginary:
        if self.exp.is_integer:
            # I-like bases cycle with period 4: only e % 4 == 0 is positive.
            m = self.exp % 4
            if m.is_zero:
                return True
            if m.is_integer and m.is_zero is False:
                return False
        if self.exp.is_imaginary:
            return log(self.base).is_imaginary
def _eval_is_extended_negative(self):
    """Three-valued negativity of b**e, by cases on the sign of b."""
    if self.base.is_extended_negative:
        # Finiteness matters: (-oo)**odd is -oo but e.g. (-oo)**(-oo) is nan.
        if self.exp.is_odd and self.base.is_finite:
            return True
        if self.exp.is_even:
            return False
    elif self.base.is_extended_positive:
        if self.exp.is_extended_real:
            return False
    elif self.base.is_zero:
        if self.exp.is_extended_real:
            return False
    elif self.base.is_extended_nonnegative:
        if self.exp.is_extended_nonnegative:
            return False
    elif self.base.is_extended_nonpositive:
        if self.exp.is_even:
            return False
    elif self.base.is_extended_real:
        if self.exp.is_even:
            return False
def _eval_is_zero(self):
    """Three-valued test for b**e == 0."""
    if self.base.is_zero:
        # 0**positive == 0; 0**nonpositive is 1 or zoo, never 0.
        if self.exp.is_extended_positive:
            return True
        elif self.exp.is_extended_nonpositive:
            return False
    elif self.base.is_zero is False:
        if self.exp.is_negative:
            # b**(-e) -> 0 only when b is infinite.
            return self.base.is_infinite
        elif self.exp.is_nonnegative:
            return False
        elif self.exp.is_infinite:
            # |b| < 1: b**oo -> 0; |b| > 1: b**(-oo) -> 0.
            if (1 - abs(self.base)).is_extended_positive:
                return self.exp.is_extended_positive
            elif (1 - abs(self.base)).is_extended_negative:
                return self.exp.is_extended_negative
    else:
        # when self.base.is_zero is None
        return None
def _eval_is_integer(self):
    """Three-valued test for b**e being an integer."""
    b, e = self.args
    if b.is_rational:
        if b.is_integer is False and e.is_positive:
            return False  # rat**nonneg
    if b.is_integer and e.is_integer:
        # (-1)**integer is always +/-1; otherwise need e >= 0.
        if b is S.NegativeOne:
            return True
        if e.is_nonnegative or e.is_positive:
            return True
    if b.is_integer and e.is_negative and (e.is_finite or e.is_integer):
        # int**(-e) is a proper fraction unless b is +/-1.
        if fuzzy_not((b - 1).is_zero) and fuzzy_not((b + 1).is_zero):
            return False
    if b.is_Number and e.is_Number:
        # Concrete numbers: just evaluate and inspect.
        check = self.func(*self.args)
        return check.is_Integer
def _eval_is_extended_real(self):
    """Three-valued test for b**e being extended real, by cases on
    whether base and exponent are real or imaginary."""
    from sympy import arg, exp, log, Mul
    real_b = self.base.is_extended_real
    if real_b is None:
        # Unknown base: the one recoverable case is exp(I*t)**e.
        if self.base.func == exp and self.base.args[0].is_imaginary:
            return self.exp.is_imaginary
        return
    real_e = self.exp.is_extended_real
    if real_e is None:
        return
    if real_b and real_e:
        if self.base.is_extended_positive:
            return True
        elif self.base.is_extended_nonnegative:
            if self.exp.is_extended_nonnegative:
                return True
        else:
            if self.exp.is_integer:
                return True
            elif self.base.is_extended_negative:
                # neg**(noninteger rational) is complex (principal branch).
                if self.exp.is_Rational:
                    return False
    if real_e and self.exp.is_extended_negative:
        # Reduce b**(-e) to the reciprocal's question.
        return Pow(self.base, -self.exp).is_extended_real
    im_b = self.base.is_imaginary
    im_e = self.exp.is_imaginary
    if im_b:
        if self.exp.is_integer:
            # (c*I)**even is real, **odd is imaginary.
            if self.exp.is_even:
                return True
            elif self.exp.is_odd:
                return False
        elif im_e and log(self.base).is_imaginary:
            return True
        elif self.exp.is_Add:
            # Split off an integer part of the exponent and recurse.
            c, a = self.exp.as_coeff_Add()
            if c and c.is_Integer:
                return Mul(
                    self.base**c, self.base**a, evaluate=False).is_extended_real
        elif self.base in (-S.ImaginaryUnit, S.ImaginaryUnit):
            if (self.exp/2).is_integer is False:
                return False
    if real_b and im_e:
        if self.base is S.NegativeOne:
            return True
        c = self.exp.coeff(S.ImaginaryUnit)
        if c:
            # b**(c*I) real iff c*log(b)/pi is an integer.
            ok = (c*log(self.base)/S.Pi).is_Integer
            if ok is not None:
                return ok
    if real_b is False:  # we already know it's not imag
        i = arg(self.base)*self.exp/S.Pi
        return i.is_integer
def _eval_is_complex(self):
    """True when both base and exponent are known complex; else unknown."""
    for a in self.args:
        if not a.is_complex:
            return None
    return True
def _eval_is_imaginary(self):
    """Three-valued test for b**e being purely imaginary."""
    from sympy import arg, log
    if self.base.is_imaginary:
        if self.exp.is_integer:
            # (c*I)**e is imaginary exactly for odd integer e.
            odd = self.exp.is_odd
            if odd is not None:
                return odd
            return

    if self.exp.is_imaginary:
        imlog = log(self.base).is_imaginary
        if imlog is not None:
            return False  # I**i -> real; (2*I)**i -> complex ==> not imaginary

    if self.base.is_extended_real and self.exp.is_extended_real:
        if self.base.is_positive:
            return False
        else:
            rat = self.exp.is_rational
            if not rat:
                return rat
            if self.exp.is_integer:
                return False
            else:
                # neg**(odd/2) is imaginary; other rationals are not.
                half = (2*self.exp).is_integer
                if half:
                    return self.base.is_negative
                return half

    if self.base.is_extended_real is False:  # we already know it's not imag
        i = arg(self.base)*self.exp/S.Pi
        isodd = (2*i).is_odd
        if isodd is not None:
            return isodd

    if self.exp.is_negative:
        return (1/self).is_imaginary
def _eval_is_odd(self):
    """Three-valued test for b**e being odd (integer exponents only)."""
    if self.exp.is_integer:
        if self.exp.is_positive:
            return self.base.is_odd
        elif self.exp.is_nonnegative and self.base.is_odd:
            # odd**0 == 1 and odd**positive are both odd.
            return True
        elif self.base is S.NegativeOne:
            # (-1)**integer is +/-1, hence odd.
            return True
def _eval_is_finite(self):
    """Three-valued test for b**e being finite."""
    if self.exp.is_negative:
        # b**(-e): 0**(-e) diverges; infinite or nonzero b gives finite.
        if self.base.is_zero:
            return False
        if self.base.is_infinite or self.base.is_nonzero:
            return True
    c1 = self.base.is_finite
    if c1 is None:
        return
    c2 = self.exp.is_finite
    if c2 is None:
        return
    if c1 and c2:
        # Finite**finite is finite except possibly 0**negative.
        if self.exp.is_nonnegative or fuzzy_not(self.base.is_zero):
            return True
def _eval_is_prime(self):
    '''
    An integer raised to the n(>=2)-th power cannot be a prime.
    '''
    if self.base.is_integer and self.exp.is_integer and (self.exp - 1).is_positive:
        return False
def _eval_is_composite(self):
    """
    A power is composite if both base and exponent are greater than 1
    """
    # The second disjunct handles base < -1 with even positive exponent,
    # which yields a positive perfect power > 1.
    if (self.base.is_integer and self.exp.is_integer and
        ((self.base - 1).is_positive and (self.exp - 1).is_positive or
        (self.base + 1).is_negative and self.exp.is_positive and self.exp.is_even)):
        return True
def _eval_is_polar(self):
return self.base.is_polar
    def _eval_subs(self, old, new):
        """Substitute ``old`` with ``new`` inside this power.

        Handles base substitution, matching unevaluated powers with equal
        exponents or equal bases (combining exponents where valid), and the
        exp(...) <-> b**e correspondence for positive real bases.
        """
        from sympy import exp, log, Symbol
        def _check(ct1, ct2, old):
            """Return (bool, pow, remainder_pow) where, if bool is True, then the
            exponent of Pow `old` will combine with `pow` so the substitution
            is valid, otherwise bool will be False.
            For noncommutative objects, `pow` will be an integer, and a factor
            `Pow(old.base, remainder_pow)` needs to be included. If there is
            no such factor, None is returned. For commutative objects,
            remainder_pow is always None.
            cti are the coefficient and terms of an exponent of self or old
            In this _eval_subs routine a change like (b**(2*x)).subs(b**x, y)
            will give y**2 since (b**x)**2 == b**(2*x); if that equality does
            not hold then the substitution should not occur so `bool` will be
            False.
            """
            coeff1, terms1 = ct1
            coeff2, terms2 = ct2
            if terms1 == terms2:
                if old.is_commutative:
                    # Allow fractional powers for commutative objects
                    pow = coeff1/coeff2
                    try:
                        as_int(pow, strict=False)
                        combines = True
                    except ValueError:
                        combines = isinstance(Pow._eval_power(
                            Pow(*old.as_base_exp(), evaluate=False),
                            pow), (Pow, exp, Symbol))
                    return combines, pow, None
                else:
                    # With noncommutative symbols, substitute only integer powers
                    if not isinstance(terms1, tuple):
                        terms1 = (terms1,)
                    if not all(term.is_integer for term in terms1):
                        return False, None, None
                    try:
                        # Round pow toward zero
                        pow, remainder = divmod(as_int(coeff1), as_int(coeff2))
                        if pow < 0 and remainder != 0:
                            pow += 1
                            remainder -= as_int(coeff2)
                        if remainder == 0:
                            remainder_pow = None
                        else:
                            remainder_pow = Mul(remainder, *terms1)
                        return True, pow, remainder_pow
                    except ValueError:
                        # Can't substitute
                        pass
            return False, None, None
        # direct replacement of the base: (b**e).subs(b, new) -> new**e'
        if old == self.base:
            return new**self.exp._subs(old, new)
        # issue 10829: (4**x - 3*y + 2).subs(2**x, y) -> y**2 - 3*y + 2
        if isinstance(old, self.func) and self.exp == old.exp:
            l = log(self.base, old.base)
            if l.is_Number:
                return Pow(new, l)
        # same base: try to express self's exponent as a multiple of old's
        if isinstance(old, self.func) and self.base == old.base:
            if self.exp.is_Add is False:
                ct1 = self.exp.as_independent(Symbol, as_Add=False)
                ct2 = old.exp.as_independent(Symbol, as_Add=False)
                ok, pow, remainder_pow = _check(ct1, ct2, old)
                if ok:
                    # issue 5180: (x**(6*y)).subs(x**(3*y),z)->z**2
                    result = self.func(new, pow)
                    if remainder_pow is not None:
                        result = Mul(result, Pow(old.base, remainder_pow))
                    return result
            else:  # b**(6*x + a).subs(b**(3*x), y) -> y**2 * b**a
                # exp(exp(x) + exp(x**2)).subs(exp(exp(x)), w) -> w * exp(exp(x**2))
                oarg = old.exp
                new_l = []
                o_al = []
                ct2 = oarg.as_coeff_mul()
                for a in self.exp.args:
                    newa = a._subs(old, new)
                    ct1 = newa.as_coeff_mul()
                    ok, pow, remainder_pow = _check(ct1, ct2, old)
                    if ok:
                        new_l.append(new**pow)
                        if remainder_pow is not None:
                            o_al.append(remainder_pow)
                        continue
                    elif not old.is_commutative and not newa.is_integer:
                        # If any term in the exponent is non-integer,
                        # we do not do any substitutions in the noncommutative case
                        return
                    o_al.append(newa)
                if new_l:
                    expo = Add(*o_al)
                    new_l.append(Pow(self.base, expo, evaluate=False) if expo != 1 else self.base)
                    return Mul(*new_l)
        # exp(...) pattern against a positive real power: compare exponents
        # of the common rewrite e*log(b)
        if isinstance(old, exp) and self.exp.is_extended_real and self.base.is_positive:
            ct1 = old.args[0].as_independent(Symbol, as_Add=False)
            ct2 = (self.exp*log(self.base)).as_independent(
                Symbol, as_Add=False)
            ok, pow, remainder_pow = _check(ct1, ct2, old)
            if ok:
                result = self.func(new, pow)  # (2**x).subs(exp(x*log(2)), z) -> z
                if remainder_pow is not None:
                    result = Mul(result, Pow(old.base, remainder_pow))
                return result
def as_base_exp(self):
"""Return base and exp of self.
If base is 1/Integer, then return Integer, -exp. If this extra
processing is not needed, the base and exp properties will
give the raw arguments
Examples
========
>>> from sympy import Pow, S
>>> p = Pow(S.Half, 2, evaluate=False)
>>> p.as_base_exp()
(2, -2)
>>> p.args
(1/2, 2)
"""
b, e = self.args
if b.is_Rational and b.p == 1 and b.q != 1:
return Integer(b.q), -e
return b, e
    def _eval_adjoint(self):
        """Return the adjoint of self, or None if it cannot be determined."""
        from sympy.functions.elementary.complexes import adjoint
        i, p = self.exp.is_integer, self.base.is_positive
        if i:
            # integer exponent: adjoint distributes over the power
            return adjoint(self.base)**self.exp
        if p:
            # positive (real) base: adjoint acts on the exponent only
            return self.base**adjoint(self.exp)
        if i is False and p is False:
            # last resort: expand into real/imaginary parts and retry
            expanded = expand_complex(self)
            if expanded != self:
                return adjoint(expanded)
    def _eval_conjugate(self):
        """Return the complex conjugate of self, or None if undetermined."""
        from sympy.functions.elementary.complexes import conjugate as c
        i, p = self.exp.is_integer, self.base.is_positive
        if i:
            # integer exponent: conjugate the base
            return c(self.base)**self.exp
        if p:
            # positive real base: conjugate the exponent
            return self.base**c(self.exp)
        if i is False and p is False:
            # expand into real/imaginary parts and conjugate the result
            expanded = expand_complex(self)
            if expanded != self:
                return c(expanded)
        if self.is_extended_real:
            # a real quantity is its own conjugate
            return self
    def _eval_transpose(self):
        """Return the transpose of self, or None if undetermined."""
        from sympy.functions.elementary.complexes import transpose
        i, p = self.exp.is_integer, self.base.is_complex
        if p:
            # complex (scalar) base: transposition is a no-op
            return self.base**self.exp
        if i:
            # integer power of a (possibly matrix-like) base: transpose it
            return transpose(self.base)**self.exp
        if i is False and p is False:
            expanded = expand_complex(self)
            if expanded != self:
                return transpose(expanded)
def _eval_expand_power_exp(self, **hints):
"""a**(n + m) -> a**n*a**m"""
b = self.base
e = self.exp
if e.is_Add and e.is_commutative:
expr = []
for x in e.args:
expr.append(self.func(self.base, x))
return Mul(*expr)
return self.func(b, e)
    def _eval_expand_power_base(self, **hints):
        """(a*b)**n -> a**n * b**n"""
        force = hints.get('force', False)
        b = self.base
        e = self.exp
        if not b.is_Mul:
            return self
        # split the Mul base into commutative and noncommutative parts
        cargs, nc = b.args_cnc(split_1=False)
        # expand each term - this is top-level-only
        # expansion but we have to watch out for things
        # that don't have an _eval_expand method
        if nc:
            nc = [i._eval_expand_power_base(**hints)
                if hasattr(i, '_eval_expand_power_base') else i
                for i in nc]
            if e.is_Integer:
                # an integer power of a noncommutative product can be
                # written out as a repeated (or repeated inverse) product
                if e.is_positive:
                    rv = Mul(*nc*e)
                else:
                    rv = Mul(*[i**-1 for i in nc[::-1]]*-e)
                if cargs:
                    rv *= Mul(*cargs)**e
                return rv
            if not cargs:
                return self.func(Mul(*nc), e, evaluate=False)
            nc = [Mul(*nc)]
        # sift the commutative bases
        other, maybe_real = sift(cargs, lambda x: x.is_extended_real is False,
            binary=True)
        def pred(x):
            # classify each possibly-real factor: I itself, polar (True),
            # nonnegative/negative (fuzzy), or unknown (None)
            if x is S.ImaginaryUnit:
                return S.ImaginaryUnit
            polar = x.is_polar
            if polar:
                return True
            if polar is None:
                return fuzzy_bool(x.is_extended_nonnegative)
        sifted = sift(maybe_real, pred)
        nonneg = sifted[True]
        other += sifted[None]
        neg = sifted[False]
        imag = sifted[S.ImaginaryUnit]
        if imag:
            # reduce a run of I factors modulo 4 (I**4 == 1), folding any
            # leftover sign into the negative/nonnegative piles
            I = S.ImaginaryUnit
            i = len(imag) % 4
            if i == 0:
                pass
            elif i == 1:
                other.append(I)
            elif i == 2:
                if neg:
                    nonn = -neg.pop()
                    if nonn is not S.One:
                        nonneg.append(nonn)
                else:
                    neg.append(S.NegativeOne)
            else:
                if neg:
                    nonn = -neg.pop()
                    if nonn is not S.One:
                        nonneg.append(nonn)
                else:
                    neg.append(S.NegativeOne)
                other.append(I)
            del imag
        # bring out the bases that can be separated from the base
        if force or e.is_integer:
            # treat all commutatives the same and put nc in other
            cargs = nonneg + neg + other
            other = nc
        else:
            # this is just like what is happening automatically, except
            # that now we are doing it for an arbitrary exponent for which
            # no automatic expansion is done
            assert not e.is_Integer
            # handle negatives by making them all positive and putting
            # the residual -1 in other
            if len(neg) > 1:
                o = S.One
                if not other and neg[0].is_Number:
                    o *= neg.pop(0)
                if len(neg) % 2:
                    o = -o
                for n in neg:
                    nonneg.append(-n)
                if o is not S.One:
                    other.append(o)
            elif neg and other:
                if neg[0].is_Number and neg[0] is not S.NegativeOne:
                    other.append(S.NegativeOne)
                    nonneg.append(-neg[0])
                else:
                    other.extend(neg)
            else:
                other.extend(neg)
            del neg
            cargs = nonneg
            other += nc
        rv = S.One
        if cargs:
            rv *= Mul(*[self.func(b, e, evaluate=False) for b in cargs])
        if other:
            rv *= self.func(Mul(*other), e, evaluate=False)
        return rv
    def _eval_expand_multinomial(self, **hints):
        """(a + b + ..)**n -> a**n + n*a**(n-1)*b + .., n is nonzero integer"""
        base, exp = self.args
        result = self
        if exp.is_Rational and exp.p > 0 and base.is_Add:
            if not exp.is_Integer:
                # split a positive rational exponent into integer part n and
                # a radical remainder, expand base**n, keep the radical
                n = Integer(exp.p // exp.q)
                if not n:
                    return result
                else:
                    radical, result = self.func(base, exp - n), []
                    expanded_base_n = self.func(base, n)
                    if expanded_base_n.is_Pow:
                        expanded_base_n = \
                            expanded_base_n._eval_expand_multinomial()
                    for term in Add.make_args(expanded_base_n):
                        result.append(term*radical)
                    return Add(*result)
            n = int(exp)
            if base.is_commutative:
                order_terms, other_terms = [], []
                for b in base.args:
                    if b.is_Order:
                        order_terms.append(b)
                    else:
                        other_terms.append(b)
                if order_terms:
                    # (f(x) + O(x^n))^m -> f(x)^m + m*f(x)^{m-1} *O(x^n)
                    f = Add(*other_terms)
                    o = Add(*order_terms)
                    if n == 2:
                        return expand_multinomial(f**n, deep=False) + n*f*o
                    else:
                        g = expand_multinomial(f**(n - 1), deep=False)
                        return expand_mul(f*g, deep=False) + n*g*o
                if base.is_number:
                    # Efficiently expand expressions of the form (a + b*I)**n
                    # where 'a' and 'b' are real numbers and 'n' is integer.
                    a, b = base.as_real_imag()
                    if a.is_Rational and b.is_Rational:
                        # clear denominators so a, b become plain integers,
                        # remembering the overall factor 1/k
                        if not a.is_Integer:
                            if not b.is_Integer:
                                k = self.func(a.q * b.q, n)
                                a, b = a.p*b.q, a.q*b.p
                            else:
                                k = self.func(a.q, n)
                                a, b = a.p, a.q*b
                        elif not b.is_Integer:
                            k = self.func(b.q, n)
                            a, b = a*b.q, b.p
                        else:
                            k = 1
                        # binary exponentiation of the Gaussian integer
                        # (a + b*I); (c, d) accumulates the result
                        a, b, c, d = int(a), int(b), 1, 0
                        while n:
                            if n & 1:
                                c, d = a*c - b*d, b*c + a*d
                                n -= 1
                            a, b = a*a - b*b, 2*a*b
                            n //= 2
                        I = S.ImaginaryUnit
                        if k == 1:
                            return c + I*d
                        else:
                            return Integer(c)/k + I*d/k
                p = other_terms
                # (x + y)**3 -> x**3 + 3*x**2*y + 3*x*y**2 + y**3
                # in this particular example:
                # p = [x,y]; n = 3
                # so now it's easy to get the correct result -- we get the
                # coefficients first:
                from sympy import multinomial_coefficients
                from sympy.polys.polyutils import basic_from_dict
                expansion_dict = multinomial_coefficients(len(p), n)
                # in our example: {(3, 0): 1, (1, 2): 3, (0, 3): 1, (2, 1): 3}
                # and now construct the expression.
                return basic_from_dict(expansion_dict, *p)
            else:
                # noncommutative Add base: expand by explicit distribution
                if n == 2:
                    return Add(*[f*g for f in base.args for g in base.args])
                else:
                    multi = (base**(n - 1))._eval_expand_multinomial()
                    if multi.is_Add:
                        return Add(*[f*g for f in base.args
                            for g in multi.args])
                    else:
                        # XXX can this ever happen if base was an Add?
                        return Add(*[f*multi for f in base.args])
        elif (exp.is_Rational and exp.p < 0 and base.is_Add and
                abs(exp.p) > exp.q):
            return 1 / self.func(base, -exp)._eval_expand_multinomial()
        elif exp.is_Add and base.is_Number:
            # n**(a + b) -> n**a * n**b, where n, a, b are Numbers
            coeff, tail = S.One, S.Zero
            for term in exp.args:
                if term.is_Number:
                    coeff *= self.func(base, term)
                else:
                    tail += term
            return coeff * self.func(base, tail)
        else:
            return result
    def as_real_imag(self, deep=True, **hints):
        """Return the (real, imaginary) parts of this power."""
        from sympy import atan2, cos, im, re, sin
        from sympy.polys.polytools import poly
        if self.exp.is_Integer:
            exp = self.exp
            # NOTE: the locals re/im below deliberately shadow the imported
            # re()/im() functions in this branch
            re, im = self.base.as_real_imag(deep=deep)
            if not im:
                return self, S.Zero
            a, b = symbols('a b', cls=Dummy)
            if exp >= 0:
                if re.is_Number and im.is_Number:
                    # We can be more efficient in this case
                    expr = expand_multinomial(self.base**exp)
                    if expr != self:
                        return expr.as_real_imag()
                expr = poly(
                    (a + b)**exp)  # a = re, b = im; expr = (a + b*I)**exp
            else:
                # negative exponent: invert base first, 1/(re + im*I) ==
                # (re - im*I)/(re**2 + im**2)
                mag = re**2 + im**2
                re, im = re/mag, -im/mag
                if re.is_Number and im.is_Number:
                    # We can be more efficient in this case
                    expr = expand_multinomial((re + im*S.ImaginaryUnit)**-exp)
                    if expr != self:
                        return expr.as_real_imag()
                expr = poly((a + b)**-exp)
            # Terms with even b powers will be real
            r = [i for i in expr.terms() if not i[0][1] % 2]
            re_part = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r])
            # Terms with odd b powers will be imaginary
            r = [i for i in expr.terms() if i[0][1] % 4 == 1]
            im_part1 = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r])
            r = [i for i in expr.terms() if i[0][1] % 4 == 3]
            im_part3 = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r])
            return (re_part.subs({a: re, b: S.ImaginaryUnit*im}),
                im_part1.subs({a: re, b: im}) + im_part3.subs({a: re, b: -im}))
        elif self.exp.is_Rational:
            re, im = self.base.as_real_imag(deep=deep)
            if im.is_zero and self.exp is S.Half:
                if re.is_extended_nonnegative:
                    return self, S.Zero
                if re.is_extended_nonpositive:
                    return S.Zero, (-self.base)**self.exp
            # XXX: This is not totally correct since for x**(p/q) with
            # x being imaginary there are actually q roots, but
            # only a single one is returned from here.
            # polar form: |base|**exp * (cos(t*exp) + I*sin(t*exp))
            r = self.func(self.func(re, 2) + self.func(im, 2), S.Half)
            t = atan2(im, re)
            rp, tp = self.func(r, self.exp), t*self.exp
            return (rp*cos(tp), rp*sin(tp))
        else:
            if deep:
                hints['complex'] = False
                expanded = self.expand(deep, **hints)
                if hints.get('ignore') == expanded:
                    return None
                else:
                    return (re(expanded), im(expanded))
            else:
                return (re(self), im(self))
def _eval_derivative(self, s):
from sympy import log
dbase = self.base.diff(s)
dexp = self.exp.diff(s)
return self * (dexp * log(self.base) + dbase * self.exp/self.base)
def _eval_evalf(self, prec):
base, exp = self.as_base_exp()
base = base._evalf(prec)
if not exp.is_Integer:
exp = exp._evalf(prec)
if exp.is_negative and base.is_number and base.is_extended_real is False:
base = base.conjugate() / (base * base.conjugate())._evalf(prec)
exp = -exp
return self.func(base, exp).expand()
return self.func(base, exp)
def _eval_is_polynomial(self, syms):
if self.exp.has(*syms):
return False
if self.base.has(*syms):
return bool(self.base._eval_is_polynomial(syms) and
self.exp.is_Integer and (self.exp >= 0))
else:
return True
    def _eval_is_rational(self):
        """Fuzzy rationality test: True/False/None (unknown)."""
        # The evaluation of self.func below can be very expensive in the case
        # of integer**integer if the exponent is large. We should try to exit
        # before that if possible:
        if (self.exp.is_integer and self.base.is_rational
                and fuzzy_not(fuzzy_and([self.exp.is_negative, self.base.is_zero]))):
            return True
        p = self.func(*self.as_base_exp())  # in case it's unevaluated
        if not p.is_Pow:
            return p.is_rational
        b, e = p.as_base_exp()
        if e.is_Rational and b.is_Rational:
            # we didn't check that e is not an Integer
            # because Rational**Integer autosimplifies
            return False
        if e.is_integer:
            if b.is_rational:
                if fuzzy_not(b.is_zero) or e.is_nonnegative:
                    return True
                if b == e:  # always rational, even for 0**0
                    return True
            elif b.is_irrational:
                # irrational**integer is rational only for exponent 0
                return e.is_zero
    def _eval_is_algebraic(self):
        """Fuzzy test of whether self is an algebraic number."""
        def _is_one(expr):
            # robust (expr == 1) check that tolerates objects for which
            # subtraction is not defined
            try:
                return (expr - 1).is_zero
            except ValueError:
                # when the operation is not allowed
                return False
        if self.base.is_zero or _is_one(self.base):
            # 0**e and 1**e are 0 or 1 whenever defined: algebraic
            return True
        elif self.exp.is_rational:
            if self.base.is_algebraic is False:
                return self.exp.is_zero
            return self.base.is_algebraic
        elif self.base.is_algebraic and self.exp.is_algebraic:
            # Gelfond-Schneider: algebraic**(irrational algebraic) is
            # transcendental for base not 0 or 1
            if ((fuzzy_not(self.base.is_zero)
                    and fuzzy_not(_is_one(self.base)))
                    or self.base.is_integer is False
                    or self.base.is_irrational):
                return self.exp.is_rational
def _eval_is_rational_function(self, syms):
if self.exp.has(*syms):
return False
if self.base.has(*syms):
return self.base._eval_is_rational_function(syms) and \
self.exp.is_Integer
else:
return True
def _eval_is_algebraic_expr(self, syms):
if self.exp.has(*syms):
return False
if self.base.has(*syms):
return self.base._eval_is_algebraic_expr(syms) and \
self.exp.is_Rational
else:
return True
    def _eval_rewrite_as_exp(self, base, expo, **kwargs):
        """Rewrite base**expo in terms of the exponential function."""
        from sympy import exp, log, I, arg
        if base.is_zero or base.has(exp) or expo.has(exp):
            return base**expo
        if base.has(Symbol):
            # delay evaluation if expo is non symbolic
            # (as exp(x*log(5)) automatically reduces to 5**x)
            return exp(log(base)*expo, evaluate=expo.has(Symbol))
        else:
            # numeric base: use the principal branch log|b| + I*arg(b)
            return exp((log(abs(base)) + I*arg(base))*expo)
    def as_numer_denom(self):
        """Return the (numerator, denominator) pair for this power."""
        if not self.is_commutative:
            return self, S.One
        base, exp = self.as_base_exp()
        n, d = base.as_numer_denom()
        # this should be the same as ExpBase.as_numer_denom wrt
        # exponent handling
        neg_exp = exp.is_negative
        if not neg_exp and not (-exp).is_negative:
            neg_exp = _coeff_isneg(exp)
        int_exp = exp.is_integer
        # the denominator cannot be separated from the numerator if
        # its sign is unknown unless the exponent is an integer, e.g.
        # sqrt(a/b) != sqrt(a)/sqrt(b) when a=1 and b=-1. But if the
        # denominator is negative the numerator and denominator can
        # be negated and the denominator (now positive) separated.
        if not (d.is_extended_real or int_exp):
            n = base
            d = S.One
        dnonpos = d.is_nonpositive
        if dnonpos:
            n, d = -n, -d
        elif dnonpos is None and not int_exp:
            n = base
            d = S.One
        if neg_exp:
            # negative exponent flips numerator and denominator
            n, d = d, n
            exp = -exp
        if exp.is_infinite:
            # keep the trivial side (1) unraised for infinite exponents
            if n is S.One and d is not S.One:
                return n, self.func(d, exp)
            if n is not S.One and d is S.One:
                return self.func(n, exp), d
        return self.func(n, exp), self.func(d, exp)
def matches(self, expr, repl_dict={}, old=False):
expr = _sympify(expr)
# special case, pattern = 1 and expr.exp can match to 0
if expr is S.One:
d = repl_dict.copy()
d = self.exp.matches(S.Zero, d)
if d is not None:
return d
# make sure the expression to be matched is an Expr
if not isinstance(expr, Expr):
return None
b, e = expr.as_base_exp()
# special case number
sb, se = self.as_base_exp()
if sb.is_Symbol and se.is_Integer and expr:
if e.is_rational:
return sb.matches(b**(e/se), repl_dict)
return sb.matches(expr**(1/se), repl_dict)
d = repl_dict.copy()
d = self.base.matches(b, d)
if d is None:
return None
d = self.exp.xreplace(d).matches(e, d)
if d is None:
return Expr.matches(self, expr, repl_dict)
return d
    def _eval_nseries(self, x, n, logx):
        # NOTE! This function is an important part of the gruntz algorithm
        # for computing limits. It has to return a generalized power
        # series with coefficients in C(log, log(x)). In more detail:
        # It has to return an expression
        # c_0*x**e_0 + c_1*x**e_1 + ... (finitely many terms)
        # where e_i are numbers (not necessarily integers) and c_i are
        # expressions involving only numbers, the log function, and log(x).
        from sympy import ceiling, collect, exp, log, O, Order, powsimp
        b, e = self.args
        if e.is_Integer:
            if e > 0:
                # positive integer powers are easy to expand, e.g.:
                # sin(x)**4 = (x - x**3/3 + ...)**4 = ...
                return expand_multinomial(self.func(b._eval_nseries(x, n=n,
                    logx=logx), e), deep=False)
            elif e is S.NegativeOne:
                # this is also easy to expand using the formula:
                # 1/(1 + x) = 1 - x + x**2 - x**3 ...
                # so we need to rewrite base to the form "1 + x"
                nuse = n
                cf = 1
                try:
                    ord = b.as_leading_term(x)
                    cf = Order(ord, x).getn()
                    if cf and cf.is_Number:
                        nuse = n + 2*ceiling(cf)
                    else:
                        cf = 1
                except NotImplementedError:
                    pass
                # keep widening the expansion order until the leading term
                # of the base series is not itself an Order term
                b_orig, prefactor = b, O(1, x)
                while prefactor.is_Order:
                    nuse += 1
                    b = b_orig._eval_nseries(x, n=nuse, logx=logx)
                    prefactor = b.as_leading_term(x)
                # express "rest" as: rest = 1 + k*x**l + ... + O(x**n)
                rest = expand_mul((b - prefactor)/prefactor)
                if rest.is_Order:
                    return 1/prefactor + rest/prefactor + O(x**n, x)
                k, l = rest.leadterm(x)
                if l.is_Rational and l > 0:
                    pass
                elif l.is_number and l > 0:
                    l = l.evalf()
                elif l == 0:
                    k = k.simplify()
                    if k == 0:
                        # if prefactor == w**4 + x**2*w**4 + 2*x*w**4, we need to
                        # factor the w**4 out using collect:
                        return 1/collect(prefactor, x)
                    else:
                        raise NotImplementedError()
                else:
                    raise NotImplementedError()
                if cf < 0:
                    cf = S.One/abs(cf)
                try:
                    dn = Order(1/prefactor, x).getn()
                    if dn and dn < 0:
                        pass
                    else:
                        dn = 0
                except NotImplementedError:
                    dn = 0
                # geometric-series style accumulation of (-rest)**m terms
                terms = [1/prefactor]
                for m in range(1, ceiling((n - dn + 1)/l*cf)):
                    new_term = terms[-1]*(-rest)
                    if new_term.is_Pow:
                        new_term = new_term._eval_expand_multinomial(
                            deep=False)
                    else:
                        new_term = expand_mul(new_term, deep=False)
                    terms.append(new_term)
                terms.append(O(x**n, x))
                return powsimp(Add(*terms), deep=True, combine='exp')
            else:
                # negative powers are rewritten to the cases above, for
                # example:
                # sin(x)**(-4) = 1/(sin(x)**4) = ...
                # and expand the denominator:
                nuse, denominator = n, O(1, x)
                while denominator.is_Order:
                    denominator = (b**(-e))._eval_nseries(x, n=nuse, logx=logx)
                    nuse += 1
                if 1/denominator == self:
                    return self
                # now we have a type 1/f(x), that we know how to expand
                return (1/denominator)._eval_nseries(x, n=n, logx=logx)
        if e.has(Symbol):
            # symbolic exponent: rewrite via exp/log and expand that instead
            return exp(e*log(b))._eval_nseries(x, n=n, logx=logx)
        # see if the base is as simple as possible
        bx = b
        while bx.is_Pow and bx.exp.is_Rational:
            bx = bx.base
        if bx == x:
            return self
        # work for b(x)**e where e is not an Integer and does not contain x
        # and hopefully has no other symbols
        def e2int(e):
            """return the integer value (if possible) of e and a
            flag indicating whether it is bounded or not."""
            n = e.limit(x, 0)
            infinite = n.is_infinite
            if not infinite:
                # XXX was int or floor intended? int used to behave like floor
                # so int(-Rational(1, 2)) returned -1 rather than int's 0
                try:
                    n = int(n)
                except TypeError:
                    # well, the n is something more complicated (like 1 + log(2))
                    try:
                        n = int(n.evalf()) + 1  # XXX why is 1 being added?
                    except TypeError:
                        pass  # hope that base allows this to be resolved
            n = _sympify(n)
            return n, infinite
        order = O(x**n, x)
        ei, infinite = e2int(e)
        b0 = b.limit(x, 0)
        if infinite and (b0 is S.One or b0.has(Symbol)):
            # XXX what order
            if b0 is S.One:
                # 1**oo form: the sign of (b - 1) decides the limit
                resid = (b - 1)
                if resid.is_positive:
                    return S.Infinity
                elif resid.is_negative:
                    return S.Zero
                raise ValueError('cannot determine sign of %s' % resid)
            return b0**ei
        if (b0 is S.Zero or b0.is_infinite):
            if infinite is not False:
                return b0**e  # XXX what order
            if not ei.is_number:  # if not, how will we proceed?
                raise ValueError(
                    'expecting numerical exponent but got %s' % ei)
            nuse = n - ei
            if e.is_extended_real and e.is_positive:
                lt = b.as_leading_term(x)
                # Try to correct nuse (= m) guess from:
                # (lt + rest + O(x**m))**e =
                # lt**e*(1 + rest/lt + O(x**m)/lt)**e =
                # lt**e + ... + O(x**m)*lt**(e - 1) = ... + O(x**n)
                try:
                    cf = Order(lt, x).getn()
                    nuse = ceiling(n - cf*(e - 1))
                except NotImplementedError:
                    pass
            bs = b._eval_nseries(x, n=nuse, logx=logx)
            terms = bs.removeO()
            if terms.is_Add:
                bs = terms
                lt = terms.as_leading_term(x)
                # bs -> lt + rest -> lt*(1 + (bs/lt - 1))
                return ((self.func(lt, e) * self.func((bs/lt).expand(), e).nseries(
                    x, n=nuse, logx=logx)).expand() + order)
            if bs.is_Add:
                from sympy import O
                # So, bs + O() == terms
                c = Dummy('c')
                res = []
                for arg in bs.args:
                    if arg.is_Order:
                        arg = c*arg.expr
                    res.append(arg)
                bs = Add(*res)
                rv = (bs**e).series(x).subs(c, O(1, x))
                rv += order
                return rv
            rv = bs**e
            if terms != bs:
                rv += order
            return rv
        # either b0 is bounded but neither 1 nor 0 or e is infinite
        # b -> b0 + (b - b0) -> b0 * (1 + (b/b0 - 1))
        o2 = order*(b0**-e)
        z = (b/b0 - 1)
        o = O(z, x)
        if o is S.Zero or o2 is S.Zero:
            infinite = True
        else:
            if o.expr.is_number:
                e2 = log(o2.expr*x)/log(x)
            else:
                e2 = log(o2.expr)/log(o.expr)
            n, infinite = e2int(e2)
        if infinite:
            # requested accuracy gives infinite series,
            # order is probably non-polynomial e.g. O(exp(-1/x), x).
            r = 1 + z
        else:
            # binomial-series expansion of (1 + z)**e via _taylor_term
            l = []
            g = None
            for i in range(n + 2):
                g = self._taylor_term(i, z, g)
                g = g.nseries(x, n=n, logx=logx)
                l.append(g)
            r = Add(*l)
        return expand_mul(r*b0**e) + order
def _eval_as_leading_term(self, x):
from sympy import exp, log
if not self.exp.has(x):
return self.func(self.base.as_leading_term(x), self.exp)
return exp(self.exp * log(self.base)).as_leading_term(x)
    @cacheit
    def _taylor_term(self, n, x, *previous_terms):  # of (1 + x)**e
        """Return the nth Taylor term of (1 + x)**e: binomial(e, n)*x**n."""
        from sympy import binomial
        return binomial(self.exp, n) * self.func(x, n)
def _sage_(self):
return self.args[0]._sage_()**self.args[1]._sage_()
    def as_content_primitive(self, radical=False, clear=True):
        """Return the tuple (R, self/R) where R is the positive Rational
        extracted from self.
        Examples
        ========
        >>> from sympy import sqrt
        >>> sqrt(4 + 4*sqrt(2)).as_content_primitive()
        (2, sqrt(1 + sqrt(2)))
        >>> sqrt(3 + 3*sqrt(2)).as_content_primitive()
        (1, sqrt(3)*sqrt(1 + sqrt(2)))
        >>> from sympy import expand_power_base, powsimp, Mul
        >>> from sympy.abc import x, y
        >>> ((2*x + 2)**2).as_content_primitive()
        (4, (x + 1)**2)
        >>> (4**((1 + y)/2)).as_content_primitive()
        (2, 4**(y/2))
        >>> (3**((1 + y)/2)).as_content_primitive()
        (1, 3**((y + 1)/2))
        >>> (3**((5 + y)/2)).as_content_primitive()
        (9, 3**((y + 1)/2))
        >>> eq = 3**(2 + 2*x)
        >>> powsimp(eq) == eq
        True
        >>> eq.as_content_primitive()
        (9, 3**(2*x))
        >>> powsimp(Mul(*_))
        3**(2*x + 2)
        >>> eq = (2 + 2*x)**y
        >>> s = expand_power_base(eq); s.is_Mul, s
        (False, (2*x + 2)**y)
        >>> eq.as_content_primitive()
        (1, (2*(x + 1))**y)
        >>> s = expand_power_base(_[1]); s.is_Mul, s
        (True, 2**y*(x + 1)**y)
        See docstring of Expr.as_content_primitive for more examples.
        """
        b, e = self.as_base_exp()
        b = _keep_coeff(*b.as_content_primitive(radical=radical, clear=clear))
        ce, pe = e.as_content_primitive(radical=radical, clear=clear)
        if b.is_Rational:
            # Rational base: pull out b**(integer part of the exponent's
            # Rational term) as the content.  Derivation:
            #e
            #= ce*pe
            #= ce*(h + t)
            #= ce*h + ce*t
            #=> self
            #= b**(ce*h)*b**(ce*t)
            #= b**(cehp/cehq)*b**(ce*t)
            #= b**(iceh + r/cehq)*b**(ce*t)
            #= b**(iceh)*b**(r/cehq)*b**(ce*t)
            #= b**(iceh)*b**(ce*t + r/cehq)
            h, t = pe.as_coeff_Add()
            if h.is_Rational:
                ceh = ce*h
                c = self.func(b, ceh)
                r = S.Zero
                if not c.is_Rational:
                    # b**ceh is not rational: keep only the integer part of
                    # the exponent and push the remainder r/cehq back in
                    iceh, r = divmod(ceh.p, ceh.q)
                    c = self.func(b, iceh)
                return c, self.func(b, _keep_coeff(ce, t + r/ce/ceh.q))
        e = _keep_coeff(ce, pe)
        # b**e = (h*t)**e = h**e*t**e = c*m*t**e
        if e.is_Rational and b.is_Mul:
            h, t = b.as_content_primitive(radical=radical, clear=clear)  # h is positive
            c, m = self.func(h, e).as_coeff_Mul()  # so c is positive
            m, me = m.as_base_exp()
            if m is S.One or me == e:  # probably always true
                # return the following, not return c, m*Pow(t, e)
                # which would change Pow into Mul; we let sympy
                # decide what to do by using the unevaluated Mul, e.g
                # should it stay as sqrt(2 + 2*sqrt(5)) or become
                # sqrt(2)*sqrt(1 + sqrt(5))
                return c, self.func(_keep_coeff(m, t), e)
        return S.One, self.func(b, e)
    def is_constant(self, *wrt, **flags):
        """Fuzzy constancy test for a power with respect to ``wrt``.

        See Expr.is_constant for the general contract; returns True, False,
        or None (undecidable).
        """
        expr = self
        if flags.get('simplify', True):
            expr = expr.simplify()
        b, e = expr.as_base_exp()
        bz = b.equals(0)
        if bz:  # recalculate with assumptions in case it's unevaluated
            new = b**e
            if new != expr:
                return new.is_constant()
        econ = e.is_constant(*wrt)
        bcon = b.is_constant(*wrt)
        if bcon:
            if econ:
                # constant**constant is constant
                return True
            bz = b.equals(0)
            if bz is False:
                # nonzero constant base, nonconstant exponent: not constant
                return False
        elif bcon is None:
            return None
        # remaining possibility: constant only if the exponent is zero
        return e.equals(0)
def _eval_difference_delta(self, n, step):
b, e = self.args
if e.has(n) and not b.has(n):
new_e = e.subs(n, n + step)
return (b**(new_e - e) - 1) * self
# Deferred imports at the bottom of the module: these sibling modules import
# this module in turn, so importing them only after the definitions above
# exist avoids a circular-import failure at load time.
from .add import Add
from .numbers import Integer
from .mul import Mul, _keep_coeff
from .symbol import Symbol, Dummy, symbols
|
// export default Factory.extend({
// name(i) {
// return "User " + i;
// },
// });
|
'''
Created on Nov 23, 2019
@author: ballance
'''
import cocotb
from cocotb.bfms import BfmMgr
from fwrisc_rv32i_tests.instr_tests import InstrTests
from fwrisc_tracer_bfm.fwrisc_tracer_signal_bfm import FwriscTracerSignalBfm
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
from sys import stdout
class ZephyrTests(InstrTests):
    """Runs a Zephyr software image and captures its console output.

    The tracer BFM is configured to report only memory writes that land in
    the image's ``ram_console`` buffer; those writes are decoded back into
    characters and accumulated into lines in ``console_output``.
    """

    # Byte-lane strobe value -> shift needed to extract the written byte.
    _LANE_SHIFT = {1: 0, 2: 8, 4: 16, 8: 24}

    def __init__(self, tracer_bfm):
        super().__init__(tracer_bfm)
        self.max_instr = 0   # instruction budget; 0 means unlimited
        self.halt_addr = -1  # optional halt address; -1 means unused
        tracer_bfm.add_listener(self)
        sw_image = cocotb.plusargs["SW_IMAGE"]
        self.raw_console = False  # when True, echo characters as they arrive
        self.console_buffer = ""  # characters of the current, unfinished line
        self.console_output = []  # completed console lines
        # Locate the ram_console buffer in the ELF symbol table so writes
        # to it can be watched.
        with open(sw_image, "rb") as f:
            elffile = ELFFile(f)
            symtab = elffile.get_section_by_name(".symtab")
            self.ram_console_addr = symtab.get_symbol_by_name("ram_console")[0]["st_value"]
        tracer_bfm.add_addr_region(self.ram_console_addr, self.ram_console_addr+1023)

    def configure_tracer(self):
        """Restrict tracing to writes inside the ram_console region."""
        self.tracer_bfm.set_trace_reg_writes(0)
        self.tracer_bfm.set_trace_instr(0, 0, 0)
        self.tracer_bfm.set_trace_all_memwrite(0)
        self.tracer_bfm.add_addr_region(
            self.ram_console_addr,
            self.ram_console_addr+1023)

    def instr_exec(self, pc, instr):
        """Tracer callback for executed instructions; unused by this test."""
        pass

    def mem_write(self, maddr, mstrb, mdata):
        """Tracer callback: decode console-buffer writes into characters.

        Fix: an unrecognized strobe pattern (anything other than a single
        byte lane 1/2/4/8, e.g. a half-word or word store) previously fell
        through with ch == 0 and appended a NUL character to the console
        buffer; such writes are now ignored.
        """
        if not (self.ram_console_addr <= maddr < self.ram_console_addr + 1024):
            return
        if mdata == 0:
            return
        shift = self._LANE_SHIFT.get(mstrb)
        if shift is None:
            return
        ch = chr((mdata >> shift) & 0xFF)
        if ch == '\n':
            # line complete: record it and (in line mode) echo it
            self.console_line(self.console_buffer)
            if not self.raw_console:
                print(self.console_buffer)
                stdout.flush()
            self.console_buffer = ""
        else:
            self.console_buffer += ch
        if self.raw_console:
            stdout.write(ch)
            stdout.flush()

    def console_line(self, line):
        """Record one completed console line."""
        self.console_output.append(line)

    @cocotb.coroutine
    def run(self):
        """Configure the tracer, then wait until the software signals done."""
        self.configure_tracer()
        # test_done_ev is presumably set by the InstrTests base when the
        # software reaches its completion hook — TODO confirm against base
        yield self.test_done_ev.wait()

    @cocotb.coroutine
    def check(self):
        """Post-run checks (currently only a progress marker)."""
        print("Check")
@cocotb.test()
def runtest(dut):
    """Top-level cocotb test: bind a tracer BFM and run the Zephyr tests."""
    # Toggle between the registry-located task/function BFM and a
    # signal-level BFM driven directly off the DUT hierarchy.
    use_tf_bfm = True
    if use_tf_bfm:
        bfm = BfmMgr.find_bfm(".*u_tracer")
    else:
        bfm = FwriscTracerSignalBfm(dut.u_dut.u_core.u_tracer)
    zephyr_test = ZephyrTests(bfm)
    yield zephyr_test.run()
|
const _ = require('lodash');
const { newMessage } = require('../helpers');
const ApiClient = require('../apiClient');
const processEventData = require('../processEventDataHelper').processEventData;
/**
 * Fetch the user's calendars from the Graph API and shape them into the
 * {id: name} map expected by the configuration UI's select box.
 */
function getCalendars(cfg, cb)
{
    const toSelectModel = (response) =>
    {
        const calendarsById = {};
        _.forEach(response.value, (calendar) =>
        {
            calendarsById[calendar.id] = calendar.name;
        });
        return calendarsById;
    };
    const client = new ApiClient(cfg);
    return client
        .get('/me/calendars')
        .then(toSelectModel)
        .nodeify(cb);
}
/**
 * elastic.io action: create an event in the configured Outlook calendar.
 *
 * Emits `data` with the created event (OData bookkeeping fields stripped and
 * calendarId added), `error` on failure, and always `end` when finished.
 * Invoked by the platform with the flow step bound as `this`.
 *
 * Fixes: removed the dead local `const id = data.id;` and reuse the
 * `calendarId` constant instead of re-reading `cfg.calendarId`.
 */
function processAction(msg, cfg)
{
    const self = this;
    const calendarId = cfg.calendarId;
    const apiCall = `/me/calendars/${calendarId}/events`;
    const instance = new ApiClient(cfg, self);
    function createEvent(postRequestBody)
    {
        return instance.post(apiCall, postRequestBody);
    }
    function emitData(data)
    {
        // Strip OData metadata (@odata.*) before emitting the payload.
        const messageBody = _.omitBy(data, (value, key) => key.startsWith('@odata.'));
        messageBody.calendarId = calendarId;
        self.emit('data', newMessage(messageBody));
    }
    function emitError(e)
    {
        self.emit('error', e);
    }
    function emitEnd()
    {
        self.emit('end');
    }
    let promise = processEventData(cfg, msg.body)
        .then(createEvent)
        .then(emitData)
        .fail(emitError);
    return promise.finally(emitEnd);
}
// Public interface: `process` runs the action for each incoming message and
// `getCalendars` backs the calendarId select view in the configuration UI.
module.exports.process = processAction;
module.exports.getCalendars = getCalendars;
|
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = invariant;
/**
 * Assert `condition` outside production builds. When it is falsy, throw an
 * "Invariant Violation" error whose message is `format` with each `%s`
 * replaced, in order, by the extra arguments. No-op in production.
 */
function invariant(condition, format, a, b, c, d, e, f) {
  if (process.env.NODE_ENV === 'production') {
    return;
  }
  if (arguments.length < 2) {
    throw new Error('invalid argument count.');
  }
  if (condition) {
    return;
  }
  const substitutions = [a, b, c, d, e, f];
  let cursor = 0;
  const message = format.replace(/%s/g, function nextArg() {
    return substitutions[cursor++];
  });
  const error = new Error(message);
  error.name = 'Invariant Violation';
  throw error;
}
|
import React, { Component } from "react";
import axios from "axios";
import Layout from "../../components/Layout";
import CharacterCard from "../../components/CharacterCard";
import { getEpisode } from "../../api";
class Episode extends Component {
constructor(props) {
super(props);
this.state = {
episode: null,
characters: [],
hasLoaded: false,
hasError: false,
errorMessage: null,
};
this.loadEpisode = this.loadEpisode.bind(this);
}
componentDidMount() {
const { match } = this.props;
const { episodeId } = match.params;
this.loadEpisode(episodeId);
}
async loadEpisode(episodeId) {
try {
const { data } = await getEpisode(episodeId);
const promises = data.characters.map((character) => axios.get(character));
const charactersResponse = await Promise.all(promises);
const characters = charactersResponse.map((character) => character.data);
this.setState({
hasLoaded: true,
episode: data,
characters: characters,
});
} catch (e) {
this.setState({
hasLoaded: true,
hasError: true,
errorMessage: e.message,
});
}
}
render() {
const {
episode,
characters,
hasLoaded,
hasError,
errorMessage,
} = this.state;
return (
<Layout>
<section className="row">
{!hasLoaded && (
<div className="col col-12">
<p>Episode not loaded...</p>
</div>
)}
{hasLoaded && (
<div className="col col-12">
<h5>{episode.name}</h5>
<hr />
<p>{`${episode.episode} | ${episode.air_date}`}</p>
<hr />
</div>
)}
{hasError && (
<div className="col col-12">
<p>Episode error...</p>
<p>{errorMessage}</p>
</div>
)}
{characters.length > 0 &&
characters.map((character) => (
<CharacterCard
key={character.id}
id={character.id}
name={character.name}
image={character.image}
species={character.species}
status={character.status}
origin={character.origin}
location={character.location}
/>
))}
</section>
</Layout>
);
}
}
export default Episode;
|
/****************************************************************************
Copyright (c) 2014 cocos2d-x.org
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#ifndef __TestCpp__TextFieldReader__
#define __TestCpp__TextFieldReader__

#include "WidgetReader/WidgetReader.h"
#include "CocosStudioExport.h"

namespace cocostudio
{
    // Singleton reader that deserializes ui::TextField widget properties from
    // the Cocos Studio export formats (JSON, binary, and flatbuffers).
    class CCS_DLL TextFieldReader : public WidgetReader
    {
        DECLARE_CLASS_NODE_READER_INFO

    public:
        TextFieldReader();
        virtual ~TextFieldReader();

        // Lazily-created process-wide instance; release with destroyInstance().
        static TextFieldReader* getInstance();
        /** @deprecated Use method destroyInstance() instead */
        CC_DEPRECATED_ATTRIBUTE static void purge();
        static void destroyInstance();

        // Apply properties parsed from a rapidjson dictionary onto `widget`.
        virtual void setPropsFromJsonDictionary(cocos2d::ui::Widget* widget, const rapidjson::Value& options);
        // Apply properties parsed from the legacy binary (CocoLoader) format.
        virtual void setPropsFromBinary(cocos2d::ui::Widget* widget, CocoLoader* cocoLoader, stExpCocoNode* pCocoNode) ;

        // Serialize an XML node's options into a flatbuffers table.
        flatbuffers::Offset<flatbuffers::Table> createOptionsWithFlatBuffers(pugi::xml_node objectData,
                                                                            flatbuffers::FlatBufferBuilder* builder);
        // Apply a flatbuffers options table onto an existing node.
        void setPropsWithFlatBuffers(cocos2d::Node* node, const flatbuffers::Table* textFieldOptions);
        // Create and configure a new node from a flatbuffers options table.
        cocos2d::Node* createNodeWithFlatBuffers(const flatbuffers::Table* textFieldOptions);
    };
}

#endif /* defined(__TestCpp__TextFieldReader__) */
|
//------------------------------------------------------------------------------
// gb_get_shallow: create a shallow copy of a MATLAB sparse matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// A = gb_get_shallow (X) constructs a shallow GrB_Matrix from a MATLAB
// mxArray, which can either be a MATLAB sparse matrix (double, complex, or
// logical) or a MATLAB struct that contains a GraphBLAS matrix.
// X must not be NULL, but it can be an empty matrix, as X = [ ] or even X = ''
// (the empty string). In this case, A is returned as NULL. This is not an
// error here, since the caller might be getting an optional input matrix, such
// as Cin or the Mask.
#include "gb_matlab.h"
#define IF(error,message) \
CHECK_ERROR (error, "invalid GraphBLAS struct (" message ")" ) ;
GrB_Matrix gb_get_shallow   // return a shallow copy of MATLAB sparse matrix
(
    const mxArray *X
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    CHECK_ERROR (X == NULL, "matrix missing") ;

    //--------------------------------------------------------------------------
    // construct the shallow GrB_Matrix
    //--------------------------------------------------------------------------

    GrB_Matrix A ;

    if (gb_mxarray_is_empty (X))
    {

        //----------------------------------------------------------------------
        // matrix is empty
        //----------------------------------------------------------------------

        // X is a 0-by-0 MATLAB matrix.  Create a new 0-by-0 matrix of the same
        // type as X, with the default format.

        OK (GrB_Matrix_new (&A, gb_mxarray_type (X), 0, 0)) ;

    }
    else if (mxIsStruct (X))
    {

        //----------------------------------------------------------------------
        // construct a shallow GrB_Matrix copy from a MATLAB struct
        //----------------------------------------------------------------------

        // get the type
        mxArray *mx_type = mxGetField (X, 0, "GraphBLAS") ;
        CHECK_ERROR (mx_type == NULL, "not a GraphBLAS struct") ;
        GrB_Type type = gb_mxstring_to_type (mx_type) ;

        // allocate the header, with no content
        OK (GrB_Matrix_new (&A, type, 0, 0)) ;
        // free whatever content GrB_Matrix_new allocated: the fields below
        // will alias the mxArray data directly (shallow copy)
        gb_mxfree (&(A->p)) ;
        gb_mxfree (&(A->h)) ;
        gb_mxfree (&(A->i)) ;
        gb_mxfree (&(A->x)) ;

        // get the scalar info
        // NOTE(review): the order of entries in s must match the MATLAB-side
        // serializer that built the struct — confirm against that code.
        mxArray *opaque = mxGetField (X, 0, "s") ;
        IF (opaque == NULL, ".s missing") ;
        double *s = mxGetDoubles (opaque) ;
        A->hyper_ratio   = s [0] ;
        A->plen          = (int64_t) s [1] ;
        A->vlen          = (int64_t) s [2] ;
        A->vdim          = (int64_t) s [3] ;
        A->nvec          = (int64_t) s [4] ;
        A->nvec_nonempty = (int64_t) s [5] ;
        A->is_hyper      = (int64_t) s [6] ;
        A->is_csc        = (int64_t) s [7] ;    // format already defined
        A->nzmax         = (int64_t) s [8] ;

        // get the pointers
        mxArray *Ap = mxGetField (X, 0, "p") ;
        IF (Ap == NULL, ".p missing") ;
        IF (mxGetM (Ap) != 1, ".p wrong size") ;
        IF (mxGetN (Ap) != A->plen+1, ".p wrong size") ;
        A->p = mxGetInt64s (Ap) ;
        IF (A->p == NULL, ".p wrong type") ;

        // get the indices
        mxArray *Ai = mxGetField (X, 0, "i") ;
        IF (Ai == NULL, ".i missing") ;
        IF (mxGetM (Ai) != 1, ".i wrong size") ;
        IF (mxGetN (Ai) != MAX (A->nzmax, 1), ".i wrong size") ;
        A->i = (A->nzmax == 0) ? NULL : mxGetInt64s (Ai) ;
        IF (A->i == NULL && A->nzmax > 0, ".i wrong type") ;

        // get the values
        // .x is stored as raw bytes; its length is type_size * nzmax
        mxArray *Ax = mxGetField (X, 0, "x") ;
        IF (Ax == NULL, ".x missing") ;
        IF (mxGetM (Ax) != 1, ".x wrong size") ;
        IF (mxGetN (Ax) != MAX (A->type_size*A->nzmax, 1), ".x wrong size") ;
        A->x = (A->nzmax == 0) ? NULL : ((void *) mxGetUint8s (Ax)) ;
        IF (A->x == NULL && A->nzmax > 0, ".x wrong type") ;

        A->h = NULL ;
        if (A->is_hyper)
        {
            // get the hyperlist (only present for hypersparse matrices)
            mxArray *Ah = mxGetField (X, 0, "h") ;
            IF (Ah == NULL, ".h missing") ;
            IF (mxGetM (Ah) != 1, ".h wrong size") ;
            IF (mxGetN (Ah) != MAX (A->plen, 1), ".h wrong size") ;
            A->h = (void *) mxGetInt64s (Ah) ;
            IF (A->h == NULL, ".h wrong type") ;
        }

        // tell GraphBLAS the matrix is shallow, so it never frees these
        // pointers (they are owned by the MATLAB mxArray X)
        A->p_shallow = true ;
        A->i_shallow = (A->i != NULL) ;
        A->x_shallow = (A->x != NULL) ;
        A->h_shallow = (A->h != NULL) ;

        // matrix is now initialized
        A->magic = GB_MAGIC ;

    }
    else
    {

        //----------------------------------------------------------------------
        // construct a shallow GrB_Matrix copy of a MATLAB matrix
        //----------------------------------------------------------------------

        // get the type and dimensions
        bool X_is_sparse = mxIsSparse (X) ;

        GrB_Type type = gb_mxarray_type (X) ;
        GrB_Index nrows = (GrB_Index) mxGetM (X) ;
        GrB_Index ncols = (GrB_Index) mxGetN (X) ;

        // get Xp, Xi, nzmax, or create them
        GrB_Index *Xp, *Xi, nzmax ;

        if (X_is_sparse)
        {
            // get the nzmax, Xp, and Xi from the MATLAB sparse matrix X
            nzmax = (GrB_Index) mxGetNzmax (X) ;
            Xp = (GrB_Index *) mxGetJc (X) ;
            Xi = (GrB_Index *) mxGetIr (X) ;
        }
        else
        {
            // X is a MATLAB dense matrix; create a partially shallow
            // GrB_Matrix copy by allocating the row indices Xi and pointers Xp
            // but keeping Xx shallow.
            // GB_matlab_helper2 presumably fills Xp/Xi with the pattern of a
            // fully dense ncols-by-nrows CSC matrix — confirm in its source.
            nzmax = MAX (nrows * ncols, 1) ;
            Xp = (GrB_Index *) mxMalloc ((ncols+1) * sizeof (GrB_Index)) ;
            Xi = (GrB_Index *) mxMalloc (nzmax * sizeof (GrB_Index)) ;
            GB_matlab_helper2 (Xp, Xi, (int64_t) ncols, (int64_t) nrows) ;
        }

        // get the numeric data
        void *Xx = NULL ;
        if (type == GrB_FP64)
        {
            // MATLAB sparse or dense double matrix
            Xx = mxGetDoubles (X) ;
        }
        #ifdef GB_COMPLEX_TYPE
        else if (type == gb_complex_type)
        {
            // MATLAB sparse or dense double complex matrix
            Xx = mxGetComplexDoubles (X) ;
        }
        #endif
        else if (type == GrB_BOOL)
        {
            // MATLAB sparse or dense logical matrix
            Xx = mxGetData (X) ;
        }
        else
        {
            // MATLAB does not support any other kinds of sparse matrices;
            // the integer/single cases below can only be dense
            if (X_is_sparse)
            {
                ERROR ("unsupported type") ;
            }
            else if (type == GrB_INT8)
            {
                Xx = mxGetInt8s (X) ;
            }
            else if (type == GrB_INT16)
            {
                Xx = mxGetInt16s (X) ;
            }
            else if (type == GrB_INT32)
            {
                Xx = mxGetInt32s (X) ;
            }
            else if (type == GrB_INT64)
            {
                Xx = mxGetInt64s (X) ;
            }
            else if (type == GrB_UINT8)
            {
                Xx = mxGetUint8s (X) ;
            }
            else if (type == GrB_UINT16)
            {
                Xx = mxGetUint16s (X) ;
            }
            else if (type == GrB_UINT32)
            {
                Xx = mxGetUint32s (X) ;
            }
            else if (type == GrB_UINT64)
            {
                Xx = mxGetUint64s (X) ;
            }
            else if (type == GrB_FP32)
            {
                Xx = mxGetSingles (X) ;
            }
            else
            {
                ERROR ("unsupported type") ;
            }
        }

        // import the matrix in CSC format.  This sets Xp, Xi, and Xx to NULL,
        // but it does not change the MATLAB matrix they came from.
        // NOTE(review): the -1 argument appears to be the "nvec_nonempty
        // unknown" sentinel — confirm against the GxB_Matrix_import_CSC docs.
        OK (GxB_Matrix_import_CSC (&A, type, nrows, ncols, nzmax, -1,
            &Xp, &Xi, &Xx, NULL)) ;

        // tell GraphBLAS the matrix is shallow; for the dense case Xp/Xi were
        // freshly mxMalloc'ed above, so only x remains shallow
        if (X_is_sparse)
        {
            A->p_shallow = true ;
            A->i_shallow = (A->i != NULL) ;
        }
        else
        {
            A->p_shallow = false ;
            A->i_shallow = false ;
        }
        A->h_shallow = (A->h != NULL) ;
        A->x_shallow = (A->x != NULL) ;
    }

    //--------------------------------------------------------------------------
    // return the result
    //--------------------------------------------------------------------------

    return (A) ;
}
|
// Chernobog (9/6/21)
// Tharis Hotel
#include "../defs.h"
inherit "/std/barkeep";
// Configure the faceless marionette barkeep for The Violet Hotel.
// BUG FIX: the original set_menu(...) call had a trailing comma after its
// last argument, which is a syntax error in LPC's C-like grammar.
void create() {
    ::create();
    set_name("room service");
    set_id(({"roomservice","servant","marionette"}));
    set_short("%^BOLD%^%^BLACK%^A %^RESET%^featureless %^BOLD%^%^BLACK%^eb%^RESET%^o%^BOLD%^%^BLACK%^ny m%^RESET%^a%^BOLD%^%^BLACK%^r%^RESET%^i%^BOLD%^o%^BLACK%^ne%^RESET%^t%^BOLD%^t%^BLACK%^e%^RESET%^%^MAGENTA%^, pushing a serving cart for %^BOLD%^%^BLACK%^The %^RESET%^%^MAGENTA%^V%^BOLD%^io%^RESET%^%^MAGENTA%^l%^BOLD%^e%^RESET%^%^MAGENTA%^t %^BOLD%^%^BLACK%^Hotel%^RESET%^%^RESET%^");
    set("aggressive", 0);
    set_level(10);
    set_long("%^BOLD%^%^BLACK%^This %^RESET%^alien%^BOLD%^%^BLACK%^-looking golem serves as wait staff at %^RESET%^%^MAGENTA%^The Violet%^BOLD%^%^BLACK%^. It is a slender, tall construct formed from what looks to be the same %^RESET%^l%^BOLD%^%^BLACK%^u%^WHITE%^s%^RESET%^t%^BOLD%^%^BLACK%^r%^WHITE%^o%^RESET%^u%^BOLD%^%^BLACK%^s eb%^RESET%^o%^BOLD%^%^BLACK%^ny w%^RESET%^o%^BOLD%^o%^BLACK%^d present throughout the hotel. It is dressed in a sharp black suit with %^RESET%^%^MAGENTA%^v%^BOLD%^io%^RESET%^%^MAGENTA%^l%^BOLD%^e%^RESET%^%^MAGENTA%^t %^BOLD%^%^BLACK%^p%^RESET%^%^MAGENTA%^i%^BOLD%^%^BLACK%^nstr%^RESET%^%^MAGENTA%^i%^BOLD%^%^BLACK%^pes and a br%^RESET%^oca%^BOLD%^%^BLACK%^d%^RESET%^e %^BOLD%^%^BLACK%^s%^RESET%^i%^BOLD%^l%^RESET%^v%^BOLD%^%^BLACK%^er v%^RESET%^e%^BOLD%^s%^BLACK%^t. Its head is humanoid in shape, but where there would normally be a face, instead is a sm%^RESET%^o%^BOLD%^o%^BLACK%^th featureless expanse %^RESET%^buffed %^BOLD%^%^BLACK%^to a r%^RESET%^e%^BOLD%^%^BLACK%^f%^RESET%^l%^BOLD%^e%^RESET%^c%^BOLD%^%^BLACK%^t%^RESET%^i%^BOLD%^%^BLACK%^ve sh%^RESET%^ee%^BOLD%^%^BLACK%^n. There is no distinguishable personality to this construct; it maintains its %^RESET%^silent vigilance%^BOLD%^%^BLACK%^.%^RESET%^");
    set_gender("other");
    set_alignment(6);
    set_race("human");
    set_hd(10,0);
    set_exp(10);
    set_max_hp(query_hp());
    add_money("copper", random(200));
    set_property("no_random_treasure",1);
    set_currency("gold");
    // Menu: item names, categories, and prices — the three arrays must stay
    // parallel (17 entries each).
    set_menu(
        ({"soup","salad","calamari","filet mignon","lamb","halibut","creme brulee","cobbler","cake","water","tea","lemonade","tequila twilight","whiskey sour","vodka martini","blood wine","chardonnay"}),
        ({"food","food","food","food","food","food","food","food","food","water","soft drink","soft drink","alcoholic","alcoholic","alcoholic","alcoholic","alcoholic"}),
        ({20,25,30,60,70,80,20,25,30,5,10,15,15,20,30,40,50})
    );
    // Message shown to the consumer; parallel to the menu above.
    set_my_mess(({
        "%^RESET%^%^ORANGE%^The soup is thick and %^BOLD%^%^WHITE%^creamy%^RESET%^%^ORANGE%^, filling your belly with warmth.%^WHITE%^",
        "%^RESET%^%^GREEN%^This refreshing salad is a delightful mix of %^ORANGE%^savory%^GREEN%^, %^BOLD%^sour%^RESET%^%^GREEN%^, and %^BOLD%^%^MAGENTA%^sweet %^RESET%^%^GREEN%^flavors.%^RESET%^",
        "%^YELLOW%^You squeeze the lemon over the calamari and dip one of the tender rings into the %^RED%^sauce %^ORANGE%^before popping into your mouth. %^RED%^Delicious%^ORANGE%^!%^RESET%^",
        "%^BOLD%^%^BLACK%^You cut into the st%^RESET%^%^ORANGE%^e%^BOLD%^%^BLACK%^ak and take a bite. It is cooked to %^RESET%^%^ORANGE%^perfection%^BOLD%^%^BLACK%^, melting in your mouth.%^RESET%^",
        "%^RESET%^%^RED%^You take a bite of the %^ORANGE%^tender %^BOLD%^%^WHITE%^lamb %^RESET%^%^RED%^shank, the rosemary tantalizing your senses.%^WHITE%^",
        "%^RESET%^%^CYAN%^The fine bread crumb crust and %^BOLD%^%^RED%^tangy %^ORANGE%^m%^RESET%^%^ORANGE%^u%^BOLD%^st%^RESET%^%^ORANGE%^a%^BOLD%^rd %^RESET%^%^CYAN%^enhance the texture and flavor of this wonderfully %^ORANGE%^flaky %^CYAN%^fish.%^WHITE%^",
        "%^RESET%^The cr%^ORANGE%^e%^WHITE%^m%^ORANGE%^e %^WHITE%^br%^ORANGE%^u%^WHITE%^l%^ORANGE%^e%^WHITE%^e is wonderfully creamy and the caramelized %^ORANGE%^brown sugar crust %^WHITE%^melts in your mouth.",
        "%^RESET%^%^ORANGE%^The pe%^BOLD%^%^MAGENTA%^a%^RESET%^%^ORANGE%^ch%^BOLD%^%^MAGENTA%^e%^RESET%^%^ORANGE%^s in this c%^BOLD%^%^MAGENTA%^o%^RESET%^%^ORANGE%^bbl%^BOLD%^%^MAGENTA%^e%^RESET%^%^ORANGE%^r are wonderfully %^BOLD%^%^MAGENTA%^sweet %^RESET%^%^ORANGE%^with just the right amount of %^BOLD%^%^MAGENTA%^tartness%^RESET%^%^ORANGE%^.%^WHITE%^",
        "%^BOLD%^%^BLACK%^The c%^RESET%^%^ORANGE%^a%^BOLD%^%^BLACK%^ke is %^MAGENTA%^exquisite%^BLACK%^, but so r%^RESET%^%^ORANGE%^i%^BOLD%^%^BLACK%^ch you can barely finish it!%^RESET%^",
        "%^BOLD%^%^BLUE%^The %^WHITE%^ice%^BLUE%^-cold %^RESET%^%^GREEN%^minty %^BOLD%^%^BLUE%^water hits the spot!%^RESET%^",
        "%^RESET%^%^MAGENTA%^The relaxing l%^BOLD%^a%^RESET%^%^MAGENTA%^v%^BOLD%^e%^RESET%^%^MAGENTA%^nd%^BOLD%^e%^RESET%^%^MAGENTA%^r %^BOLD%^scent %^RESET%^%^MAGENTA%^of the %^BOLD%^%^RED%^hot %^RESET%^%^MAGENTA%^tea calms you.%^WHITE%^",
        "%^YELLOW%^The %^MAGENTA%^tangy %^ORANGE%^sourness of the lemonade is almost too much!%^RESET%^",
        "%^BOLD%^%^BLACK%^The t%^RESET%^%^MAGENTA%^e%^BOLD%^%^BLACK%^qu%^RESET%^%^MAGENTA%^i%^BOLD%^%^BLACK%^la is smooth on the way down, %^RESET%^%^RED%^warming %^BOLD%^%^BLACK%^your belly.%^RESET%^",
        "%^RESET%^%^ORANGE%^The fr%^BOLD%^o%^RESET%^%^ORANGE%^thy e%^BOLD%^g%^RESET%^%^ORANGE%^g %^BOLD%^%^WHITE%^white %^RESET%^%^ORANGE%^offers a %^BOLD%^%^WHITE%^richness %^RESET%^%^ORANGE%^to this %^BOLD%^sour %^RESET%^%^ORANGE%^concoction.%^WHITE%^",
        "%^BOLD%^%^WHITE%^This m%^RESET%^%^GREEN%^a%^BOLD%^%^WHITE%^rt%^RESET%^%^GREEN%^i%^BOLD%^%^WHITE%^n%^RESET%^%^GREEN%^i %^BOLD%^%^WHITE%^is, in fact, quite d%^RESET%^%^GREEN%^i%^BOLD%^%^WHITE%^rty!%^RESET%^",
        "%^RESET%^%^RED%^This rich wine has legs for days! You notice subtle flavors of %^ORANGE%^oak%^RED%^, %^BOLD%^%^WHITE%^vanilla%^RESET%^%^RED%^, and even a hint of %^BOLD%^%^BLACK%^tobacco%^RESET%^%^RED%^.%^WHITE%^",
        "%^YELLOW%^Even the glass the ch%^WHITE%^a%^ORANGE%^rd%^WHITE%^o%^ORANGE%^nn%^WHITE%^a%^ORANGE%^y is served in has been %^CYAN%^chilled%^ORANGE%^. You detect hints of %^RED%^apple %^ORANGE%^and lemon in this crisp ch%^WHITE%^a%^ORANGE%^rd%^WHITE%^o%^ORANGE%^nn%^WHITE%^a%^ORANGE%^y.%^RESET%^"
    }));
    // Message shown to onlookers; parallel to the menu above.
    set_your_mess(({
        "%^RESET%^%^ORANGE%^enjoys a bowl of %^BOLD%^%^WHITE%^creamy %^RESET%^%^ORANGE%^pumpkin soup.%^WHITE%^",
        "%^RESET%^%^GREEN%^enjoys a refreshing salad.%^RESET%^",
        "%^YELLOW%^enjoys some fried calamari.%^RESET%^",
        "%^BOLD%^%^BLACK%^enjoys the filet mignon and %^RESET%^%^GREEN%^s%^BOLD%^e%^RESET%^%^GREEN%^ason%^BOLD%^a%^RESET%^%^GREEN%^l v%^BOLD%^e%^RESET%^%^GREEN%^get%^BOLD%^a%^RESET%^%^GREEN%^bles%^BOLD%^%^BLACK%^.%^RESET%^",
        "%^RESET%^%^RED%^enjoys the lamb shank, the smell of rosemary permeating the air.%^WHITE%^",
        "%^RESET%^%^CYAN%^enjoys a plate of %^ORANGE%^Dijon-crusted %^CYAN%^fish with %^GREEN%^s%^BOLD%^e%^RESET%^%^GREEN%^ason%^BOLD%^a%^RESET%^%^GREEN%^l v%^BOLD%^e%^RESET%^%^GREEN%^get%^BOLD%^a%^RESET%^%^GREEN%^bles%^CYAN%^.%^WHITE%^",
        "%^RESET%^enjoys the cr%^ORANGE%^e%^WHITE%^m%^ORANGE%^e %^WHITE%^br%^ORANGE%^u%^WHITE%^l%^ORANGE%^e%^WHITE%^e. ",
        "%^RESET%^%^ORANGE%^enjoys the pe%^BOLD%^%^MAGENTA%^a%^RESET%^%^ORANGE%^ch c%^BOLD%^%^MAGENTA%^o%^RESET%^%^ORANGE%^bbl%^BOLD%^%^MAGENTA%^e%^RESET%^%^ORANGE%^r.%^WHITE%^",
        "%^BOLD%^%^BLACK%^enjoys a piece of %^MAGENTA%^decadent %^BLACK%^ch%^RESET%^%^ORANGE%^o%^BOLD%^%^BLACK%^c%^RESET%^%^ORANGE%^o%^BOLD%^%^BLACK%^l%^RESET%^%^ORANGE%^a%^BOLD%^%^BLACK%^te c%^RESET%^%^ORANGE%^a%^BOLD%^%^BLACK%^ke%^RESET%^",
        "%^BOLD%^%^BLUE%^enjoys an %^WHITE%^ice%^BLUE%^-cold glass of water.%^RESET%^",
        "%^RESET%^%^MAGENTA%^enjoys some %^BOLD%^fragrant %^RESET%^%^MAGENTA%^tea.%^WHITE%^",
        "%^YELLOW%^enjoys a tall glass of lemonade.%^RESET%^",
        "%^BOLD%^%^BLACK%^sips a %^RESET%^%^MAGENTA%^violet%^BOLD%^%^BLACK%^-c%^RESET%^%^MAGENTA%^o%^BOLD%^%^BLACK%^l%^RESET%^%^MAGENTA%^o%^BOLD%^%^BLACK%^red cocktail.%^RESET%^",
        "%^RESET%^%^ORANGE%^drinks a %^BOLD%^sunny %^RESET%^%^ORANGE%^c%^BOLD%^o%^RESET%^%^ORANGE%^ckta%^BOLD%^i%^RESET%^%^ORANGE%^l garnished with an orange peel.%^WHITE%^",
        "%^BOLD%^%^WHITE%^puts their pinky up as they sip their d%^RESET%^%^GREEN%^i%^BOLD%^%^WHITE%^rty m%^RESET%^%^GREEN%^a%^BOLD%^%^WHITE%^rt%^RESET%^%^GREEN%^i%^BOLD%^%^WHITE%^n%^RESET%^%^GREEN%^i%^BOLD%^%^WHITE%^. Very fancy!%^RESET%^",
        "%^RESET%^%^RED%^drinks deeply from their glass of b%^BOLD%^u%^RESET%^%^RED%^rg%^BOLD%^u%^RESET%^%^RED%^ndy w%^BOLD%^i%^RESET%^%^RED%^ne.%^WHITE%^",
        "%^YELLOW%^enjoys a glass of %^CYAN%^chilled %^ORANGE%^ch%^WHITE%^a%^ORANGE%^rd%^WHITE%^o%^ORANGE%^nn%^WHITE%^a%^ORANGE%^y.%^RESET%^"
    }));
    // Short menu names; parallel to the menu above.
    set_menu_short(({
        "%^RESET%^%^ORANGE%^Pumpkin Cream Soup%^WHITE%^",
        "%^RESET%^%^GREEN%^Th%^BOLD%^a%^RESET%^%^GREEN%^r%^BOLD%^i%^RESET%^%^GREEN%^s%^BOLD%^i%^RESET%^%^GREEN%^an S%^BOLD%^a%^RESET%^%^GREEN%^l%^BOLD%^a%^RESET%^%^GREEN%^d%^WHITE%^",
        "%^YELLOW%^Fried Calamari%^RESET%^",
        "%^BOLD%^%^BLACK%^F%^RESET%^%^ORANGE%^i%^BOLD%^%^BLACK%^l%^RESET%^%^ORANGE%^e%^BOLD%^%^BLACK%^t M%^RESET%^%^ORANGE%^i%^BOLD%^%^BLACK%^gn%^RESET%^%^ORANGE%^o%^BOLD%^%^BLACK%^n%^RESET%^",
        "%^RESET%^%^RED%^Rosemary Braised %^BOLD%^%^WHITE%^Lamb %^RESET%^%^RED%^Shank%^RESET%^",
        "%^RESET%^%^ORANGE%^Dijon%^CYAN%^-Crusted %^BOLD%^%^WHITE%^Halibut%^RESET%^",
        "%^RESET%^Cr%^ORANGE%^e%^WHITE%^m%^ORANGE%^e %^WHITE%^Br%^ORANGE%^u%^WHITE%^l%^ORANGE%^e%^WHITE%^e",
        "%^RESET%^%^ORANGE%^Pe%^BOLD%^%^MAGENTA%^a%^RESET%^%^ORANGE%^ch C%^BOLD%^%^MAGENTA%^o%^RESET%^%^ORANGE%^bbl%^BOLD%^%^MAGENTA%^e%^RESET%^%^ORANGE%^r%^WHITE%^",
        "%^BOLD%^%^BLACK%^Ch%^RESET%^%^ORANGE%^o%^BOLD%^%^BLACK%^c%^RESET%^%^ORANGE%^o%^BOLD%^%^BLACK%^l%^RESET%^%^ORANGE%^a%^BOLD%^%^BLACK%^te C%^RESET%^%^ORANGE%^a%^BOLD%^%^BLACK%^ke%^RESET%^",
        "%^BOLD%^%^BLUE%^W%^WHITE%^a%^BLUE%^t%^WHITE%^e%^BLUE%^r%^RESET%^",
        "%^RESET%^%^MAGENTA%^L%^BOLD%^a%^RESET%^%^MAGENTA%^v%^BOLD%^e%^RESET%^%^MAGENTA%^nd%^BOLD%^e%^RESET%^%^MAGENTA%^r Tea%^WHITE%^",
        "%^YELLOW%^Lemonade%^RESET%^",
        "%^BOLD%^%^BLACK%^T%^RESET%^%^MAGENTA%^e%^BOLD%^%^BLACK%^qu%^RESET%^%^MAGENTA%^i%^BOLD%^%^BLACK%^la Tw%^RESET%^%^MAGENTA%^i%^BOLD%^%^BLACK%^l%^RESET%^%^MAGENTA%^i%^BOLD%^%^BLACK%^ght%^RESET%^",
        "%^RESET%^%^ORANGE%^Wh%^BOLD%^i%^RESET%^%^ORANGE%^sk%^BOLD%^e%^RESET%^%^ORANGE%^y %^BOLD%^S%^RESET%^%^ORANGE%^o%^BOLD%^ur%^RESET%^",
        "%^BOLD%^D%^RESET%^%^GREEN%^i%^BOLD%^%^WHITE%^rty M%^RESET%^%^GREEN%^a%^BOLD%^%^WHITE%^rt%^RESET%^%^GREEN%^i%^BOLD%^%^WHITE%^n%^RESET%^%^GREEN%^i%^WHITE%^",
        "%^BOLD%^%^RED%^B%^RESET%^%^RED%^loo%^BOLD%^d %^RESET%^%^RED%^W%^BOLD%^i%^RESET%^%^RED%^ne%^WHITE%^",
        "%^YELLOW%^Ch%^WHITE%^a%^ORANGE%^rd%^WHITE%^o%^ORANGE%^nn%^WHITE%^a%^ORANGE%^y%^RESET%^"
    }));
    // Long menu descriptions; parallel to the menu above.
    set_menu_long(({
        "%^RESET%^%^ORANGE%^A thick, vibrant orange soup with sw%^BOLD%^%^WHITE%^i%^RESET%^%^ORANGE%^rls of %^BOLD%^%^WHITE%^rich cream%^RESET%^%^ORANGE%^.%^WHITE%^ ",
        "%^RESET%^%^GREEN%^A collection of roasted root vegetables on top of a medley of greens, topped with dried %^RED%^cranberries%^GREEN%^, %^BOLD%^%^WHITE%^goat cheese%^RESET%^%^GREEN%^, and a %^ORANGE%^balsamic vinaigrette%^GREEN%^.%^WHITE%^",
        "%^YELLOW%^A plate of %^RESET%^%^ORANGE%^lightly fried %^BOLD%^calamari with a side of %^RED%^tangy dipping sauce %^ORANGE%^with lemon wedges.%^RESET%^ ",
        "%^BOLD%^%^BLACK%^A perfectly cooked cut of f%^RESET%^%^ORANGE%^i%^BOLD%^%^BLACK%^l%^RESET%^%^ORANGE%^e%^BOLD%^%^BLACK%^t m%^RESET%^%^ORANGE%^i%^BOLD%^%^BLACK%^gn%^RESET%^%^ORANGE%^o%^BOLD%^%^BLACK%^n accompanied by %^RESET%^%^GREEN%^s%^BOLD%^e%^RESET%^%^GREEN%^ason%^BOLD%^a%^RESET%^%^GREEN%^l v%^BOLD%^e%^RESET%^%^GREEN%^get%^BOLD%^a%^RESET%^%^GREEN%^bles%^BOLD%^%^BLACK%^.%^RESET%^",
        "%^RESET%^%^RED%^A large %^BOLD%^%^WHITE%^lamb %^RESET%^%^RED%^shank resting on a bed of %^BOLD%^%^WHITE%^creamy %^RESET%^mashed potatoes%^RED%^.%^WHITE%^",
        "%^RESET%^%^CYAN%^A plate of %^RESET%^%^ORANGE%^Dijon%^RESET%^%^CYAN%^-crusted %^BOLD%^%^WHITE%^halibut %^RESET%^%^CYAN%^on a bed of %^GREEN%^s%^BOLD%^e%^RESET%^%^GREEN%^ason%^BOLD%^a%^RESET%^%^GREEN%^l v%^BOLD%^e%^RESET%^%^GREEN%^get%^BOLD%^a%^RESET%^%^GREEN%^bles%^CYAN%^.%^WHITE%^",
        "%^RESET%^Cr%^ORANGE%^e%^WHITE%^m%^ORANGE%^e %^WHITE%^br%^ORANGE%^u%^WHITE%^l%^ORANGE%^e%^WHITE%^e with a c%^ORANGE%^a%^WHITE%^r%^ORANGE%^a%^WHITE%^m%^ORANGE%^e%^WHITE%^l%^ORANGE%^i%^WHITE%^z%^ORANGE%^e%^WHITE%^d %^ORANGE%^brown sugar crust%^WHITE%^.",
        "%^RESET%^%^ORANGE%^An ooey gooey heap of pe%^BOLD%^%^MAGENTA%^a%^RESET%^%^ORANGE%^ch c%^BOLD%^%^MAGENTA%^o%^RESET%^%^ORANGE%^bbl%^BOLD%^%^MAGENTA%^e%^RESET%^%^ORANGE%^r topped with a scoop of %^BOLD%^%^WHITE%^vanilla ice cream%^RESET%^%^ORANGE%^.%^WHITE%^",
        "%^BOLD%^%^BLACK%^A decadent 13-layer ch%^RESET%^%^ORANGE%^o%^BOLD%^%^BLACK%^c%^RESET%^%^ORANGE%^o%^BOLD%^%^BLACK%^l%^RESET%^%^ORANGE%^a%^BOLD%^%^BLACK%^te c%^RESET%^%^ORANGE%^a%^BOLD%^%^BLACK%^ke. Every other layer alternates between dark chocolate and a %^RESET%^%^ORANGE%^milk chocolate %^BOLD%^%^BLACK%^m%^RESET%^%^ORANGE%^o%^BOLD%^%^BLACK%^uss%^RESET%^%^ORANGE%^e%^BOLD%^%^BLACK%^.%^RESET%^",
        "%^BOLD%^%^BLUE%^A fr%^WHITE%^o%^BLUE%^st%^WHITE%^y %^BLUE%^gl%^WHITE%^a%^BLUE%^ss of refreshing %^WHITE%^ice %^BLUE%^water garnished with %^RESET%^%^GREEN%^mint%^BOLD%^%^BLUE%^.%^RESET%^",
        "%^RESET%^%^MAGENTA%^A steaming cup of %^BOLD%^fragrant %^RESET%^%^MAGENTA%^tea.%^WHITE%^ ",
        "%^YELLOW%^A tall of sweet and sour lemonade garnished with %^RED%^bright red cherries%^ORANGE%^.%^RESET%^",
        "%^BOLD%^%^BLACK%^This drink is infused with muddled d%^RESET%^%^MAGENTA%^a%^BOLD%^%^BLACK%^rk %^RESET%^%^MAGENTA%^berries%^BOLD%^%^BLACK%^, giving it a d%^RESET%^%^MAGENTA%^a%^BOLD%^%^BLACK%^rk %^RESET%^%^MAGENTA%^violet %^BOLD%^%^BLACK%^h%^RESET%^%^MAGENTA%^u%^BOLD%^%^BLACK%^e.%^RESET%^",
        "%^RESET%^%^ORANGE%^This %^BOLD%^sunny %^RESET%^%^ORANGE%^dr%^BOLD%^i%^RESET%^%^ORANGE%^nk has a fr%^BOLD%^o%^RESET%^%^ORANGE%^thy f%^BOLD%^o%^RESET%^%^ORANGE%^am and is garnished with an orange peel.%^WHITE%^",
        "%^BOLD%^Three %^RESET%^%^GREEN%^green olives %^BOLD%^%^WHITE%^impaled on a %^RESET%^%^ORANGE%^wooden toothpick %^BOLD%^%^WHITE%^rest in a bath of unassuming clear liquid.%^RESET%^",
        "%^RESET%^%^RED%^A large, round glass of r%^BOLD%^i%^RESET%^%^RED%^ch b%^BOLD%^u%^RESET%^%^RED%^rg%^BOLD%^u%^RESET%^%^RED%^ndy w%^BOLD%^i%^RESET%^%^RED%^ne.%^WHITE%^",
        "%^YELLOW%^A tall glass of %^CYAN%^chilled %^ORANGE%^ch%^WHITE%^a%^ORANGE%^rd%^WHITE%^o%^ORANGE%^nn%^WHITE%^a%^ORANGE%^y%^RESET%^"
    }));
}
|
#ifndef BBTSUBDEFS_H
#define BBTSUBDEFS_H 1

/***** Don't use PixRect code on 386i for now *****/
/***** -or on any machine that doesn't support it (HP, e.g.) *****/

/********************************************************/
/*							*/
/*	prropstyle is DEFINED when we want to use	*/
/*	pixrect versions of the operations in this	*/
/*	file, and UNDEFINED, when we want to use	*/
/*	Don Charnley's bitblt code to do them.		*/
/*							*/
/********************************************************/

#if defined(SUNDISPLAY) && \
    !defined(NOPIXRECT) && \
    !defined(NEWBITBLT)
#define prropstyle 1
#endif

/* Core BITBLT subroutine; argv is the Lisp-side argument vector. */
void bitbltsub(LispPTR *argv);
/* Install a new cursor image from the given bitmap region.
   NOTE(review): dx/dy/w/h presumably describe the source rectangle —
   confirm against the implementation. */
LispPTR n_new_cursorin(DLword *baseaddr, int dx, int dy, int w, int h);
LispPTR bitblt_bitmap(LispPTR *args);
LispPTR bitshade_bitmap(LispPTR *args);

/* bltchar/newbltchar return a value only in the pixrect build; the
   non-pixrect (Charnley bitblt) build declares them void. */
#ifndef prropstyle
void bltchar(LispPTR *args);
void newbltchar(LispPTR *args);
#else
LispPTR bltchar(LispPTR *args);
LispPTR newbltchar(LispPTR *args);
#endif

/* Call a compiled-code function identified by its atom index. */
void ccfuncall(unsigned int atom_index, int argnum, int bytenum);
void tedit_bltchar(LispPTR *args);

#endif
|
// WARNING: Please don't edit this file. It was generated by C++/WinRT v2.0.200615.7
// (Generated projection header: comments below are annotations only; the code
// must remain in sync with the generator's output.)
#ifndef WINRT_Windows_Storage_Search_2_H
#define WINRT_Windows_Storage_Search_2_H
#include "winrt/impl/Windows.Foundation.Collections.1.h"
#include "winrt/impl/Windows.Storage.Search.1.h"
WINRT_EXPORT namespace winrt::Windows::Storage::Search
{
    // Plain-data projection of the WinRT SortEntry value struct.
    struct SortEntry
    {
        hstring PropertyName;
        bool AscendingOrder;
    };
    // Field-wise equality for SortEntry.
    inline bool operator==(SortEntry const& left, SortEntry const& right) noexcept
    {
        return left.PropertyName == right.PropertyName && left.AscendingOrder == right.AscendingOrder;
    }
    // Defined in terms of operator==.
    inline bool operator!=(SortEntry const& left, SortEntry const& right) noexcept
    {
        return !(left == right);
    }
    // Projected runtime classes: each wraps an ABI pointer in its default
    // interface; nullptr_t and take_ownership_from_abi constructors follow the
    // standard C++/WinRT projection pattern.
    struct __declspec(empty_bases) ContentIndexer : Windows::Storage::Search::IContentIndexer,
        impl::require<ContentIndexer, Windows::Storage::Search::IContentIndexerQueryOperations>
    {
        ContentIndexer(std::nullptr_t) noexcept {}
        ContentIndexer(void* ptr, take_ownership_from_abi_t) noexcept : Windows::Storage::Search::IContentIndexer(ptr, take_ownership_from_abi) {}
        static auto GetIndexer(param::hstring const& indexName);
        static auto GetIndexer();
    };
    struct __declspec(empty_bases) ContentIndexerQuery : Windows::Storage::Search::IContentIndexerQuery
    {
        ContentIndexerQuery(std::nullptr_t) noexcept {}
        ContentIndexerQuery(void* ptr, take_ownership_from_abi_t) noexcept : Windows::Storage::Search::IContentIndexerQuery(ptr, take_ownership_from_abi) {}
    };
    struct __declspec(empty_bases) IndexableContent : Windows::Storage::Search::IIndexableContent
    {
        IndexableContent(std::nullptr_t) noexcept {}
        IndexableContent(void* ptr, take_ownership_from_abi_t) noexcept : Windows::Storage::Search::IIndexableContent(ptr, take_ownership_from_abi) {}
        IndexableContent();
    };
    struct __declspec(empty_bases) QueryOptions : Windows::Storage::Search::IQueryOptions,
        impl::require<QueryOptions, Windows::Storage::Search::IQueryOptionsWithProviderFilter>
    {
        QueryOptions(std::nullptr_t) noexcept {}
        QueryOptions(void* ptr, take_ownership_from_abi_t) noexcept : Windows::Storage::Search::IQueryOptions(ptr, take_ownership_from_abi) {}
        QueryOptions();
        QueryOptions(Windows::Storage::Search::CommonFileQuery const& query, param::iterable<hstring> const& fileTypeFilter);
        explicit QueryOptions(Windows::Storage::Search::CommonFolderQuery const& query);
    };
    struct __declspec(empty_bases) SortEntryVector : Windows::Foundation::Collections::IVector<Windows::Storage::Search::SortEntry>
    {
        SortEntryVector(std::nullptr_t) noexcept {}
        SortEntryVector(void* ptr, take_ownership_from_abi_t) noexcept : Windows::Foundation::Collections::IVector<Windows::Storage::Search::SortEntry>(ptr, take_ownership_from_abi) {}
    };
    struct __declspec(empty_bases) StorageFileQueryResult : Windows::Storage::Search::IStorageFileQueryResult,
        impl::require<StorageFileQueryResult, Windows::Storage::Search::IStorageFileQueryResult2>
    {
        StorageFileQueryResult(std::nullptr_t) noexcept {}
        StorageFileQueryResult(void* ptr, take_ownership_from_abi_t) noexcept : Windows::Storage::Search::IStorageFileQueryResult(ptr, take_ownership_from_abi) {}
    };
    struct __declspec(empty_bases) StorageFolderQueryResult : Windows::Storage::Search::IStorageFolderQueryResult
    {
        StorageFolderQueryResult(std::nullptr_t) noexcept {}
        StorageFolderQueryResult(void* ptr, take_ownership_from_abi_t) noexcept : Windows::Storage::Search::IStorageFolderQueryResult(ptr, take_ownership_from_abi) {}
    };
    struct __declspec(empty_bases) StorageItemQueryResult : Windows::Storage::Search::IStorageItemQueryResult
    {
        StorageItemQueryResult(std::nullptr_t) noexcept {}
        StorageItemQueryResult(void* ptr, take_ownership_from_abi_t) noexcept : Windows::Storage::Search::IStorageItemQueryResult(ptr, take_ownership_from_abi) {}
    };
    struct __declspec(empty_bases) StorageLibraryChangeTrackerTriggerDetails : Windows::Storage::Search::IStorageLibraryChangeTrackerTriggerDetails
    {
        StorageLibraryChangeTrackerTriggerDetails(std::nullptr_t) noexcept {}
        StorageLibraryChangeTrackerTriggerDetails(void* ptr, take_ownership_from_abi_t) noexcept : Windows::Storage::Search::IStorageLibraryChangeTrackerTriggerDetails(ptr, take_ownership_from_abi) {}
    };
    struct __declspec(empty_bases) StorageLibraryContentChangedTriggerDetails : Windows::Storage::Search::IStorageLibraryContentChangedTriggerDetails
    {
        StorageLibraryContentChangedTriggerDetails(std::nullptr_t) noexcept {}
        StorageLibraryContentChangedTriggerDetails(void* ptr, take_ownership_from_abi_t) noexcept : Windows::Storage::Search::IStorageLibraryContentChangedTriggerDetails(ptr, take_ownership_from_abi) {}
    };
    struct __declspec(empty_bases) ValueAndLanguage : Windows::Storage::Search::IValueAndLanguage
    {
        ValueAndLanguage(std::nullptr_t) noexcept {}
        ValueAndLanguage(void* ptr, take_ownership_from_abi_t) noexcept : Windows::Storage::Search::IValueAndLanguage(ptr, take_ownership_from_abi) {}
        ValueAndLanguage();
    };
}
#endif
|
import PropTypes from 'prop-types';
import React, {Component, lazy, Suspense} from 'react';
import LazyLoader from '../LazyLoader';
const RealNeedlePlot = lazy(LazyLoader.needlePlot);
/**
* The Needle Plot component is used to visualize large datasets
* containing categorical or numerical data. The lines and markers in
* the plot correspond to bars in a histogram.
**/
export default class NeedlePlot extends Component {
render() {
return (
<Suspense fallback={null}>
<RealNeedlePlot {...this.props} />
</Suspense>
);
}
}
NeedlePlot.propTypes = {
    /**
     * The ID of this component, used to identify dash components
     * in callbacks. The ID needs to be unique across all of the
     * components in an app.
     */
    id: PropTypes.string,

    /**
     * The data that are displayed on the plot
     */
    mutationData: PropTypes.shape({
        /*
        coordinates of mutations on the protein sequence
        */
        x: PropTypes.oneOfType([PropTypes.string, PropTypes.array]),
        /* value (could be the sample count); this property is not necessarily
        relevant, and should match x in size
        */
        y: PropTypes.oneOfType([PropTypes.string, PropTypes.array]),
        /*
        type of mutations; should match x in size
        */
        mutationGroups: PropTypes.arrayOf(PropTypes.string),
        /*
        protein domain coordinates on the protein sequence
        */
        domains: PropTypes.array,
    }),

    /**
     * Title of the x-axis.
     **/
    xlabel: PropTypes.string,

    /**
     * Title of the y-axis.
     **/
    ylabel: PropTypes.string,

    /**
     * If true, enables a rangeslider for the x-axis.
     **/
    rangeSlider: PropTypes.bool,

    /**
     * Options for the needle marking single site mutations
     */
    needleStyle: PropTypes.shape({
        // Color of the stems of the needles
        stemColor: PropTypes.string,
        // Thickness of the stems of the needles
        stemThickness: PropTypes.number,
        // Decides whether all stems have the same height or not
        stemConstHeight: PropTypes.bool,
        // Size of the heads of the needlehead
        headSize: PropTypes.number,
        // Color of the heads of the needlehead
        headColor: PropTypes.oneOfType([
            /* different color for different mutations; length must be greater
            than or equal to the size of the mutationGroups prop
            */
            PropTypes.array,
            // same color for all needles
            PropTypes.string,
        ]),
        // Style of the heads of the needlehead
        headSymbol: PropTypes.oneOfType([
            /* different marker for different mutations; length must be greater
            than or equal to the size of the mutationGroups prop
            */
            PropTypes.array,
            // same marker for all needles
            PropTypes.string,
        ]),
    }),

    /**
     * Options for the protein domain coloring
     */
    domainStyle: PropTypes.shape({
        // Color of the protein domains
        domainColor: PropTypes.array,
        /*
        the prop x sometimes contains smaller domains (e.g. multi-site
        mutations); if true, they are displayed
        */
        displayMinorDomains: PropTypes.bool,
    }),

    /**
     * Dash-assigned callback that should be called whenever any of the
     * properties change
     */
    setProps: PropTypes.func,
};
NeedlePlot.defaultProps = {
    // Empty data set — nothing is drawn until mutationData is supplied.
    mutationData: {
        x: [],
        y: [],
        domains: [],
        mutationGroups: [],
    },
    rangeSlider: false,
    needleStyle: {
        stemColor: '#444',
        stemThickness: 0.5,
        stemConstHeight: false,
        headSize: 5,
        // One head color per mutation group, consumed in order: a 9-color
        // qualitative palette repeated to provide 19 entries.
        headColor: [
            '#e41a1c',
            '#377eb8',
            '#4daf4a',
            '#984ea3',
            '#ff7f00',
            '#ffff33',
            '#a65628',
            '#f781bf',
            '#999999',
            '#e41a1c',
            '#377eb8',
            '#4daf4a',
            '#984ea3',
            '#ff7f00',
            '#ffff33',
            '#a65628',
            '#f781bf',
            '#999999',
            '#e41a1c',
        ],
        headSymbol: 'circle',
    },
    domainStyle: {
        displayMinorDomains: false,
        // One fill color per protein domain, consumed in order: a 12-color
        // qualitative palette repeated to provide 19 entries.
        domainColor: [
            '#8dd3c7',
            '#ffffb3',
            '#bebada',
            '#fb8072',
            '#80b1d3',
            '#fdb462',
            '#b3de69',
            '#fccde5',
            '#d9d9d9',
            '#bc80bd',
            '#ccebc5',
            '#ffed6f',
            '#8dd3c7',
            '#ffffb3',
            '#bebada',
            '#fb8072',
            '#80b1d3',
            '#fdb462',
            '#b3de69',
        ],
    },
};
// Re-export the component's prop metadata so Dash tooling can import it
// directly from this module.
export const {defaultProps, propTypes} = NeedlePlot;
|
"""
ASGI config for dateTester project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dateTester.settings')
application = get_asgi_application()
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (C) yanghongshun@gmail.com
#
import os,sys,configparser,getopt
import csv
# 1. For a single file, group its rows by the primary column
#    (gene_name (mus_musculus_ensembl_v80_Genes)).
# 2. For each group within a single file, sum the follower column (Frequency).
# note:
#   Grouping is by the value of the primary column in each row:
#   rows with the same name belong to the same group.
# 3. Supports batch summation over many files: viewed as a whole, primary
#    columns located in different files are grouped by the same rule
#    (i.e. cross-file grouping).
# 4. Supports summing over the merged groups.
#http://stackoverflow.com/questions/2387697/best-way-to-convert-csv-data-to-dict
#http://www.tbk.ren/article/168.html?from=similar
def usage():
    """Print command-line usage for this script to stdout."""
    # The original printed only '...', which gave the user no information.
    print("Usage: %s -i <input> [-g <group column>] [-f <follow column>]" % sys.argv[0])
    print("  -h, --help    show this help message and exit")
    print("  -i, --input   CSV file, or directory of CSV files, to process")
    print("  -g, --group   primary column used to group rows")
    print("  -f, --follow  column whose values are summed per group")
def saveDataToCSV(title, data, filePath, fmt=''):
    """Write rows to a CSV file, replacing any existing file.

    Args:
        title: header row (sequence of column names); skipped when empty.
        data: iterable of rows to write.
        filePath: destination path.
        fmt: field delimiter; a single space is used when empty (default).
    """
    print("saving data to csv file:%s" % filePath)
    if os.path.isfile(filePath):
        print("delete old csv file:%s" % filePath)
        os.remove(filePath)
    delimiter = fmt if fmt != '' else ' '
    # newline='' is required by the csv module; without it, '\r\n' row
    # terminators get doubled on Windows. The 'with' block guarantees the
    # file handle is closed even if writing raises.
    with open(filePath, 'w', newline='') as file_handle:
        csv_writer = csv.writer(file_handle, delimiter=delimiter)
        if len(title) > 0:
            csv_writer.writerow(title)
        csv_writer.writerows(data)
    print("saved end")
def generateResultFilePath(dataFilePath, prefix=''):
    """Map a data file path to its result file path.

    Mirrors the data file's location under the app's result directory and
    renames the file to 'result_<name>.csv' (or 'result<prefix><name>.csv'
    when a prefix is given). Creates the result directory if needed.

    NOTE(review): APP_DATA_DIRNAME and APP_RESULT_DIRNAME are not defined
    anywhere in this file — presumably intended as module-level constants;
    as written this function raises NameError. Confirm and define them.
    """
    print("generating result file path from data file path:%s" % dataFilePath)
    filename,fileext=os.path.splitext(os.path.basename(dataFilePath))
    if prefix=='':
        resultFileName = 'result_'+filename+'.csv'
    else:
        resultFileName = 'result'+prefix+filename+'.csv'
    dataFileAbsPath = os.path.abspath(dataFilePath)
    # App root is assumed to be the parent of the directory containing this
    # script (sys.argv[0]) — TODO confirm against the project layout.
    app_root_dir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
    app_data_dir = app_root_dir + os.sep + APP_DATA_DIRNAME+os.sep
    app_result_dir = app_root_dir + os.sep + APP_RESULT_DIRNAME+os.sep
    # Path of the data file relative to the data dir, reused under the result dir.
    result_tmp_dirstr = os.path.dirname(dataFileAbsPath).replace(app_data_dir,'')
    resultFileDir = os.path.join(app_result_dir,result_tmp_dirstr)
    if not os.path.exists(resultFileDir):
        print("create directory:%s " % resultFileDir)
        os.makedirs(resultFileDir)
    resultFilePath = os.path.join(resultFileDir,resultFileName)
    print("result file path is:%s" % resultFilePath)
    print("generated end")
    return resultFilePath
def setSumResultPath(groupColumn, followColumn):
    """Return the output path for the aggregated-sum CSV.

    The file is named 'g<groupColumn>_f<followColumn>_sum.csv' and placed in
    the tools-result directory next to this script.

    NOTE(review): APP_TOOLS_RESULT_DIRNAME is not defined in this file —
    presumably a module-level constant; confirm before running.
    """
    print("setting sum result path")
    script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
    result_name = f"g{groupColumn}_f{followColumn}_sum.csv"
    sumResultPath = os.path.join(script_dir, APP_TOOLS_RESULT_DIRNAME, result_name)
    print("sum result path is:%s" % sumResultPath)
    print("set end")
    return sumResultPath
def sumByGroup(sumParams):
    """Group CSV rows by the 'group' column and sum the 'follow' column.

    Args:
        sumParams: dict with keys 'input_path' (file or directory),
            'group_column' and 'follow_column'.

    NOTE(review): only the file-discovery skeleton is implemented — the
    actual grouping/summation (the '##' placeholders below) is missing and
    csvDictData is never populated. In the directory branch, singleFileData
    and resultfileabspath are simply overwritten for each .csv found.
    """
    print("start acting")
    input_path=sumParams['input_path']
    groupColumnIndex=sumParams['group_column']
    followColumnIndex=sumParams['follow_column']
    csvDictData={}  # intended accumulator: group key -> summed value (unused so far)
    if os.path.isdir(input_path):
        print("result file is a directory:%s" % input_path)
        # Recursively visit every .csv file under the input directory.
        for root,dirs,files in os.walk(os.path.abspath( input_path)):
            for file in files:
                filename,fileext=os.path.splitext(file)
                if fileext=='.csv':
                    singleFileData = []
                    resultfileabspath = root+os.sep+file
                    ## TODO: read the file and accumulate per-group sums here
                    print(resultfileabspath)
    elif os.path.isfile(input_path):
        print("input file is a single file:%s" % input_path)
        resultfileabspath = os.path.abspath(input_path)
        singleFileData = []
        ## TODO: read the file and accumulate per-group sums here
        print(resultfileabspath)
def main():
    """Parse command-line options and run the group-by summation.

    Options:
        -h, --help    print usage and exit
        -i, --input   CSV file or directory to process (required)
        -g, --group   column used to group rows
        -f, --follow  column whose values are summed per group

    Exits with status 2 on an invalid option, and silently when no input
    path is supplied (preserving the original behavior).
    """
    try:
        # BUG FIX: long option names passed to getopt must NOT include the
        # leading '--'; a trailing '=' marks options that take an argument.
        # The previous list ["--help=", "--input=", ...] made every long
        # option unrecognizable (and wrongly required an argument for --help).
        opts, args = getopt.getopt(sys.argv[1:], "hi:g:f:",
                                   ["help", "input=", "group=", "follow="])
    except getopt.GetoptError as err:
        print(err)
        usage()
        sys.exit(2)
    sumParams = {
        'input_path': '',
        'group_column': '',
        'follow_column': ''
    }
    for opt, arg in opts:
        if opt in ('-h', "--help"):
            usage()
            sys.exit()
        elif opt in ('-i', '--input'):
            sumParams['input_path'] = arg
        elif opt in ('-g', '--group'):
            sumParams['group_column'] = arg
        elif opt in ('-f', '--follow'):
            sumParams['follow_column'] = arg
    if sumParams['input_path'] != '':
        sumByGroup(sumParams)
    else:
        sys.exit()
if __name__ == "__main__":
    main()
|
/**@preserve GeneXus Java 10_3_12-110051 on December 12, 2020 19:27:24.41
*/
gx.evt.autoSkip = false;
gx.define('gamexampleentryuser', false, function () {
this.ServerClass = "gamexampleentryuser" ;
this.PackageName = "" ;
this.setObjectType("web");
this.setOnAjaxSessionTimeout("Warn");
this.hasEnterEvent = true;
this.skipOnEnter = false;
this.addKeyListener("5", "REFRESH");
this.addKeyListener("12", "CANCEL");
this.addKeyListener("1", "HELP");
this.autoRefresh = true;
this.SetStandaloneVars=function()
{
this.AV50String=gx.fn.getControlValue("vSTRING") ;
this.Gx_mode=gx.fn.getControlValue("vMODE") ;
};
this.Validv_Gender=function()
{
try {
var gxballoon = gx.util.balloon.getNew("vGENDER");
this.AnyError = 0;
}
catch(e){}
try {
if (gxballoon == null) return true; return gxballoon.show();
}
catch(e){}
return true ;
}
this.e162221_client=function()
{
this.setEventParameters([["String","vSTRING","AV50String"]], arguments[2]);
this.clearMessages();
gx.fn.setCtrlProperty("TBGO","Link", this.AV51URLProfile );
this.refreshOutputs([{av:'gx.fn.getCtrlProperty("TBGO","Link")',ctrl:'TBGO',prop:'Link'}]);
};
this.e172222_client=function()
{
this.refreshInputs([["URLProfile","vURLPROFILE","AV51URLProfile"]]);
this.clearMessages();
var gxEvtVar = this.getContextObject("vURLPROFILE");
this.refreshOutputs([]);
return gxEvtVar;
};
this.e122222_client=function()
{
this.executeServerEvent("ENTER", true, null, false, false);
};
this.e132222_client=function()
{
this.executeServerEvent("'ENABLEDISABLE'", false, null, false, false);
};
this.e142222_client=function()
{
this.executeServerEvent("VAUTHENTICATIONTYPENAME.ISVALID", true, null, false, true);
};
this.e182221_client=function()
{
this.executeServerEvent("CANCEL", true, null, false, false);
};
this.GXValidFnc = [];
var GXValidFnc = this.GXValidFnc ;
this.GXCtrlIds=[3,7,9,13,15,19,21,24,26,28,31,33,35,38,40,42,45,47,49,57,59,62,64,66,69,71,73,81,83,87,89,91,99,101,103,107,109,113,115,119,121,125,127,131,133,137,139,143,145,151,153,158,161,163,165,168,170,172,177,180,182,188];
this.GXLastCtrlId =188;
GXValidFnc[3]={fld:"TABLE1",grid:0};
GXValidFnc[7]={fld:"TBGUID", format:0,grid:0};
GXValidFnc[9]={lvl:0,type:"char",len:40,dec:0,sign:false,ro:1,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vUSERID",gxz:"ZV53UserId",gxold:"OV53UserId",gxvar:"AV53UserId",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV53UserId=Value},v2z:function(Value){gx.O.ZV53UserId=Value},v2c:function(){gx.fn.setControlValue("vUSERID",gx.O.AV53UserId,0);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV53UserId=this.val()},val:function(){return gx.fn.getControlValue("vUSERID")},nac:gx.falseFn};
this.declareDomainHdlr( 9 , function() {
});
GXValidFnc[13]={fld:"TBNAMESPACE", format:0,grid:0};
GXValidFnc[15]={lvl:0,type:"char",len:60,dec:0,sign:false,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vUSERNAMESPACE",gxz:"ZV54UserNameSpace",gxold:"OV54UserNameSpace",gxvar:"AV54UserNameSpace",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV54UserNameSpace=Value},v2z:function(Value){gx.O.ZV54UserNameSpace=Value},v2c:function(){gx.fn.setControlValue("vUSERNAMESPACE",gx.O.AV54UserNameSpace,0);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV54UserNameSpace=this.val()},val:function(){return gx.fn.getControlValue("vUSERNAMESPACE")},nac:gx.falseFn};
this.declareDomainHdlr( 15 , function() {
});
GXValidFnc[19]={fld:"TBNAUTHENTICATIONTYPE", format:0,grid:0};
GXValidFnc[21]={lvl:0,type:"char",len:60,dec:0,sign:false,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:'e142222_client',rgrid:[],fld:"vAUTHENTICATIONTYPENAME",gxz:"ZV7AuthenticationTypeName",gxold:"OV7AuthenticationTypeName",gxvar:"AV7AuthenticationTypeName",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"combo",v2v:function(Value){gx.O.AV7AuthenticationTypeName=Value},v2z:function(Value){gx.O.ZV7AuthenticationTypeName=Value},v2c:function(){gx.fn.setComboBoxValue("vAUTHENTICATIONTYPENAME",gx.O.AV7AuthenticationTypeName);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV7AuthenticationTypeName=this.val()},val:function(){return gx.fn.getControlValue("vAUTHENTICATIONTYPENAME")},nac:gx.falseFn};
this.declareDomainHdlr( 21 , function() {
});
GXValidFnc[24]={lvl:0,type:"bits",len:1024,dec:0,sign:false,ro:1,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vREQNAME",gxz:"ZV45ReqName",gxold:"OV45ReqName",gxvar:"AV45ReqName",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV45ReqName=Value},v2z:function(Value){gx.O.ZV45ReqName=Value},v2c:function(){gx.fn.setMultimediaValue("vREQNAME",gx.O.AV45ReqName,gx.O.AV58Reqname_GXI)},c2v:function(){gx.O.AV58Reqname_GXI=this.val_GXI();gx.O.AV45ReqName=this.val()},val:function(){return gx.fn.getBlobValue("vREQNAME")},val_GXI:function(){return gx.fn.getControlValue("vREQNAME_GXI")}, gxvar_GXI:'AV58Reqname_GXI',nac:gx.falseFn};
GXValidFnc[26]={fld:"TBNAME", format:0,grid:0};
GXValidFnc[28]={lvl:0,type:"svchar",len:100,dec:60,sign:false,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vNAME",gxz:"ZV33Name",gxold:"OV33Name",gxvar:"AV33Name",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV33Name=Value},v2z:function(Value){gx.O.ZV33Name=Value},v2c:function(){gx.fn.setControlValue("vNAME",gx.O.AV33Name,0);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV33Name=this.val()},val:function(){return gx.fn.getControlValue("vNAME")},nac:gx.falseFn};
this.declareDomainHdlr( 28 , function() {
});
GXValidFnc[31]={lvl:0,type:"bits",len:1024,dec:0,sign:false,ro:1,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vREQEMAIL",gxz:"ZV40ReqEmail",gxold:"OV40ReqEmail",gxvar:"AV40ReqEmail",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV40ReqEmail=Value},v2z:function(Value){gx.O.ZV40ReqEmail=Value},v2c:function(){gx.fn.setMultimediaValue("vREQEMAIL",gx.O.AV40ReqEmail,gx.O.AV59Reqemail_GXI)},c2v:function(){gx.O.AV59Reqemail_GXI=this.val_GXI();gx.O.AV40ReqEmail=this.val()},val:function(){return gx.fn.getBlobValue("vREQEMAIL")},val_GXI:function(){return gx.fn.getControlValue("vREQEMAIL_GXI")}, gxvar_GXI:'AV59Reqemail_GXI',nac:gx.falseFn};
GXValidFnc[33]={fld:"TBEMAIL", format:0,grid:0};
GXValidFnc[35]={lvl:0,type:"svchar",len:100,dec:0,sign:false,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vEMAIL",gxz:"ZV15EMail",gxold:"OV15EMail",gxvar:"AV15EMail",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV15EMail=Value},v2z:function(Value){gx.O.ZV15EMail=Value},v2c:function(){gx.fn.setControlValue("vEMAIL",gx.O.AV15EMail,0);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV15EMail=this.val()},val:function(){return gx.fn.getControlValue("vEMAIL")},nac:gx.falseFn};
this.declareDomainHdlr( 35 , function() {
});
GXValidFnc[38]={lvl:0,type:"bits",len:1024,dec:0,sign:false,ro:1,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vREQFIRSTNAME",gxz:"ZV41ReqFirstName",gxold:"OV41ReqFirstName",gxvar:"AV41ReqFirstName",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV41ReqFirstName=Value},v2z:function(Value){gx.O.ZV41ReqFirstName=Value},v2c:function(){gx.fn.setMultimediaValue("vREQFIRSTNAME",gx.O.AV41ReqFirstName,gx.O.AV60Reqfirstname_GXI)},c2v:function(){gx.O.AV60Reqfirstname_GXI=this.val_GXI();gx.O.AV41ReqFirstName=this.val()},val:function(){return gx.fn.getBlobValue("vREQFIRSTNAME")},val_GXI:function(){return gx.fn.getControlValue("vREQFIRSTNAME_GXI")}, gxvar_GXI:'AV60Reqfirstname_GXI',nac:gx.falseFn};
GXValidFnc[40]={fld:"TBFIRSTNAME", format:0,grid:0};
GXValidFnc[42]={lvl:0,type:"char",len:60,dec:0,sign:false,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vFIRSTNAME",gxz:"ZV20FirstName",gxold:"OV20FirstName",gxvar:"AV20FirstName",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV20FirstName=Value},v2z:function(Value){gx.O.ZV20FirstName=Value},v2c:function(){gx.fn.setControlValue("vFIRSTNAME",gx.O.AV20FirstName,0);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV20FirstName=this.val()},val:function(){return gx.fn.getControlValue("vFIRSTNAME")},nac:gx.falseFn};
this.declareDomainHdlr( 42 , function() {
});
GXValidFnc[45]={lvl:0,type:"bits",len:1024,dec:0,sign:false,ro:1,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vREQLASTNAME",gxz:"ZV44ReqLastName",gxold:"OV44ReqLastName",gxvar:"AV44ReqLastName",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV44ReqLastName=Value},v2z:function(Value){gx.O.ZV44ReqLastName=Value},v2c:function(){gx.fn.setMultimediaValue("vREQLASTNAME",gx.O.AV44ReqLastName,gx.O.AV61Reqlastname_GXI)},c2v:function(){gx.O.AV61Reqlastname_GXI=this.val_GXI();gx.O.AV44ReqLastName=this.val()},val:function(){return gx.fn.getBlobValue("vREQLASTNAME")},val_GXI:function(){return gx.fn.getControlValue("vREQLASTNAME_GXI")}, gxvar_GXI:'AV61Reqlastname_GXI',nac:gx.falseFn};
GXValidFnc[47]={fld:"TBLASTNAME", format:0,grid:0};
GXValidFnc[49]={lvl:0,type:"char",len:60,dec:0,sign:false,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vLASTNAME",gxz:"ZV28LastName",gxold:"OV28LastName",gxvar:"AV28LastName",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV28LastName=Value},v2z:function(Value){gx.O.ZV28LastName=Value},v2c:function(){gx.fn.setControlValue("vLASTNAME",gx.O.AV28LastName,0);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV28LastName=this.val()},val:function(){return gx.fn.getControlValue("vLASTNAME")},nac:gx.falseFn};
this.declareDomainHdlr( 49 , function() {
});
GXValidFnc[57]={fld:"TBEXTERNALID", format:0,grid:0};
GXValidFnc[59]={lvl:0,type:"svchar",len:100,dec:60,sign:false,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vEXTERNALID",gxz:"ZV18ExternalId",gxold:"OV18ExternalId",gxvar:"AV18ExternalId",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV18ExternalId=Value},v2z:function(Value){gx.O.ZV18ExternalId=Value},v2c:function(){gx.fn.setControlValue("vEXTERNALID",gx.O.AV18ExternalId,0);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV18ExternalId=this.val()},val:function(){return gx.fn.getControlValue("vEXTERNALID")},nac:gx.falseFn};
this.declareDomainHdlr( 59 , function() {
});
GXValidFnc[62]={lvl:0,type:"bits",len:1024,dec:0,sign:false,ro:1,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vREQBIRTHDAY",gxz:"ZV39ReqBirthday",gxold:"OV39ReqBirthday",gxvar:"AV39ReqBirthday",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV39ReqBirthday=Value},v2z:function(Value){gx.O.ZV39ReqBirthday=Value},v2c:function(){gx.fn.setMultimediaValue("vREQBIRTHDAY",gx.O.AV39ReqBirthday,gx.O.AV62Reqbirthday_GXI)},c2v:function(){gx.O.AV62Reqbirthday_GXI=this.val_GXI();gx.O.AV39ReqBirthday=this.val()},val:function(){return gx.fn.getBlobValue("vREQBIRTHDAY")},val_GXI:function(){return gx.fn.getControlValue("vREQBIRTHDAY_GXI")}, gxvar_GXI:'AV62Reqbirthday_GXI',nac:gx.falseFn};
GXValidFnc[64]={fld:"TBBIRTHDAY", format:0,grid:0};
GXValidFnc[66]={lvl:0,type:"date",len:10,dec:0,sign:false,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vBIRTHDAY",gxz:"ZV11Birthday",gxold:"OV11Birthday",gxvar:"AV11Birthday",dp:{f:0,st:false,wn:false,mf:false,pic:"99/99/9999",dec:0},ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV11Birthday=gx.fn.toDatetimeValue(Value)},v2z:function(Value){gx.O.ZV11Birthday=gx.fn.toDatetimeValue(Value)},v2c:function(){gx.fn.setControlValue("vBIRTHDAY",gx.O.AV11Birthday,0);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV11Birthday=gx.fn.toDatetimeValue(this.val())},val:function(){return gx.fn.getControlValue("vBIRTHDAY")},nac:gx.falseFn};
this.declareDomainHdlr( 66 , function() {
});
GXValidFnc[69]={lvl:0,type:"bits",len:1024,dec:0,sign:false,ro:1,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vREQGENDER",gxz:"ZV42ReqGender",gxold:"OV42ReqGender",gxvar:"AV42ReqGender",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV42ReqGender=Value},v2z:function(Value){gx.O.ZV42ReqGender=Value},v2c:function(){gx.fn.setMultimediaValue("vREQGENDER",gx.O.AV42ReqGender,gx.O.AV63Reqgender_GXI)},c2v:function(){gx.O.AV63Reqgender_GXI=this.val_GXI();gx.O.AV42ReqGender=this.val()},val:function(){return gx.fn.getBlobValue("vREQGENDER")},val_GXI:function(){return gx.fn.getControlValue("vREQGENDER_GXI")}, gxvar_GXI:'AV63Reqgender_GXI',nac:gx.falseFn};
GXValidFnc[71]={fld:"TBGENDER", format:0,grid:0};
GXValidFnc[73]={lvl:0,type:"char",len:1,dec:0,sign:false,ro:0,grid:0,gxgrid:null,fnc:this.Validv_Gender,isvalid:null,rgrid:[],fld:"vGENDER",gxz:"ZV21Gender",gxold:"OV21Gender",gxvar:"AV21Gender",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"combo",v2v:function(Value){gx.O.AV21Gender=Value},v2z:function(Value){gx.O.ZV21Gender=Value},v2c:function(){gx.fn.setComboBoxValue("vGENDER",gx.O.AV21Gender);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV21Gender=this.val()},val:function(){return gx.fn.getControlValue("vGENDER")},nac:gx.falseFn};
this.declareDomainHdlr( 73 , function() {
});
GXValidFnc[81]={fld:"TBURLIMAGE", format:0,grid:0};
GXValidFnc[83]={lvl:0,type:"bits",len:1024,dec:0,sign:false,ro:1,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vIMAGE",gxz:"ZV22Image",gxold:"OV22Image",gxvar:"AV22Image",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV22Image=Value},v2z:function(Value){gx.O.ZV22Image=Value},v2c:function(){gx.fn.setMultimediaValue("vIMAGE",gx.O.AV22Image,gx.O.AV68Image_GXI)},c2v:function(){gx.O.AV68Image_GXI=this.val_GXI();gx.O.AV22Image=this.val()},val:function(){return gx.fn.getBlobValue("vIMAGE")},val_GXI:function(){return gx.fn.getControlValue("vIMAGE_GXI")}, gxvar_GXI:'AV68Image_GXI',nac:gx.falseFn};
GXValidFnc[87]={fld:"TBURLPROFILE", format:0,grid:0};
GXValidFnc[89]={fld:"TBGO", format:1,grid:0};
GXValidFnc[91]={lvl:0,type:"svchar",len:2048,dec:250,sign:false,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vURLPROFILE",gxz:"ZV51URLProfile",gxold:"OV51URLProfile",gxvar:"AV51URLProfile",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV51URLProfile=Value},v2z:function(Value){gx.O.ZV51URLProfile=Value},v2c:function(){gx.fn.setControlValue("vURLPROFILE",gx.O.AV51URLProfile,0);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV51URLProfile=this.val()},val:function(){return gx.fn.getControlValue("vURLPROFILE")},nac:gx.falseFn};
this.declareDomainHdlr( 91 , function() {
});
GXValidFnc[99]={fld:"TBACTIVATED", format:0,grid:0};
GXValidFnc[101]={lvl:0,type:"boolean",len:4,dec:0,sign:false,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vISACTIVE",gxz:"ZV23IsActive",gxold:"OV23IsActive",gxvar:"AV23IsActive",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"checkbox",v2v:function(Value){gx.O.AV23IsActive=gx.lang.booleanValue(Value)},v2z:function(Value){gx.O.ZV23IsActive=gx.lang.booleanValue(Value)},v2c:function(){gx.fn.setCheckBoxValue("vISACTIVE",gx.O.AV23IsActive,true)},c2v:function(){gx.O.AV23IsActive=gx.lang.booleanValue(this.val())},val:function(){return gx.fn.getControlValue("vISACTIVE")},nac:gx.falseFn,values:['true','false']};
GXValidFnc[103]={lvl:0,type:"dtime",len:10,dec:5,sign:false,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vACTIVATIONDATE",gxz:"ZV5ActivationDate",gxold:"OV5ActivationDate",gxvar:"AV5ActivationDate",dp:{f:0,st:true,wn:false,mf:false,pic:"99/99/9999 99:99",dec:5},ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV5ActivationDate=gx.fn.toDatetimeValue(Value)},v2z:function(Value){gx.O.ZV5ActivationDate=gx.fn.toDatetimeValue(Value)},v2c:function(){gx.fn.setControlValue("vACTIVATIONDATE",gx.O.AV5ActivationDate,0);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV5ActivationDate=gx.fn.toDatetimeValue(this.val())},val:function(){return gx.fn.getDateTimeValue("vACTIVATIONDATE")},nac:gx.falseFn};
this.declareDomainHdlr( 103 , function() {
});
GXValidFnc[107]={fld:"TBNOTRECIBEINF", format:0,grid:0};
GXValidFnc[109]={lvl:0,type:"boolean",len:1,dec:0,sign:false,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vDONTRECEIVEINFORMATION",gxz:"ZV14DontReceiveInformation",gxold:"OV14DontReceiveInformation",gxvar:"AV14DontReceiveInformation",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"checkbox",v2v:function(Value){gx.O.AV14DontReceiveInformation=gx.lang.booleanValue(Value)},v2z:function(Value){gx.O.ZV14DontReceiveInformation=gx.lang.booleanValue(Value)},v2c:function(){gx.fn.setCheckBoxValue("vDONTRECEIVEINFORMATION",gx.O.AV14DontReceiveInformation,true);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV14DontReceiveInformation=gx.lang.booleanValue(this.val())},val:function(){return gx.fn.getControlValue("vDONTRECEIVEINFORMATION")},nac:gx.falseFn,values:['true','false']};
this.declareDomainHdlr( 109 , function() {
});
GXValidFnc[113]={fld:"TBCANNOTCHANGEPWD", format:0,grid:0};
GXValidFnc[115]={lvl:0,type:"boolean",len:1,dec:0,sign:false,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vCANNOTCHANGEPASSWORD",gxz:"ZV12CannotChangePassword",gxold:"OV12CannotChangePassword",gxvar:"AV12CannotChangePassword",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"checkbox",v2v:function(Value){gx.O.AV12CannotChangePassword=gx.lang.booleanValue(Value)},v2z:function(Value){gx.O.ZV12CannotChangePassword=gx.lang.booleanValue(Value)},v2c:function(){gx.fn.setCheckBoxValue("vCANNOTCHANGEPASSWORD",gx.O.AV12CannotChangePassword,true);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV12CannotChangePassword=gx.lang.booleanValue(this.val())},val:function(){return gx.fn.getControlValue("vCANNOTCHANGEPASSWORD")},nac:gx.falseFn,values:['true','false']};
this.declareDomainHdlr( 115 , function() {
});
GXValidFnc[119]={fld:"TBMUSTCHANGEPWD", format:0,grid:0};
GXValidFnc[121]={lvl:0,type:"boolean",len:1,dec:0,sign:false,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vMUSTCHANGEPASSWORD",gxz:"ZV32MustChangePassword",gxold:"OV32MustChangePassword",gxvar:"AV32MustChangePassword",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"checkbox",v2v:function(Value){gx.O.AV32MustChangePassword=gx.lang.booleanValue(Value)},v2z:function(Value){gx.O.ZV32MustChangePassword=gx.lang.booleanValue(Value)},v2c:function(){gx.fn.setCheckBoxValue("vMUSTCHANGEPASSWORD",gx.O.AV32MustChangePassword,true);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV32MustChangePassword=gx.lang.booleanValue(this.val())},val:function(){return gx.fn.getControlValue("vMUSTCHANGEPASSWORD")},nac:gx.falseFn,values:['true','false']};
this.declareDomainHdlr( 121 , function() {
});
GXValidFnc[125]={fld:"TBPASSWORDNEVEREXPIRES", format:0,grid:0};
GXValidFnc[127]={lvl:0,type:"boolean",len:1,dec:0,sign:false,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vPASSWORDNEVEREXPIRES",gxz:"ZV37PasswordNeverExpires",gxold:"OV37PasswordNeverExpires",gxvar:"AV37PasswordNeverExpires",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"checkbox",v2v:function(Value){gx.O.AV37PasswordNeverExpires=gx.lang.booleanValue(Value)},v2z:function(Value){gx.O.ZV37PasswordNeverExpires=gx.lang.booleanValue(Value)},v2c:function(){gx.fn.setCheckBoxValue("vPASSWORDNEVEREXPIRES",gx.O.AV37PasswordNeverExpires,true);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV37PasswordNeverExpires=gx.lang.booleanValue(this.val())},val:function(){return gx.fn.getControlValue("vPASSWORDNEVEREXPIRES")},nac:gx.falseFn,values:['true','false']};
this.declareDomainHdlr( 127 , function() {
});
GXValidFnc[131]={fld:"TBISBLOCKED", format:0,grid:0};
GXValidFnc[133]={lvl:0,type:"boolean",len:4,dec:0,sign:false,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vISBLOCKED",gxz:"ZV24IsBlocked",gxold:"OV24IsBlocked",gxvar:"AV24IsBlocked",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"checkbox",v2v:function(Value){gx.O.AV24IsBlocked=gx.lang.booleanValue(Value)},v2z:function(Value){gx.O.ZV24IsBlocked=gx.lang.booleanValue(Value)},v2c:function(){gx.fn.setCheckBoxValue("vISBLOCKED",gx.O.AV24IsBlocked,true)},c2v:function(){gx.O.AV24IsBlocked=gx.lang.booleanValue(this.val())},val:function(){return gx.fn.getControlValue("vISBLOCKED")},nac:gx.falseFn,values:['true','false']};
GXValidFnc[137]={fld:"TBSECURITYPOLICY", format:0,grid:0};
GXValidFnc[139]={lvl:0,type:"int",len:9,dec:0,sign:false,pic:"ZZZZZZZZ9",ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vSECURITYPOLICYID",gxz:"ZV49SecurityPolicyId",gxold:"OV49SecurityPolicyId",gxvar:"AV49SecurityPolicyId",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"combo",v2v:function(Value){gx.O.AV49SecurityPolicyId=gx.num.intval(Value)},v2z:function(Value){gx.O.ZV49SecurityPolicyId=gx.num.intval(Value)},v2c:function(){gx.fn.setComboBoxValue("vSECURITYPOLICYID",gx.O.AV49SecurityPolicyId);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV49SecurityPolicyId=gx.num.intval(this.val())},val:function(){return gx.fn.getIntegerValue("vSECURITYPOLICYID",'.')},nac:gx.falseFn};
this.declareDomainHdlr( 139 , function() {
});
GXValidFnc[143]={fld:"TBENABLEDREPO", format:0,grid:0};
GXValidFnc[145]={lvl:0,type:"boolean",len:1,dec:0,sign:false,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vISENABLEDINREPOSITORY",gxz:"ZV25IsEnabledInRepository",gxold:"OV25IsEnabledInRepository",gxvar:"AV25IsEnabledInRepository",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"checkbox",v2v:function(Value){gx.O.AV25IsEnabledInRepository=gx.lang.booleanValue(Value)},v2z:function(Value){gx.O.ZV25IsEnabledInRepository=gx.lang.booleanValue(Value)},v2c:function(){gx.fn.setCheckBoxValue("vISENABLEDINREPOSITORY",gx.O.AV25IsEnabledInRepository,true);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV25IsEnabledInRepository=gx.lang.booleanValue(this.val())},val:function(){return gx.fn.getControlValue("vISENABLEDINREPOSITORY")},nac:gx.falseFn,values:['true','false']};
this.declareDomainHdlr( 145 , function() {
});
GXValidFnc[151]={fld:"TBLASTAUTHENTICATION", format:0,grid:0};
GXValidFnc[153]={lvl:0,type:"dtime",len:8,dec:5,sign:false,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vDATELASTAUTHENTICATION",gxz:"ZV13DateLastAuthentication",gxold:"OV13DateLastAuthentication",gxvar:"AV13DateLastAuthentication",dp:{f:0,st:true,wn:false,mf:false,pic:"99/99/99 99:99",dec:5},ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV13DateLastAuthentication=gx.fn.toDatetimeValue(Value)},v2z:function(Value){gx.O.ZV13DateLastAuthentication=gx.fn.toDatetimeValue(Value)},v2c:function(){gx.fn.setControlValue("vDATELASTAUTHENTICATION",gx.O.AV13DateLastAuthentication,0)},c2v:function(){gx.O.AV13DateLastAuthentication=gx.fn.toDatetimeValue(this.val())},val:function(){return gx.fn.getDateTimeValue("vDATELASTAUTHENTICATION")},nac:gx.falseFn};
GXValidFnc[158]={fld:"TABLEPASSWORD",grid:0};
GXValidFnc[161]={lvl:0,type:"bits",len:1024,dec:0,sign:false,ro:1,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vREQPASSWORD",gxz:"ZV46ReqPassword",gxold:"OV46ReqPassword",gxvar:"AV46ReqPassword",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV46ReqPassword=Value},v2z:function(Value){gx.O.ZV46ReqPassword=Value},v2c:function(){gx.fn.setMultimediaValue("vREQPASSWORD",gx.O.AV46ReqPassword,gx.O.AV64Reqpassword_GXI)},c2v:function(){gx.O.AV64Reqpassword_GXI=this.val_GXI();gx.O.AV46ReqPassword=this.val()},val:function(){return gx.fn.getBlobValue("vREQPASSWORD")},val_GXI:function(){return gx.fn.getControlValue("vREQPASSWORD_GXI")}, gxvar_GXI:'AV64Reqpassword_GXI',nac:gx.falseFn};
GXValidFnc[163]={fld:"TBPWD", format:0,grid:0};
GXValidFnc[165]={lvl:0,type:"char",len:50,dec:0,sign:false,isPwd:true,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vPASSWORD",gxz:"ZV34Password",gxold:"OV34Password",gxvar:"AV34Password",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV34Password=Value},v2z:function(Value){gx.O.ZV34Password=Value},v2c:function(){gx.fn.setControlValue("vPASSWORD",gx.O.AV34Password,0);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV34Password=this.val()},val:function(){return gx.fn.getControlValue("vPASSWORD")},nac:gx.falseFn};
this.declareDomainHdlr( 165 , function() {
});
GXValidFnc[168]={lvl:0,type:"bits",len:1024,dec:0,sign:false,ro:1,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vREQPASSWORD",gxz:"ZV46ReqPassword",gxold:"OV46ReqPassword",gxvar:"AV46ReqPassword",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV46ReqPassword=Value},v2z:function(Value){gx.O.ZV46ReqPassword=Value},v2c:function(){gx.fn.setMultimediaValue("vREQPASSWORD",gx.O.AV46ReqPassword,gx.O.AV64Reqpassword_GXI)},c2v:function(){gx.O.AV64Reqpassword_GXI=this.val_GXI();gx.O.AV46ReqPassword=this.val()},val:function(){return gx.fn.getBlobValue("vREQPASSWORD")},val_GXI:function(){return gx.fn.getControlValue("vREQPASSWORD_GXI")}, gxvar_GXI:'AV64Reqpassword_GXI',nac:gx.falseFn};
GXValidFnc[170]={fld:"TBPWDCONF", format:0,grid:0};
GXValidFnc[172]={lvl:0,type:"char",len:50,dec:0,sign:false,isPwd:true,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vPASSWORDCONF",gxz:"ZV35PasswordConf",gxold:"OV35PasswordConf",gxvar:"AV35PasswordConf",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV35PasswordConf=Value},v2z:function(Value){gx.O.ZV35PasswordConf=Value},v2c:function(){gx.fn.setControlValue("vPASSWORDCONF",gx.O.AV35PasswordConf,0);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.AV35PasswordConf=this.val()},val:function(){return gx.fn.getControlValue("vPASSWORDCONF")},nac:gx.falseFn};
this.declareDomainHdlr( 172 , function() {
});
GXValidFnc[177]={fld:"TABLEBTNS",grid:0};
GXValidFnc[180]={lvl:0,type:"bits",len:1024,dec:0,sign:false,ro:1,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vREQICON",gxz:"ZV43ReqIcon",gxold:"OV43ReqIcon",gxvar:"AV43ReqIcon",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.AV43ReqIcon=Value},v2z:function(Value){gx.O.ZV43ReqIcon=Value},v2c:function(){gx.fn.setMultimediaValue("vREQICON",gx.O.AV43ReqIcon,gx.O.AV65Reqicon_GXI)},c2v:function(){gx.O.AV65Reqicon_GXI=this.val_GXI();gx.O.AV43ReqIcon=this.val()},val:function(){return gx.fn.getBlobValue("vREQICON")},val_GXI:function(){return gx.fn.getControlValue("vREQICON_GXI")}, gxvar_GXI:'AV65Reqicon_GXI',nac:gx.falseFn};
GXValidFnc[182]={fld:"TBINFICONREQ", format:0,grid:0};
GXValidFnc[188]={fld:"TBLBUTTONS",grid:0};
   // --- Variable initialization ---
   // For each data-bound variable the runtime keeps three copies:
   //   AVnn = current value, ZVnn = last value sent to the server (Z-buffer),
   //   OVnn = previous value for change detection.
   this.AV53UserId = "" ;
   this.ZV53UserId = "" ;
   this.OV53UserId = "" ;
   this.AV54UserNameSpace = "" ;
   this.ZV54UserNameSpace = "" ;
   this.OV54UserNameSpace = "" ;
   this.AV7AuthenticationTypeName = "" ;
   this.ZV7AuthenticationTypeName = "" ;
   this.OV7AuthenticationTypeName = "" ;
   this.AV58Reqname_GXI = "" ;
   this.AV45ReqName = "" ;
   this.ZV45ReqName = "" ;
   this.OV45ReqName = "" ;
   this.AV33Name = "" ;
   this.ZV33Name = "" ;
   this.OV33Name = "" ;
   this.AV59Reqemail_GXI = "" ;
   this.AV40ReqEmail = "" ;
   this.ZV40ReqEmail = "" ;
   this.OV40ReqEmail = "" ;
   this.AV15EMail = "" ;
   this.ZV15EMail = "" ;
   this.OV15EMail = "" ;
   this.AV60Reqfirstname_GXI = "" ;
   this.AV41ReqFirstName = "" ;
   this.ZV41ReqFirstName = "" ;
   this.OV41ReqFirstName = "" ;
   this.AV20FirstName = "" ;
   this.ZV20FirstName = "" ;
   this.OV20FirstName = "" ;
   this.AV61Reqlastname_GXI = "" ;
   this.AV44ReqLastName = "" ;
   this.ZV44ReqLastName = "" ;
   this.OV44ReqLastName = "" ;
   this.AV28LastName = "" ;
   this.ZV28LastName = "" ;
   this.OV28LastName = "" ;
   this.AV18ExternalId = "" ;
   this.ZV18ExternalId = "" ;
   this.OV18ExternalId = "" ;
   this.AV62Reqbirthday_GXI = "" ;
   this.AV39ReqBirthday = "" ;
   this.ZV39ReqBirthday = "" ;
   this.OV39ReqBirthday = "" ;
   this.AV11Birthday = gx.date.nullDate() ;
   this.ZV11Birthday = gx.date.nullDate() ;
   this.OV11Birthday = gx.date.nullDate() ;
   this.AV63Reqgender_GXI = "" ;
   this.AV42ReqGender = "" ;
   this.ZV42ReqGender = "" ;
   this.OV42ReqGender = "" ;
   this.AV21Gender = "" ;
   this.ZV21Gender = "" ;
   this.OV21Gender = "" ;
   this.AV68Image_GXI = "" ;
   this.AV22Image = "" ;
   this.ZV22Image = "" ;
   this.OV22Image = "" ;
   this.AV51URLProfile = "" ;
   this.ZV51URLProfile = "" ;
   this.OV51URLProfile = "" ;
   this.AV23IsActive = false ;
   this.ZV23IsActive = false ;
   this.OV23IsActive = false ;
   this.AV5ActivationDate = gx.date.nullDate() ;
   this.ZV5ActivationDate = gx.date.nullDate() ;
   this.OV5ActivationDate = gx.date.nullDate() ;
   this.AV14DontReceiveInformation = false ;
   this.ZV14DontReceiveInformation = false ;
   this.OV14DontReceiveInformation = false ;
   this.AV12CannotChangePassword = false ;
   this.ZV12CannotChangePassword = false ;
   this.OV12CannotChangePassword = false ;
   this.AV32MustChangePassword = false ;
   this.ZV32MustChangePassword = false ;
   this.OV32MustChangePassword = false ;
   this.AV37PasswordNeverExpires = false ;
   this.ZV37PasswordNeverExpires = false ;
   this.OV37PasswordNeverExpires = false ;
   this.AV24IsBlocked = false ;
   this.ZV24IsBlocked = false ;
   this.OV24IsBlocked = false ;
   this.AV49SecurityPolicyId = 0 ;
   this.ZV49SecurityPolicyId = 0 ;
   this.OV49SecurityPolicyId = 0 ;
   this.AV25IsEnabledInRepository = false ;
   this.ZV25IsEnabledInRepository = false ;
   this.OV25IsEnabledInRepository = false ;
   this.AV13DateLastAuthentication = gx.date.nullDate() ;
   this.ZV13DateLastAuthentication = gx.date.nullDate() ;
   this.OV13DateLastAuthentication = gx.date.nullDate() ;
   this.AV64Reqpassword_GXI = "" ;
   this.AV46ReqPassword = "" ;
   this.ZV46ReqPassword = "" ;
   this.OV46ReqPassword = "" ;
   this.AV34Password = "" ;
   this.ZV34Password = "" ;
   this.OV34Password = "" ;
   this.AV35PasswordConf = "" ;
   this.ZV35PasswordConf = "" ;
   this.OV35PasswordConf = "" ;
   this.AV65Reqicon_GXI = "" ;
   this.AV43ReqIcon = "" ;
   this.ZV43ReqIcon = "" ;
   this.OV43ReqIcon = "" ;
   // NOTE(review): the generator emits a second pass of default assignments for
   // the current (AV*) values below; redundant but harmless.
   this.AV53UserId = "" ;
   this.AV54UserNameSpace = "" ;
   this.AV7AuthenticationTypeName = "" ;
   this.AV45ReqName = "" ;
   this.AV33Name = "" ;
   this.AV40ReqEmail = "" ;
   this.AV15EMail = "" ;
   this.AV41ReqFirstName = "" ;
   this.AV20FirstName = "" ;
   this.AV44ReqLastName = "" ;
   this.AV28LastName = "" ;
   this.AV18ExternalId = "" ;
   this.AV39ReqBirthday = "" ;
   this.AV11Birthday = gx.date.nullDate() ;
   this.AV42ReqGender = "" ;
   this.AV21Gender = "" ;
   this.AV22Image = "" ;
   this.AV51URLProfile = "" ;
   this.AV23IsActive = false ;
   this.AV5ActivationDate = gx.date.nullDate() ;
   this.AV14DontReceiveInformation = false ;
   this.AV12CannotChangePassword = false ;
   this.AV32MustChangePassword = false ;
   this.AV37PasswordNeverExpires = false ;
   this.AV24IsBlocked = false ;
   this.AV49SecurityPolicyId = 0 ;
   this.AV25IsEnabledInRepository = false ;
   this.AV13DateLastAuthentication = gx.date.nullDate() ;
   this.AV46ReqPassword = "" ;
   this.AV34Password = "" ;
   this.AV35PasswordConf = "" ;
   this.AV43ReqIcon = "" ;
   this.AV50String = "" ;
   this.Gx_mode = "" ;
   // --- Event wiring ---
   // Map declared events to their client handlers and declare, per server
   // event, the variables sent ([0]) and refreshed back ([1]).
   this.addContextTracker(null, "", ["String"], this.e162221_client);
   this.addContextSetter("vURLPROFILE", "Attribute", ["URLProfile"], this.e172222_client);
   this.Events = {"e122222_client": ["ENTER", true] ,"e132222_client": ["'ENABLEDISABLE'", true] ,"e142222_client": ["VAUTHENTICATIONTYPENAME.ISVALID", true] ,"e182221_client": ["CANCEL", true] ,"e162221_client": ["TRACKCONTEXT_GX1", false] ,"e172222_client": ["URLPROFILE.SETCONTEXT", false]};
   this.EvtParms["REFRESH"] = [[],[]];
   this.EvtParms["ENTER"] = [[{av:'AV53UserId',fld:'vUSERID'},{av:'Gx_mode',fld:'vMODE'},{av:'AV7AuthenticationTypeName',fld:'vAUTHENTICATIONTYPENAME'},{av:'AV34Password',fld:'vPASSWORD'},{av:'AV35PasswordConf',fld:'vPASSWORDCONF'},{av:'AV33Name',fld:'vNAME'},{av:'AV15EMail',fld:'vEMAIL'},{av:'AV20FirstName',fld:'vFIRSTNAME'},{av:'AV28LastName',fld:'vLASTNAME'},{av:'AV18ExternalId',fld:'vEXTERNALID'},{av:'AV11Birthday',fld:'vBIRTHDAY'},{av:'AV21Gender',fld:'vGENDER'},{av:'AV23IsActive',fld:'vISACTIVE'},{av:'AV14DontReceiveInformation',fld:'vDONTRECEIVEINFORMATION'},{av:'AV12CannotChangePassword',fld:'vCANNOTCHANGEPASSWORD'},{av:'AV32MustChangePassword',fld:'vMUSTCHANGEPASSWORD'},{av:'AV24IsBlocked',fld:'vISBLOCKED'},{av:'AV37PasswordNeverExpires',fld:'vPASSWORDNEVEREXPIRES'},{av:'AV49SecurityPolicyId',fld:'vSECURITYPOLICYID'}],[{av:'AV36PasswordIsOK',fld:'vPASSWORDISOK'},{av:'AV10AuthTypeId',fld:'vAUTHTYPEID'},{av:'AV34Password',fld:'vPASSWORD'},{av:'AV69GXV3',fld:'vGXV3'}]];
   this.EvtParms["'ENABLEDISABLE'"] = [[{av:'Gx_mode',fld:'vMODE'},{av:'AV25IsEnabledInRepository',fld:'vISENABLEDINREPOSITORY'},{av:'AV53UserId',fld:'vUSERID'}],[{av:'AV26isOK',fld:'vISOK'},{av:'AV70GXV4',fld:'vGXV4'}]];
   this.EvtParms["VAUTHENTICATIONTYPENAME.ISVALID"] = [[{av:'AV7AuthenticationTypeName',fld:'vAUTHENTICATIONTYPENAME'}],[{av:'AV10AuthTypeId',fld:'vAUTHTYPEID'},{av:'gx.fn.getCtrlProperty("TABLEPASSWORD","Visible")',ctrl:'TABLEPASSWORD',prop:'Visible'}]];
   this.EvtParms["TRACKCONTEXT_GX1"] = [[{av:'AV50String',fld:'vSTRING'},{av:'AV51URLProfile',fld:'vURLPROFILE'}],[{av:'gx.fn.getCtrlProperty("TBGO","Link")',ctrl:'TBGO',prop:'Link'}]];
   this.EnterCtrl = ["BTNCONFIRM"];
   this.setVCMap("AV50String", "vSTRING", 0, "svchar");
   this.setVCMap("Gx_mode", "vMODE", 0, "char");
   this.InitStandaloneVars( );
});
gx.setParentObj(new gamexampleentryuser());
|
def substrCount(n, s):
    """Count the "special" substrings of ``s``.

    A substring is special when all of its characters are the same
    (e.g. "aaa"), or when all characters are the same except for the single
    middle one (e.g. "aadaa").

    Args:
        n: Length of the prefix of ``s`` to consider (kept for interface
           compatibility with the original signature).
        s: The input string.

    Returns:
        The number of special substrings in ``s[:n]``.
    """
    from itertools import groupby

    # Run-length encode the string: "aadaa" -> [('a', 2), ('d', 1), ('a', 2)].
    # Replaces the original hand-rolled first pass (which also used the
    # ambiguous name `l`).
    runs = [(ch, sum(1 for _ in grp)) for ch, grp in groupby(s[:n])]

    # Case 1: substrings of a single repeated character. A run of length k
    # contributes k*(k+1)//2 of them.
    total = sum(k * (k + 1) // 2 for _, k in runs)

    # Case 2: substrings of the form x...x y x...x -- a length-1 run flanked
    # by two runs of the same character; the shorter flank bounds the count.
    for i in range(1, len(runs) - 1):
        if runs[i][1] == 1 and runs[i - 1][0] == runs[i + 1][0]:
            total += min(runs[i - 1][1], runs[i + 1][1])
    return total
# solution 2
# solution 2
def substrCount(n, s):
    """Count "special" substrings of s in a single left-to-right pass.

    A substring is special when all characters are equal, or all are equal
    except exactly the middle one.  Instead of run-length encoding first,
    this version extends the answer incrementally while scanning.

    NOTE(review): the loop iterates over all of s and ignores n entirely —
    assumes n == len(s); confirm at the call site.
    """
    tot = 0
    count_sequence = 0
    prev = ''
    for i,v in enumerate(s):
        # First increase the counter as if v extends the current run of
        # identical characters.
        count_sequence += 1
        if i and (prev != v):
            # If this is not the first char in the string
            # and it is not the same as the previous char,
            # we should check for sequences x.x, xx.xx, xxx.xxx etc.,
            # and we know the right side can't be longer than
            # the run we already found on the left side.
            j = 1
            while ((i-j) >= 0) and ((i+j) < len(s)) and j <= count_sequence:
                # Make sure the chars to the right and left are equal
                # to the char of the previously found sequence.
                if s[i-j] == prev == s[i+j]:
                    # If so, increase the total score and step one step further out.
                    tot += 1
                    j += 1
                else:
                    # No need to loop any further if this iteration did
                    # not find an x.x pattern.
                    break
            # The current char differs from the previous one, so the new
            # run starts here with length 1.
            count_sequence = 1
        # Every position contributes one substring per character of the
        # run it extends (equivalent to the k*(k+1)//2 run formula).
        tot += count_sequence
        prev = v
    return tot
|
var assert = require('assert');
var fs = require('fs');
var fixedPath = process.cwd() + '/test/receipts/apple';
describe('#### Apple ####', function () {
it('Can parse the validated subscription receipt with duplicates', function (done) {
    var iap = require('../');
    // Six renewals of one auto-renewing subscription: every entry shares
    // original_transaction_id 1000000381600687 but has its own
    // transaction_id and purchase date.
    var list = [{"quantity":"1","product_id":"basicmembership","transaction_id":"1000000381600687","original_transaction_id":"1000000381600687","purchase_date":"2018-03-08 19:58:55 Etc/GMT","purchase_date_ms":"1520539135000","purchase_date_pst":"2018-03-08 11:58:55 America/Los_Angeles","original_purchase_date":"2018-03-08 19:58:56 Etc/GMT","original_purchase_date_ms":"1520539136000","original_purchase_date_pst":"2018-03-08 11:58:56 America/Los_Angeles","expires_date":"2018-03-08 20:03:55 Etc/GMT","expires_date_ms":"1520539435000","expires_date_pst":"2018-03-08 12:03:55 America/Los_Angeles","web_order_line_item_id":"1000000038056225","is_trial_period":"false","is_in_intro_offer_period":"false"},{"quantity":"1","product_id":"basicmembership","transaction_id":"1000000381600903","original_transaction_id":"1000000381600687","purchase_date":"2018-03-08 20:03:55 Etc/GMT","purchase_date_ms":"1520539435000","purchase_date_pst":"2018-03-08 12:03:55 America/Los_Angeles","original_purchase_date":"2018-03-08 19:58:56 Etc/GMT","original_purchase_date_ms":"1520539136000","original_purchase_date_pst":"2018-03-08 11:58:56 America/Los_Angeles","expires_date":"2018-03-08 20:08:55 Etc/GMT","expires_date_ms":"1520539735000","expires_date_pst":"2018-03-08 12:08:55 America/Los_Angeles","web_order_line_item_id":"1000000038056226","is_trial_period":"false","is_in_intro_offer_period":"false"},{"quantity":"1","product_id":"basicmembership","transaction_id":"1000000381601336","original_transaction_id":"1000000381600687","purchase_date":"2018-03-08 20:09:15 Etc/GMT","purchase_date_ms":"1520539755000","purchase_date_pst":"2018-03-08 12:09:15 America/Los_Angeles","original_purchase_date":"2018-03-08 19:58:56 Etc/GMT","original_purchase_date_ms":"1520539136000","original_purchase_date_pst":"2018-03-08 11:58:56 America/Los_Angeles","expires_date":"2018-03-08 20:14:15 Etc/GMT","expires_date_ms":"1520540055000","expires_date_pst":"2018-03-08 12:14:15 America/Los_Angeles","web_order_line_item_id":"1000000038056264","is_trial_period":"false","is_in_intro_offer_period":"false"},{"quantity":"1","product_id":"basicmembership","transaction_id":"1000000381601740","original_transaction_id":"1000000381600687","purchase_date":"2018-03-08 20:14:30 Etc/GMT","purchase_date_ms":"1520540070000","purchase_date_pst":"2018-03-08 12:14:30 America/Los_Angeles","original_purchase_date":"2018-03-08 19:58:56 Etc/GMT","original_purchase_date_ms":"1520539136000","original_purchase_date_pst":"2018-03-08 11:58:56 America/Los_Angeles","expires_date":"2018-03-08 20:19:30 Etc/GMT","expires_date_ms":"1520540370000","expires_date_pst":"2018-03-08 12:19:30 America/Los_Angeles","web_order_line_item_id":"1000000038056312","is_trial_period":"false","is_in_intro_offer_period":"false"},{"quantity":"1","product_id":"basicmembership","transaction_id":"1000000381602052","original_transaction_id":"1000000381600687","purchase_date":"2018-03-08 20:19:30 Etc/GMT","purchase_date_ms":"1520540370000","purchase_date_pst":"2018-03-08 12:19:30 America/Los_Angeles","original_purchase_date":"2018-03-08 19:58:56 Etc/GMT","original_purchase_date_ms":"1520539136000","original_purchase_date_pst":"2018-03-08 11:58:56 America/Los_Angeles","expires_date":"2018-03-08 20:24:30 Etc/GMT","expires_date_ms":"1520540670000","expires_date_pst":"2018-03-08 12:24:30 America/Los_Angeles","web_order_line_item_id":"1000000038056364","is_trial_period":"false","is_in_intro_offer_period":"false"},{"quantity":"1","product_id":"basicmembership","transaction_id":"1000000381602343","original_transaction_id":"1000000381600687","purchase_date":"2018-03-08 20:24:30 Etc/GMT","purchase_date_ms":"1520540670000","purchase_date_pst":"2018-03-08 12:24:30 America/Los_Angeles","original_purchase_date":"2018-03-08 19:58:56 Etc/GMT","original_purchase_date_ms":"1520539136000","original_purchase_date_pst":"2018-03-08 11:58:56 America/Los_Angeles","expires_date":"2018-03-08 20:29:30 Etc/GMT","expires_date_ms":"1520540970000","expires_date_pst":"2018-03-08 12:29:30 America/Los_Angeles","web_order_line_item_id":"1000000038056406","is_trial_period":"false","is_in_intro_offer_period":"false"}];
    var data = {
        service: iap.APPLE,
        receipt: {
            in_app: [],
            latest_receipt_info: list
        }
    };
    var res = iap.getPurchaseData(data);
    console.log(res);
    // Duplicate renewals must collapse into a single purchase record that
    // keeps the original transaction id and the latest purchase date.
    assert.equal(res.length, 1);
    assert.equal(res[0].originalTransactionId, '1000000381600687');
    assert.equal(res[0].purchaseDateMs, 1520540670000);
    assert.equal(res[0].isTrial, false);
    done();
});
it('Can validate Unity apple in-app-purchase w/ auto-service detection', function (done) {
    // Fixture receipt exported from a Unity client.
    var path = process.cwd() + '/test/receipts/unity_apple';
    var iap = require('../');
    iap.config({
        verbose: true
    });
    iap.setup(function (error) {
        assert.equal(error, undefined);
        fs.readFile(path, function (error, data) {
            assert.equal(error, undefined);
            var receipt = data.toString();
            // No service argument: the module must auto-detect Apple.
            iap.validate(receipt, function (error, response) {
                if (error) {
                    console.error('Error >>>>', error);
                }
                assert.equal(error, undefined);
                assert.equal(iap.isValidated(response), true);
                var data = iap.getPurchaseData(response, { ignoreExpired: true });
                for (var i = 0, len = data.length; i < len; i++) {
                    // NOTE(review): logs the whole array each iteration;
                    // data[i] was probably intended.
                    console.log('parsedPurchaseData:', i, data);
                    assert(data[i].productId);
                    assert(data[i].purchaseDate);
                    assert(data[i].quantity);
                }
                done();
            });
        });
    });
});
it('Can validate apple in-app-purchase w/ auto-service detection', function (done) {
    // The receipt path may be passed on the command line as --path=...;
    // fall back to the checked-in fixture when the flag value is 'false'.
    var path = process.argv[process.argv.length - 1].replace('--path=', '');
    if (path === 'false') {
        path = fixedPath;
    }
    var iap = require('../');
    iap.config({
        verbose: true
    });
    iap.setup(function (error) {
        assert.equal(error, undefined);
        fs.readFile(path, function (error, data) {
            assert.equal(error, undefined);
            var receipt = data.toString();
            // No service argument: the module must auto-detect Apple.
            iap.validate(receipt, function (error, response) {
                if (error) {
                    console.error('Error >>>>', error);
                }
                assert.equal(error, undefined);
                assert.equal(iap.isValidated(response), true);
                var data = iap.getPurchaseData(response, { ignoreExpired: true });
                for (var i = 0, len = data.length; i < len; i++) {
                    // NOTE(review): logs the whole array each iteration;
                    // data[i] was probably intended.
                    console.log('parsedPurchaseData:', i, data);
                    assert(data[i].productId);
                    assert(data[i].purchaseDate);
                    assert(data[i].quantity);
                }
                done();
            });
        });
    });
});
it('Can validate apple in-app-purchase w/ Promise & auto service detection', function (done) {
    // Skip silently on runtimes without a global Promise.
    if (!Promise) {
        return done();
    }
    var path = process.argv[process.argv.length - 1].replace('--path=', '');
    if (path === 'false') {
        path = fixedPath;
    }
    var iap = require('../');
    iap.config({
        verbose: true
    });
    var receipt = fs.readFileSync(path, 'utf8');
    // Promise-style API: setup() then validate() with no callbacks.
    iap.setup()
        .then(function () {
            iap.validate(receipt).then(onSuccess).catch(onError);
        }).catch(function (error) {
            throw error;
        });
    function onSuccess(response) {
        var data = iap.getPurchaseData(response, { ignoreExpired: true });
        for (var i = 0, len = data.length; i < len; i++) {
            // NOTE(review): logs the whole array each iteration; data[i]
            // was probably intended.
            console.log('parsedPurchaseData:', i, data);
            assert(data[i].productId);
            assert(data[i].purchaseDate);
            assert(data[i].quantity);
        }
        done();
    }
    function onError(error) {
        throw error;
    }
});
it('Can validate apple in-app-purchase', function (done) {
    var path = process.argv[process.argv.length - 1].replace('--path=', '');
    if (path === 'false') {
        path = fixedPath;
    }
    var iap = require('../');
    iap.config({
        verbose: true
    });
    iap.setup(function (error) {
        assert.equal(error, undefined);
        fs.readFile(path, function (error, data) {
            assert.equal(error, undefined);
            var receipt = data.toString();
            // Explicit service constant instead of auto-detection.
            iap.validate(iap.APPLE, receipt, function (error, response) {
                assert.equal(error, undefined);
                assert.equal(iap.isValidated(response), true);
                var data = iap.getPurchaseData(response, { ignoreExpired: true });
                for (var i = 0, len = data.length; i < len; i++) {
                    // NOTE(review): logs the whole array each iteration;
                    // data[i] was probably intended.
                    console.log('parsedPurchaseData:', i, data);
                    assert(data[i].productId);
                    assert(data[i].purchaseDate);
                    assert(data[i].quantity);
                }
                done();
            });
        });
    });
});
it('Can validate apple in-app-purchase w/ .validateOnce() and w/ auto-service detection', function (done) {
    var path = process.argv[process.argv.length - 1].replace('--path=', '');
    if (path === 'false') {
        path = fixedPath;
    }
    var iap = require('../');
    iap.config({
        verbose: true
    });
    iap.setup(function (error) {
        assert.equal(error, undefined);
        fs.readFile(path, function (error, data) {
            assert.equal(error, undefined);
            var receipt = data.toString();
            // validateOnce(receipt, secret, cb): no service argument, so the
            // service is auto-detected; null secret for Apple.
            iap.validateOnce(receipt, null, function (error, response) {
                assert.equal(error, undefined);
                assert.equal(iap.isValidated(response), true);
                var data = iap.getPurchaseData(response, { ignoreExpired: true });
                for (var i = 0, len = data.length; i < len; i++) {
                    // NOTE(review): logs the whole array each iteration;
                    // data[i] was probably intended.
                    console.log('parsedPurchaseData:', i, data);
                    assert(data[i].productId);
                    assert(data[i].purchaseDate);
                    assert(data[i].quantity);
                }
                done();
            });
        });
    });
});
it('Can validate Unity apple in-app-purchase w/ .validateOnce()', function (done) {
    // Fixed Unity fixture; not overridable from the command line.
    var path = process.cwd() + '/test/receipts/unity_apple';
    var iap = require('../');
    iap.config({
        verbose: true
    });
    iap.setup(function (error) {
        assert.equal(error, undefined);
        fs.readFile(path, function (error, data) {
            assert.equal(error, undefined);
            var receipt = data.toString();
            iap.validateOnce(receipt, null, function (error, response) {
                assert.equal(error, undefined);
                assert.equal(iap.isValidated(response), true);
                var data = iap.getPurchaseData(response, { ignoreExpired: true });
                for (var i = 0, len = data.length; i < len; i++) {
                    // NOTE(review): logs the whole array each iteration;
                    // data[i] was probably intended.
                    console.log('parsedPurchaseData:', i, data);
                    assert(data[i].productId);
                    assert(data[i].purchaseDate);
                    assert(data[i].quantity);
                }
                done();
            });
        });
    });
});
it('Can validate apple in-app-purchase w/ .validateOnce()', function (done) {
    var path = process.argv[process.argv.length - 1].replace('--path=', '');
    if (path === 'false') {
        path = fixedPath;
    }
    var iap = require('../');
    iap.config({
        verbose: true
    });
    iap.setup(function (error) {
        assert.equal(error, undefined);
        fs.readFile(path, function (error, data) {
            assert.equal(error, undefined);
            var receipt = data.toString();
            // validateOnce(service, secret, receipt, cb) with the service
            // given explicitly and a null Apple shared secret.
            iap.validateOnce(iap.APPLE, null, receipt, function (error, response) {
                assert.equal(error, undefined);
                assert.equal(iap.isValidated(response), true);
                var data = iap.getPurchaseData(response, { ignoreExpired: true });
                for (var i = 0, len = data.length; i < len; i++) {
                    // NOTE(review): logs the whole array each iteration;
                    // data[i] was probably intended.
                    console.log('parsedPurchaseData:', i, data);
                    assert(data[i].productId);
                    assert(data[i].purchaseDate);
                    assert(data[i].quantity);
                }
                done();
            });
        });
    });
});
it('Can NOT validate apple in-app-purchase with incorrect receipt w/ auto-service detection', function (done) {
    // Fix: the original computed a `path` from process.argv here but never
    // used it (this test validates a literal fake receipt); dead code removed.
    var iap = require('../');
    iap.config({
        verbose: true
    });
    iap.setup(function (error) {
        assert.equal(error, undefined);
        // A malformed receipt must produce an error and fail validation.
        iap.validate('fake-receipt', function (error, response) {
            assert(error);
            assert.equal(iap.isValidated(response), false);
            done();
        });
    });
});
it('Can NOT validate apple in-app-purchase with incorrect receipt', function (done) {
    // Fix: the original computed a `path` from process.argv here but never
    // used it (this test validates a literal fake receipt); dead code removed.
    var iap = require('../');
    iap.config({
        verbose: true
    });
    iap.setup(function (error) {
        assert.equal(error, undefined);
        // A malformed receipt must produce an error and fail validation,
        // also when the service is given explicitly.
        iap.validate(iap.APPLE, 'fake-receipt', function (error, response) {
            assert(error);
            assert.equal(iap.isValidated(response), false);
            done();
        });
    });
});
it('Can get an error response', function (done) {
    // Fix: the original computed a `path` from process.argv here but never
    // used it (this test validates a literal fake receipt); dead code removed.
    var iap = require('../');
    iap.config({
        verbose: true
    });
    iap.setup(function (error) {
        assert.equal(error, undefined);
        iap.validate(iap.APPLE, 'fake-receipt', function (error, response) {
            // Even on failure a response object must be returned so callers
            // can inspect the validator's error payload.
            assert(error);
            assert(response);
            assert.equal(iap.isValidated(response), false);
            done();
        });
    });
});
it('Can detect a valid receipt that bought nothing', function (done) {
    // A structurally valid (base64 PKCS#7) Mac App Store receipt that
    // contains no purchases; validation must still be rejected.
    var receipt = 'MIISnwYJKoZIhvcNAQcCoIISkDCCEowCAQExCzAJBgUrDgMCGgUAMIICUAYJKoZIhvcNAQcBoIICQQSCAj0xggI5MAoCARQCAQEEAgwAMAsCAQ4CAQEEAwIBUjALAgEZAgEBBAMCAQMwDAIBCgIBAQQEFgI0KzANAgENAgEBBAUCAwE6EDAOAgEBAgEBBAYCBDyGdAUwDgIBCQIBAQQGAgRQMjM0MA4CAQsCAQEEBgIEBwahzzAOAgEQAgEBBAYCBDB3db4wDwIBAwIBAQQHDAUxLjEuMjAPAgETAgEBBAcMBTEuMS4yMBACAQ8CAQEECAIGGXrXariDMBQCAQACAQEEDAwKUHJvZHVjdGlvbjAYAgEEAgECBBC7FVpt/pQ57AzKdFTnZWVZMBwCAQUCAQEEFEaRyt42pW8CqLvhb5sBWSB7vO4tMB4CAQgCAQEEFhYUMjAxNS0wOS0xNlQxOTo0ODoyNFowHgIBDAIBAQQWFhQyMDE1LTA5LTE2VDE5OjQ4OjI0WjAeAgESAgEBBBYWFDIwMTUtMDktMTZUMTk6NDg6MjRaMCMCAQICAQEEGwwZY29tLm11c3RhZmFkdXIuS2FyZ290YWtpcDBPAgEHAgEBBEcL7L2KhrByZ7oTxHIeACRceFDd/jxoo6fl4bazDrH5bStHgKP3e+z+FoHdkp2UU53CKviFSeYG19wRp4wFSXDXz3anVLDl+zBcAgEGAgEBBFQ/qVA9Mz6Hl6GCFLXIjDm0Ey8AZFdT5waMtZs4Ks2nCgXCY4t/yLcz1WrVj7PaHJJq+FMb+deRG0yFufMMJb0Fq0G8nzm2dSVKmXjzmbCmTK/tQP6ggg5VMIIFazCCBFOgAwIBAgIIGFlDIXJ0nPwwDQYJKoZIhvcNAQEFBQAwgZYxCzAJBgNVBAYTAlVTMRMwEQYDVQQKDApBcHBsZSBJbmMuMSwwKgYDVQQLDCNBcHBsZSBXb3JsZHdpZGUgRGV2ZWxvcGVyIFJlbGF0aW9uczFEMEIGA1UEAww7QXBwbGUgV29ybGR3aWRlIERldmVsb3BlciBSZWxhdGlvbnMgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAxMTExMjE1ODAxWhcNMTUxMTExMjE1ODAxWjB4MSYwJAYDVQQDDB1NYWMgQXBwIFN0b3JlIFJlY2VpcHQgU2lnbmluZzEsMCoGA1UECwwjQXBwbGUgV29ybGR3aWRlIERldmVsb3BlciBSZWxhdGlvbnMxEzARBgNVBAoMCkFwcGxlIEluYy4xCzAJBgNVBAYTAlVTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtpPCtw8kXu3SNEjohQXjM5RmW+gnN797Q0nr+ckXlzNzMklKyG9oKRS4lKb0ZUs7R9fRLGZLuJjZvPUSUcvmL6n0s58c6Cj8UsCBostWYoBaopGuTkDDfSgu19PtTdmtivvyZ0js63m9Am0EWRj/jDefijfxYv+7ogNQhwrVkuCGEV4jRvXhJWMromqMshC3kSNNmj+DQPJkCVr3ja5WXNT1tG4DGwRdLBuvAJkX16X7SZHO4qERMV4ZAcDazlCDXsjrSTtJGirq4J+/0kZJnNiroYNhbA/B/LOtmXUq/COb7yII63tZFBGfczQt5rk5pjv35j7syqb7q68m34+IgQIDAQABo4IB2DCCAdQwDAYDVR0TAQH/BAIwADAfBgNVHSMEGDAWgBSIJxcJqbYYYIvs67r2R1nFUlSjtzBNBgNVHR8ERjBEMEKgQKA+hjxodHRwOi8vZGV2ZWxvcGVyLmFwcGxlLmNvbS9jZXJ0aWZpY2F0aW9uYXV0aG9yaXR5L3d3ZHJjYS5jcmwwDgYDVR0PAQH/BAQDAgeAMB0GA1UdDgQWBBR1diSia2IMlzSh+k5eCAwiv3PvvjCCAREGA1UdIASCAQgwggEEMIIBAAYKKoZIhvdjZAUGATCB8TCBwwYIKwYBBQUHAgIwgbYMgbNSZWxpYW5jZSBvbiB0aGlzIGNlcnRpZmljYXRlIGJ5IGFueSBwYXJ0eSBhc3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJsZSBzdGFuZGFyZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRlIHBvbGljeSBhbmQgY2VydGlmaWNhdGlvbiBwcmFjdGljZSBzdGF0ZW1lbnRzLjApBggrBgEFBQcCARYdaHR0cDovL3d3dy5hcHBsZS5jb20vYXBwbGVjYS8wEAYKKoZIhvdjZAYLAQQCBQAwDQYJKoZIhvcNAQEFBQADggEBAKA78Ye8abS3g3wZ9J/EAmTfAsmOMXPLHD7cJgeL/Z7z7b5D1o1hLeTw3BZzAdY0o2kZdxS/uVjHUsmGAH9sbICXqZmF6HjzmhKnfjg4ZPMEy1/y9kH7ByXLAiFx80Q/0OJ7YfdC46u/d2zdLFCcgITFpW9YWXpGMUFouxM1RUKkjPoR1UsW8jI13h+80pldyOYCMlmQ6I3LOd8h2sN2+3o2GhYamEyFG+YrRS0vWRotxprWZpKj0jZSUIAgTTPIsprWU2KxYFLw9fd9EFDkEr+9cb60gMdtxG9bOTXR57fegSAnjjhcgoc6c2DE1vEcoKlmRH7ODCibI3+s7OagO90wggQjMIIDC6ADAgECAgEZMA0GCSqGSIb3DQEBBQUAMGIxCzAJBgNVBAYTAlVTMRMwEQYDVQQKEwpBcHBsZSBJbmMuMSYwJAYDVQQLEx1BcHBsZSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEWMBQGA1UEAxMNQXBwbGUgUm9vdCBDQTAeFw0wODAyMTQxODU2MzVaFw0xNjAyMTQxODU2MzVaMIGWMQswCQYDVQQGEwJVUzETMBEGA1UECgwKQXBwbGUgSW5jLjEsMCoGA1UECwwjQXBwbGUgV29ybGR3aWRlIERldmVsb3BlciBSZWxhdGlvbnMxRDBCBgNVBAMMO0FwcGxlIFdvcmxkd2lkZSBEZXZlbG9wZXIgUmVsYXRpb25zIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyjhUpstWqsgkOUjpjO7sX7h/JpG8NFN6znxjgGF3ZF6lByO2Of5QLRVWWHAtfsRuwUqFPi/w3oQaoVfJr3sY/2r6FRJJFQgZrKrbKjLtlmNoUhU9jIrsv2sYleADrAF9lwVnzg6FlTdq7Qm2rmfNUWSfxlzRvFduZzWAdjakh4FuOI/YKxVOeyXYWr9Og8GN0pPVGnG1YJydM05V+RJYDIa4Fg3B5XdFjVBIuist5JSF4ejEncZopbCj/Gd+cLoCWUt3QpE5ufXN4UzvwDtIjKblIV39amq7pxY1YNLmrfNGKcnow4vpecBqYWcVsvD95Wi8Yl9uz5nd7xtj/pJlqwIDAQABo4GuMIGrMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSIJxcJqbYYYIvs67r2R1nFUlSjtzAfBgNVHSMEGDAWgBQr0GlHlHYJ/vRrjS5ApvdHTX8IXjA2BgNVHR8ELzAtMCugKaAnhiVodHRwOi8vd3d3LmFwcGxlLmNvbS9hcHBsZWNhL3Jvb3QuY3JsMBAGCiqGSIb3Y2QGAgEEAgUAMA0GCSqGSIb3DQEBBQUAA4IBAQDaMgCWxVSU0zuCN2Z9LmjVw8a4yyaMSJDPEyRqRo5j1PDQEwbd2MTBNxXyMxM5Ji3OLlVA4wsDr/oSwucNIbjVgM+sKC/OLbNOr4YZBMbpUN1MKUcQI/xsuxuYa0iJ4Vud3kbbNYU17z7Q4lhLOPTtdVofXHAdVjkS5eENEeSJJQa91bQVjl7QWZeQ6UuB4t8Yr0R0HhmgOkfMkR066yNa/qUtl/d7u9aHRkKF61I9JrJjqLSxyo/0zOKzyEfgv5pZg/ramFMqgvV8ZS6V2TNd9e1lzDE3xVoE6Gvh54gDSnWemyjLSkCIZUN13cs6JSPFnlf4Ls7SqZJecy4vJXUVMIIEuzCCA6OgAwIBAgIBAjANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQGEwJVUzETMBEGA1UEChMKQXBwbGUgSW5jLjEmMCQGA1UECxMdQXBwbGUgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxFjAUBgNVBAMTDUFwcGxlIFJvb3QgQ0EwHhcNMDYwNDI1MjE0MDM2WhcNMzUwMjA5MjE0MDM2WjBiMQswCQYDVQQGEwJVUzETMBEGA1UEChMKQXBwbGUgSW5jLjEmMCQGA1UECxMdQXBwbGUgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxFjAUBgNVBAMTDUFwcGxlIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkkakJH5HbHkdQ6wXtXnmELes2oldMVeyLGYne+Uts9QerIjAC6Bg++FAJ039BqJj50cpmnCRrEdCju+QbKsMflZ56DKRHi1vUFjczy8QPTc4UadHJGXL1XQ7Vf1+b8iUDulWPTV0N8WQ1IxVLFVkds5T39pyez1C6wVhQZ48ItCD3y6wsIG9wtj8BMIy3Q88PnT3zK0koGsj+zrW5DtleHNbLPbU6rfQPDgCSC7EhFi501TwN22IWq6NxkkdTVcGvL0Gz+PvjcM3mo0xFfh9Ma1CWQYnEdGILEINBhzOKgbEwWOxaBDKMaLOPHd5lc/9nXmW8Sdh2nzMUZaF3lMktAgMBAAGjggF6MIIBdjAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUK9BpR5R2Cf70a40uQKb3R01/CF4wHwYDVR0jBBgwFoAUK9BpR5R2Cf70a40uQKb3R01/CF4wggERBgNVHSAEggEIMIIBBDCCAQAGCSqGSIb3Y2QFATCB8jAqBggrBgEFBQcCARYeaHR0cHM6Ly93d3cuYXBwbGUuY29tL2FwcGxlY2EvMIHDBggrBgEFBQcCAjCBthqBs1JlbGlhbmNlIG9uIHRoaXMgY2VydGlmaWNhdGUgYnkgYW55IHBhcnR5IGFzc3VtZXMgYWNjZXB0YW5jZSBvZiB0aGUgdGhlbiBhcHBsaWNhYmxlIHN0YW5kYXJkIHRlcm1zIGFuZCBjb25kaXRpb25zIG9mIHVzZSwgY2VydGlmaWNhdGUgcG9saWN5IGFuZCBjZXJ0aWZpY2F0aW9uIHByYWN0aWNlIHN0YXRlbWVudHMuMA0GCSqGSIb3DQEBBQUAA4IBAQBcNplMLXi37Yyb3PN3m/J20ncwT8EfhYOFG5k9RzfyqZtAjizUsZAS2L70c5vu0mQPy3lPNNiiPvl4/2vIB+x9OYOLUyDTOMSxv5pPCmv/K/xZpwUJfBdAVhEedNO3iyM7R6PVbyTi69G3cN8PReEnyvFteO3ntRcXqNx+IjXKJdXZD9Zr1KIkIxH3oayPc4FgxhtbCS+SsvhESPBgOJ4V9T0mZyCKM2r3DYLP3uujL/lTaltkwGMzd/c6ByxW69oPIQ7aunMZT7XZNn/Bh1XZp5m5MkL72NVxnn6hUrcbvZNCJBIqxw8dtk2cXmPIS4AXUKqK1drk/NAJBzewdXUhMYIByzCCAccCAQEwgaMwgZYxCzAJBgNVBAYTAlVTMRMwEQYDVQQKDApBcHBsZSBJbmMuMSwwKgYDVQQLDCNBcHBsZSBXb3JsZHdpZGUgRGV2ZWxvcGVyIFJlbGF0aW9uczFEMEIGA1UEAww7QXBwbGUgV29ybGR3aWRlIERldmVsb3BlciBSZWxhdGlvbnMgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkCCBhZQyFydJz8MAkGBSsOAwIaBQAwDQYJKoZIhvcNAQEBBQAEggEAqFgxMO7Mtc7RB+9wV//JERsLCs0mcQL/De0WaSJlDRUIqrzw5nnqCiXNSavqooRAb90rcMZsAFCkU52c17hwt59qM0cxAaqqD2wOyW0Yv2i0itTShKTc9goYjidZevrbW1vGTRALwGZ0DxR75vjGceKpEN59U3Z22oNM6QjSpS3KuBsFmlyQg6kRSmvAOWjNeM6PfVJMVWEvlMVE3+eFeGApdXskzBULxQc26cAAUF6H0Gb9YgmE1QjUTR8AATECOm4Gdr/+wq+hAOYeqTvxncgiiUrRsRvrFnWkM6xsWL2NaxAEBwmazmiOSKNMknJ1NYezyPk5bguDU1Sn/BIgIw==';
    var iap = require('../');
    iap.setup(function (error) {
        assert.equal(error, undefined);
        iap.validate(iap.APPLE, receipt, function (error, response) {
            // Empty receipt: must error out rather than validate.
            assert(error);
            assert.equal(iap.isValidated(response), false);
            done();
        });
    });
});
it('can parse both in_app and latest_receipt_info array with .getPurchaseData()', function () {
    var iap = require('../');
    // Synthetic validated response: one purchase inside receipt.in_app and
    // two entries in a top-level latest_receipt_info array. The product ids
    // encode their origin so the assertions below can tell them apart.
    var rec = {
        service: iap.APPLE,
        receipt: {
            in_app: [
                { quantity: '1',
                  product_id: 'in_app.0',
                  transaction_id: '210000259386802',
                  original_transaction_id: '210000259386802',
                  purchase_date: '2016-04-14 16:03:33 Etc/GMT',
                  purchase_date_ms: '1460649813000',
                  purchase_date_pst: '2016-04-14 09:03:33 America/Los_Angeles',
                  original_purchase_date: '2016-04-14 16:03:34 Etc/GMT',
                  original_purchase_date_ms: '1460649814000',
                  original_purchase_date_pst: '2016-04-14 09:03:34 America/Los_Angeles',
                  expires_date: '2016-05-14 16:03:33 Etc/GMT',
                  expires_date_ms: '1463241813000',
                  expires_date_pst: '2016-05-14 09:03:33 America/Los_Angeles',
                  web_order_line_item_id: '210000038560504',
                  is_trial_period: 'false' }
            ]
        },
        latest_receipt_info: [
            { quantity: '1',
              product_id: 'latest_receipt_info.0',
              transaction_id: '210000259386802',
              original_transaction_id: '210000259386802',
              purchase_date: '2016-04-14 16:03:33 Etc/GMT',
              purchase_date_ms: '1460649813982',
              purchase_date_pst: '2016-04-14 09:03:33 America/Los_Angeles',
              original_purchase_date: '2016-04-14 16:03:34 Etc/GMT',
              original_purchase_date_ms: '1460649814000',
              original_purchase_date_pst: '2016-04-14 09:03:34 America/Los_Angeles',
              expires_date: '2016-05-14 16:03:33 Etc/GMT',
              expires_date_ms: '1463241813982',
              expires_date_pst: '2016-05-14 09:03:33 America/Los_Angeles',
              web_order_line_item_id: '210000038560504',
              is_trial_period: 'false' },
            { quantity: '1',
              product_id: 'latest_receipt_info.1',
              transaction_id: '210000265773203',
              original_transaction_id: '210000259386802',
              purchase_date: '2016-05-14 16:03:33 Etc/GMT',
              purchase_date_ms: '1463241813000',
              purchase_date_pst: '2016-05-14 09:03:33 America/Los_Angeles',
              original_purchase_date: '2016-05-14 10:03:37 Etc/GMT',
              original_purchase_date_ms: '1463220217552',
              original_purchase_date_pst: '2016-05-14 03:03:37 America/Los_Angeles',
              expires_date: '2016-06-14 16:03:33 Etc/GMT',
              expires_date_ms: '1465920213000',
              expires_date_pst: '2016-06-14 09:03:33 America/Los_Angeles',
              web_order_line_item_id: '210000038560503',
              is_trial_period: 'false' }
        ]
    };
    iap.config({
        verbose: true
    });
    var parsed = iap.getPurchaseData(rec);
    // Every entry from both sources must appear in the parsed output.
    var res = [
        'in_app.0',
        'latest_receipt_info.0',
        'latest_receipt_info.1'
    ];
    for (var i = 0, len = parsed.length; i < len; i++) {
        if (res.indexOf(parsed[i].productId) === -1) {
            console.error(parsed[i]);
            throw new Error('missing purchase data');
        }
        console.log(parsed[i].productId, parsed[i].transactionId);
    }
});
it('can parse without latest_receipt_info array with .getPurchaseData()', function () {
    var iap = require('../');
    // Validated response with only receipt.in_app and no
    // latest_receipt_info — the parser must cope with its absence.
    var rec = {
        service: iap.APPLE,
        receipt: {
            in_app: [
                { quantity: '1',
                  product_id: 'in_app.0',
                  transaction_id: '210000259386802',
                  original_transaction_id: '210000259386802',
                  purchase_date: '2016-04-14 16:03:33 Etc/GMT',
                  purchase_date_ms: '1460649813000',
                  purchase_date_pst: '2016-04-14 09:03:33 America/Los_Angeles',
                  original_purchase_date: '2016-04-14 16:03:34 Etc/GMT',
                  original_purchase_date_ms: '1460649814000',
                  original_purchase_date_pst: '2016-04-14 09:03:34 America/Los_Angeles',
                  expires_date: '2016-05-14 16:03:33 Etc/GMT',
                  expires_date_ms: '1463241813000',
                  expires_date_pst: '2016-05-14 09:03:33 America/Los_Angeles',
                  web_order_line_item_id: '210000038560504',
                  is_trial_period: 'false' }
            ]
        }
    };
    iap.config({
        verbose: true
    });
    var parsed = iap.getPurchaseData(rec);
    var res = [
        'in_app.0',
    ];
    for (var i = 0, len = parsed.length; i < len; i++) {
        if (res.indexOf(parsed[i].productId) === -1) {
            throw new Error('missing purchase data');
        }
        console.log(parsed[i].productId, parsed[i].transactionId);
    }
});
});
|
/*
* Copyright 2018-2020 Redis Labs Ltd. and Contributors
*
* This file is available under the Redis Labs Source Available License Agreement
*/
#include "op_join.h"
#include "RG.h"
#include "../../query_ctx.h"
/* Forward declarations. */
static Record JoinConsume(OpBase *opBase);
static OpResult JoinInit(OpBase *opBase);
static OpBase *JoinClone(const ExecutionPlan *plan, const OpBase *opBase);
/* Create a new Join operation.
 * Join concatenates the result streams of its child operations:
 * it drains child 0, then child 1, and so on (UNION-style append). */
OpBase *NewJoinOp(const ExecutionPlan *plan) {
	OpJoin *op = rm_malloc(sizeof(OpJoin));
	op->stream = NULL;  // Current stream is chosen lazily in JoinInit.

	// Set our Op operations; unused callbacks are left NULL.
	OpBase_Init((OpBase *)op, OPType_JOIN, "Join", JoinInit, JoinConsume,
				NULL, NULL, JoinClone, NULL, false, plan);

	return (OpBase *)op;
}
/* Initialization callback: position the op at its first child stream.
 * NOTE(review): assumes the op has at least one child — confirm the
 * planner guarantees this before JoinInit runs. */
static OpResult JoinInit(OpBase *opBase) {
	OpJoin *op = (OpJoin *)opBase;
	// Start pulling from first stream.
	op->streamIdx = 0;
	op->stream = op->op.children[op->streamIdx];
	return OP_OK;
}
/* Consume callback: produce the next record by pulling from the current
 * child stream, advancing to the next child once the current one is
 * depleted. Returns NULL when every child stream has been exhausted. */
static Record JoinConsume(OpBase *opBase) {
	OpJoin *op = (OpJoin *)opBase;
	Record r = NULL;
	// Set when we cross a stream boundary; the ResultSet projection is
	// remapped on the first record produced by the new stream.
	bool update_column_map = false;
	while(!r) {
		// Try pulling from current stream.
		r = OpBase_Consume(op->stream);
		if(!r) {
			// Stream depleted, see if there's a new stream to pull from.
			op->streamIdx++;
			if(op->streamIdx >= op->op.childCount) break;  // All streams exhausted.
			op->stream = op->op.children[op->streamIdx];
			// Switched streams, need to update the ResultSet column mapping.
			update_column_map = true;
			continue;
		}
		if(update_column_map) {
			// We have a new record mapping, update the ResultSet column map to match it.
			ResultSet_MapProjection(QueryCtx_GetResultSet(), r);
			update_column_map = false;
		}
	}
	return r;
}
/* Clone callback: Join keeps no configuration beyond its children (which
 * the plan clones separately), so cloning is just building a fresh op. */
static inline OpBase *JoinClone(const ExecutionPlan *plan, const OpBase *opBase) {
	ASSERT(opBase->type == OPType_JOIN);
	return NewJoinOp(plan);
}
|
var Animals = ["dog","cat","rabit","hamster","horse","swan","mouse","ant","bear","shark",];
var i ;
var input;
var title;
var bns;
$( document ).ready(function() {
console.log( "ready!" );
var button;
for (i=0; i<Animals.length; i++){
button=$('<button/>').attr({
type: "button",
class:"btn btn-outline-primary",
id: "btns",
value: Animals[i]
}).text(Animals[i]);
$("#buttons").append(button);
console.log(i)
};
$("#button-addon1").click(function() {
bns = $("button");
input=$( "input[class=form-control]" ).val();
button=$('<button/>').attr({
type: "button",
class:"btn btn-outline-primary",
id: "btns",
value: input
}).text(input);
$("#buttons").append(button);
console.log(i)
Animals.push(input);
console.log(Animals)
});
$(document).on('click', '#btns' ,function (event) {
//Process button click event
title = this.value;
var queryURL ="https://api.giphy.com/v1/gifs/search?q="+ title +"&api_key=dc6zaTOxFJmzC&limit=10";
// "https://www.omdbapi.com/?t=" + title + "&y=&plot=short&apikey=trilogy";
$.ajax({
url: queryURL,
method: "GET"
}).then(function(response) {
console.log(response);
console.log(response.Runtime);
// -----------------------------------------------------------------------------
var results = response.data;
for (var i = 0; i < results.length; i++) {
var gifDiv = $("<figure>");
var gifDiv2 = $("<figcaption>");
var gifDiv3 = $("<figcaption>");
var Image = $("<img>");
Image.attr({
src: results[i].images.fixed_height_still.url,
still: results[i].images.fixed_height_still.url,
animate: results[i].images.fixed_height.url,
state: 'still',
class: 'gif img-thumbnail'
});
var Rating = results[i].rating;
var Title = results[i].title;
console.log(Rating);
console.log(Title);
gifDiv3.prepend("Title: "+Title);
gifDiv2.prepend("Rating: "+Rating);
gifDiv.prepend(gifDiv3);
gifDiv.prepend(gifDiv2);
gifDiv.prepend(Image);
$("#gifs").prepend(gifDiv);
}
$(".gif").on("click", function() {
// The attr jQuery method allows us to get or set the value of any attribute on our HTML element
var state = $(this).attr("state");
// If the clicked image's state is still, update its src attribute to what its data-animate value is.
// Then, set the image's data-state to animate
// Else set src to the data-still value
if (state === "still") {
$(this).attr("src", $(this).attr("animate"));
$(this).attr("state", "animate");
} else {
$(this).attr("src", $(this).attr("still"));
$(this).attr("state", "still");
}
});
});
});
});
|
# Copyright (c) 2013 NTT DOCOMO, INC.
# Copyright 2014 IBM Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The bare-metal admin extension."""
from oslo_config import cfg
from oslo_utils import importutils
import webob
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.i18n import _
ironic_client = importutils.try_import('ironicclient.client')
ironic_exc = importutils.try_import('ironicclient.exc')
CONF = cfg.CONF

# URL/policy alias for this extension (/v2.1/os-baremetal-nodes).
ALIAS = "os-baremetal-nodes"
authorize = extensions.os_compute_authorizer(ALIAS)

# Field names of the legacy nova-baremetal node representation, kept for
# API compatibility now that nodes are actually managed by Ironic.
node_fields = ['id', 'cpus', 'local_gb', 'memory_mb', 'pm_address',
               'pm_user', 'service_host', 'terminal_port', 'instance_uuid']

node_ext_fields = ['uuid', 'task_state', 'updated_at', 'pxe_config_path']

interface_fields = ['id', 'address', 'datapath_id', 'port_no']

# Pull the Ironic driver's config options into this module's CONF so they
# can be read as CONF.ironic.<option> below.
CONF.import_opt('api_version',
                'nova.virt.ironic.driver',
                group='ironic')
CONF.import_opt('api_endpoint',
                'nova.virt.ironic.driver',
                group='ironic')
CONF.import_opt('admin_username',
                'nova.virt.ironic.driver',
                group='ironic')
CONF.import_opt('admin_password',
                'nova.virt.ironic.driver',
                group='ironic')
CONF.import_opt('admin_tenant_name',
                'nova.virt.ironic.driver',
                group='ironic')
CONF.import_opt('compute_driver', 'nova.virt.driver')
def _check_ironic_client_enabled():
    """Raise a 'feature not supported' HTTP error when the optional
    ironicclient library could not be imported.
    """
    if ironic_client is not None:
        return
    common.raise_feature_not_supported()
def _get_ironic_client():
    """return an Ironic client.

    Builds a client from the admin credentials registered by the Ironic
    virt driver's config options.
    """
    # TODO(NobodyCam): Fix insecure setting
    # NOTE(review): CONF.ironic.admin_url is read here but, unlike the other
    # options used below, it is never registered via CONF.import_opt in this
    # module — confirm the ironic driver defines it before this runs.
    kwargs = {'os_username': CONF.ironic.admin_username,
              'os_password': CONF.ironic.admin_password,
              'os_auth_url': CONF.ironic.admin_url,
              'os_tenant_name': CONF.ironic.admin_tenant_name,
              'os_service_type': 'baremetal',
              'os_endpoint_type': 'public',
              'insecure': 'true',
              'ironic_url': CONF.ironic.api_endpoint}
    icli = ironic_client.get_client(CONF.ironic.api_version, **kwargs)
    return icli
def _no_ironic_proxy(cmd):
    """Reject a proxied write request, pointing the caller at the native
    Ironic command that replaces it.

    :param cmd: name of the equivalent Ironic CLI command.
    :raises webob.exc.HTTPBadRequest: always.
    """
    msg = _("Command Not supported. Please use Ironic "
            "command %(cmd)s to perform this "
            "action.") % {'cmd': cmd}
    raise webob.exc.HTTPBadRequest(explanation=msg)
class BareMetalNodeController(wsgi.Controller):
    """The Bare-Metal Node API controller for the OpenStack API.

    Bare-metal hosts are managed by Ironic: read operations (index/show)
    are proxied to the Ironic service, while writes and interface actions
    are rejected with a pointer to the equivalent Ironic command.
    """

    def _node_dict(self, node_ref):
        """Project a node reference onto the legacy bare-metal field set."""
        d = {}
        for f in node_fields:
            d[f] = node_ref.get(f)
        for f in node_ext_fields:
            d[f] = node_ref.get(f)
        return d

    @extensions.expected_errors((404, 501))
    def index(self, req):
        """List all Ironic nodes with a summary of their properties."""
        context = req.environ['nova.context']
        authorize(context)
        nodes = []
        # proxy command to Ironic
        _check_ironic_client_enabled()
        icli = _get_ironic_client()
        ironic_nodes = icli.node.list(detail=True)
        for inode in ironic_nodes:
            node = {'id': inode.uuid,
                    'interfaces': [],
                    'host': 'IRONIC MANAGED',
                    'task_state': inode.provision_state,
                    'cpus': inode.properties.get('cpus', 0),
                    'memory_mb': inode.properties.get('memory_mb', 0),
                    'disk_gb': inode.properties.get('local_gb', 0)}
            nodes.append(node)
        return {'nodes': nodes}

    @extensions.expected_errors((404, 501))
    def show(self, req, id):
        """Show a single Ironic node, including its port addresses."""
        context = req.environ['nova.context']
        authorize(context)
        # proxy command to Ironic
        _check_ironic_client_enabled()
        icli = _get_ironic_client()
        try:
            inode = icli.node.get(id)
        except ironic_exc.NotFound:
            msg = _("Node %s could not be found.") % id
            raise webob.exc.HTTPNotFound(explanation=msg)
        iports = icli.node.list_ports(id)
        node = {'id': inode.uuid,
                'interfaces': [],
                'host': 'IRONIC MANAGED',
                'task_state': inode.provision_state,
                'cpus': inode.properties.get('cpus', 0),
                'memory_mb': inode.properties.get('memory_mb', 0),
                'disk_gb': inode.properties.get('local_gb', 0),
                'instance_uuid': inode.instance_uuid}
        for port in iports:
            node['interfaces'].append({'address': port.address})
        return {'node': node}

    @extensions.expected_errors(400)
    def create(self, req, body):
        """Node creation is not proxied; direct the caller to Ironic."""
        _no_ironic_proxy("port-create")

    @extensions.expected_errors(400)
    def delete(self, req, id):
        """Node deletion is not proxied; direct the caller to Ironic."""
        # Bug fix: this previously told users to run "port-create" for a
        # delete request; the matching Ironic command is "port-delete".
        _no_ironic_proxy("port-delete")

    @wsgi.action('add_interface')
    @extensions.expected_errors(400)
    def _add_interface(self, req, id, body):
        """Interface addition is not proxied; direct the caller to Ironic."""
        _no_ironic_proxy("port-create")

    @wsgi.action('remove_interface')
    @extensions.expected_errors(400)
    def _remove_interface(self, req, id, body):
        """Interface removal is not proxied; direct the caller to Ironic."""
        _no_ironic_proxy("port-delete")
class BareMetalNodes(extensions.V21APIExtensionBase):
    """Admin-only bare-metal node administration."""

    name = "BareMetalNodes"
    alias = ALIAS
    version = 1

    def get_resources(self):
        """Expose the bare-metal node resource with a POST "action" member."""
        controller = BareMetalNodeController()
        return [
            extensions.ResourceExtension(
                ALIAS, controller, member_actions={"action": "POST"}),
        ]

    def get_controller_extensions(self):
        """Abstract hook of V21APIExtensionBase; required for the extension
        to load, but no controller extensions are provided here.
        """
        return []
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_checkapache
----------------------------------
Tests for `checkapache` module.
"""
import sys
import unittest
from contextlib import contextmanager
from click.testing import CliRunner
from checkapache import checkapache
from checkapache import cli
class TestCheckapache(unittest.TestCase):
    """Basic sanity tests for the checkapache CLI."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_000_something(self):
        pass

    def test_command_line_interface(self):
        # Invoking the CLI with no arguments should succeed and identify itself.
        cli_runner = CliRunner()
        default_result = cli_runner.invoke(cli.main)
        assert default_result.exit_code == 0
        assert 'checkapache.cli.main' in default_result.output
        # --help should also succeed and show the standard usage text.
        help_result = cli_runner.invoke(cli.main, ['--help'])
        assert help_result.exit_code == 0
        assert '--help Show this message and exit.' in help_result.output
|
const Employee = require("./Employee");
// An Engineer is an Employee with an associated GitHub account.
class Engineer extends Employee {
    constructor(name, id, email, github) {
        super(name, id, email);
        this.github = github;
    }

    // GitHub username supplied at construction time.
    getGithub() {
        return this.github;
    }

    // Role label reported by this employee type.
    getRole() {
        return "Engineer";
    }
}
module.exports = Engineer;
|
const RabbitClient = require('rabbit-client');
const Communicator = require('./Communicator');
/**
 * Orchestrates a set of Communicator instances - one per target service -
 * that all share a single RabbitClient connection and a common namespace.
 * Also hosts the middleware pipeline applied to services' output messages.
 */
module.exports = class CommunicationsManager {
  constructor(settings) {
    if (!settings) {
      throw new Error('No settings passed to the CommunicationsManager constructor');
    }
    const {
      rabbitClient,
      rabbitOptions,
      namespace = 'rabbit-communications',
    } = settings;
    // Either a ready-made client or the options to build one must be supplied.
    if (!rabbitClient && !rabbitOptions) {
      throw new Error(`
      It is necessary to pass to the constructor either your own rabbitClient (RabbitClient instance)
      or rabbitOptions to create RabbitClient instance within the service.
    `);
    }
    this.namespace = namespace;
    this.rabbitOptions = rabbitOptions;
    // An explicitly passed client wins over options-based construction.
    this.rabbitClient = rabbitClient || new RabbitClient(this.rabbitOptions.url, {
      appName: `${this.namespace}-communicator-manager`,
      sleepTime: 1e3,
      json: true,
      ...this.rabbitOptions,
    });
    this.communicatorMap = {}; // targetServiceName -> Communicator
    this.rootMiddlewareList = []; // middleware applied to every service
    this.specificMiddlewareMap = {}; // targetServiceName -> middleware list
    this.isManagerStarted = false;
  }
  isCommunicatorRegistered(targetServiceName) {
    return this.communicatorMap[targetServiceName] !== undefined;
  }
  /**
   * Create a Communicator for the given service; the optional outputListener
   * is attached immediately when it is a function.
   */
  registerCommunicator(targetServiceName, communicatorOptions, outputListener) {
    if (this.isCommunicatorRegistered(targetServiceName)) {
      throw new Error(`Communicator for service ${targetServiceName} is already registered`);
    }
    this.communicatorMap[targetServiceName] = new Communicator({
      ...communicatorOptions,
      rabbitClient: this.rabbitClient,
      namespace: this.namespace,
      manager: this,
      targetServiceName,
    });
    if (typeof outputListener === 'function') {
      this.addOutputListener(targetServiceName, outputListener);
    }
  }
  getCommunicator(targetServiceName) {
    if (!this.isCommunicatorRegistered(targetServiceName)) {
      throw new Error(`No communicator registered for service "${targetServiceName}"`);
    }
    return this.communicatorMap[targetServiceName];
  }
  // Send a message to one service; waits for the manager to be started first.
  async send(targetServiceName, data, additionalMetadata = {}) {
    await this.verifyStart();
    return this.getCommunicator(targetServiceName).send(data, additionalMetadata);
  }
  // Request/response-style call to one service.
  async ask(targetServiceName, subject, data, additionalMetadata = {}) {
    await this.verifyStart();
    return this.getCommunicator(targetServiceName).ask(subject, data, additionalMetadata);
  }
  // Send the same payload to every registered service.
  async broadcast(data, metadata = {}) {
    await this.verifyStart();
    return Promise.all(
      Object.values(this.communicatorMap).map(
        communicator => communicator.send(data, metadata),
      ),
    );
  }
  /**
   * Two call shapes:
   *   applyMiddleware(middleware | middleware[]) - root middleware for all services;
   *   applyMiddleware(serviceName | serviceName[], middleware | middleware[]) -
   *   middleware for the named service(s) only.
   */
  applyMiddleware(...args) {
    if (args[1] !== undefined) {
      const specificMiddlewareList = Array.isArray(args[1]) ? args[1] : [args[1]];
      const targetServiceNameList = Array.isArray(args[0]) ? args[0] : [args[0]];
      targetServiceNameList.forEach((serviceName) => {
        const list = this.specificMiddlewareMap[serviceName];
        if (list === undefined) {
          this.specificMiddlewareMap[serviceName] = [];
        }
        this.specificMiddlewareMap[serviceName].push(...specificMiddlewareList);
      });
    } else {
      const newRootMiddlewareList = Array.isArray(args[0]) ? args[0] : [args[0]];
      this.rootMiddlewareList.push(...newRootMiddlewareList);
    }
  }
  addOutputListener(targetServiceName, fn) {
    return this.getCommunicator(targetServiceName).addOutputListener(fn);
  }
  /**
   * Wrap each communicator's outputListener with the middleware chain
   * (root middleware first, then service-specific, the listener last) and
   * start all communicators.
   * NOTE(review): the final listener receives a `next` that would call past
   * the end of the chain - presumably listeners never call next; confirm.
   */
  async start() {
    await Promise.all(
      Object.values(this.communicatorMap).map((communicator) => {
        if (typeof communicator.outputListener === 'function') {
          const middlewareList = [
            ...this.rootMiddlewareList,
            ...(this.specificMiddlewareMap[communicator.targetServiceName] || []),
            communicator.outputListener,
          ];
          const middlewareChain = middlewareList.map(
            (m, i) => ctx => m(ctx, () => middlewareChain[i + 1](ctx)),
          );
          // eslint-disable-next-line no-param-reassign
          communicator.outputListener = ctx => middlewareChain[0](ctx);
        }
        return communicator.start();
      }),
    );
    this.isManagerStarted = true;
  }
  // Resolves once start() has completed; polls every 50ms until then.
  async verifyStart() {
    return new Promise((resolve) => {
      if (this.isManagerStarted) {
        resolve();
        return;
      }
      // wait for instance to start
      const intervalId = setInterval(() => {
        if (this.isManagerStarted) {
          clearInterval(intervalId);
          resolve();
        }
      }, 50);
    });
  }
};
|
const $ = require('../../core/renderer');
const noop = require('../../core/utils/common').noop;
const windowUtils = require('../../core/utils/window');
const domAdapter = require('../../core/dom_adapter');
const typeUtils = require('../../core/utils/type');
const each = require('../../core/utils/iterator').each;
const version = require('../../core/version');
const _windowResizeCallbacks = require('../../core/utils/resize_callbacks');
const _stringFormat = require('../../core/utils/string').format;
const _isObject = require('../../core/utils/type').isObject;
const extend = require('../../core/utils/extend').extend;
const themeManagerModule = require('../core/base_theme_manager');
const _floor = Math.floor;
const DOMComponent = require('../../core/dom_component');
const helpers = require('./helpers');
const _parseScalar = require('./utils').parseScalar;
const errors = require('./errors_warnings');
const _log = errors.log;
const rendererModule = require('./renderers/renderer');
const _Layout = require('./layout');
const devices = require('../../core/devices');
const eventsEngine = require('../../events/core/events_engine');
const OPTION_RTL_ENABLED = 'rtlEnabled';
const SIZED_ELEMENT_CLASS = 'dx-sized-element';
const _option = DOMComponent.prototype.option;
// Constant thunk: always reports `true` (used as the "ready" predicate).
function getTrue() {
    const alwaysTrue = true;
    return alwaysTrue;
}
// Constant thunk: always reports `false` (used as the "not ready" predicate).
function getFalse() {
    const alwaysFalse = false;
    return alwaysFalse;
}
// Compare two canvas descriptors field by field; true when any dimension
// or margin differs.
function areCanvasesDifferent(canvas1, canvas2) {
    return ['width', 'height', 'left', 'top', 'right', 'bottom']
        .some(field => canvas1[field] !== canvas2[field]);
}
// Wrap `callback` in a debounced resize handler: rapid calls collapse into
// one invocation 100ms after the last call. `dispose` cancels a pending call.
function createResizeHandler(callback) {
    let pendingTimer;

    const debounced = function() {
        clearTimeout(pendingTimer);
        pendingTimer = setTimeout(callback, 100);
    };

    debounced.dispose = function() {
        clearTimeout(pendingTimer);
        return this;
    };

    return debounced;
}
// Default incident handler: when the widget has no subscriber for the
// "incidentOccurred" event, dump the incident id and args into the log.
function defaultOnIncidentOccurred(e) {
    if(e.component.hasEvent('incidentOccurred')) {
        return;
    }
    _log.apply(null, [e.target.id].concat(e.target.args || []));
}
// Factory for the "incidentOccurred" notifier. Ids starting with "E" are
// errors, everything else is a warning; the message text is resolved from
// the shared error-message table.
let createIncidentOccurred = function(widgetName, eventTrigger) {
    return function incidentOccurred(id, args) {
        const formatArgs = [errors.ERROR_MESSAGES[id]].concat(args || []);
        const target = {
            id: id,
            type: id[0] === 'E' ? 'error' : 'warning',
            args: args,
            text: _stringFormat.apply(null, formatArgs),
            widget: widgetName,
            version: version
        };
        eventTrigger('incidentOccurred', { target: target });
    };
};
// Return the first strictly positive entry of `values`, or 0 when none is.
function pickPositiveValue(values) {
    for(let i = 0; i < values.length; ++i) {
        if(values[i] > 0) {
            return values[i];
        }
    }
    return 0;
}
// TODO - Changes handling
// * Provide more validation - something like
// _changes: [{
// code: "THEME",
// options: ["theme"],
// type: "option",
// handler: function () {
// this._setThemeAndRtl();
// }
// }, {
// code: "CONTAINER_SIZE",
// options: ["size", "option"],
// type: "layout",
// handler: function () {
// this._updateSize();
// }
// }]
// Builds the stub component used for server-side rendering: it only renders
// a sized placeholder element and neutralizes public methods of subclasses.
const getEmptyComponent = function() {
    const emptyComponentConfig = {};
    emptyComponentConfig.ctor = function(element, options) {
        this.callBase(element, options);
        const sizedElement = domAdapter.createElement('div');
        // Width falls back to '100%', height to the widget's default size.
        const width = options && typeUtils.isNumeric(options.width) ? options.width + 'px' : '100%';
        const height = options && typeUtils.isNumeric(options.height) ? options.height + 'px' : this._getDefaultSize().height + 'px';
        domAdapter.setStyle(sizedElement, 'width', width);
        domAdapter.setStyle(sizedElement, 'height', height);
        domAdapter.setClass(sizedElement, SIZED_ELEMENT_CLASS);
        domAdapter.insertElement(element, sizedElement);
    };
    const EmptyComponent = DOMComponent.inherit(emptyComponentConfig);
    const originalInherit = EmptyComponent.inherit;
    // Any subclass inherits with all public methods (plus _dispose and
    // _optionChanged) replaced by no-ops; other private members are kept.
    EmptyComponent.inherit = function(config) {
        for(const field in config) {
            if(typeUtils.isFunction(config[field]) && field.substr(0, 1) !== '_' || field === '_dispose' || field === '_optionChanged') {
                config[field] = noop;
            }
        }
        return originalInherit.call(this, config);
    };
    return EmptyComponent;
};
// True when rendering on the server (no window object available).
const isServerSide = !windowUtils.hasWindow();
// A size option is valid when it is defined and strictly positive.
function sizeIsValid(value) {
    return typeUtils.isDefined(value) && value > 0;
}
// Base viz widget: the root DOMComponent that charts, gauges etc. inherit
// from. On the server (no window) the sized stub component is exported instead.
module.exports = isServerSide ? getEmptyComponent() : DOMComponent.inherit({
    // Maps "onXxx" option names to the widget event names they subscribe to.
    _eventsMap: {
        'onIncidentOccurred': { name: 'incidentOccurred' },
        'onDrawn': { name: 'drawn' }
    },
    _getDefaultOptions: function() {
        return extend(this.callBase(), {
            onIncidentOccurred: defaultOnIncidentOccurred
        });
    },
    // When true, the renderer root takes part in the "links" mechanism.
    _useLinks: true,
    // Full widget bootstrap: theme manager, renderer, event trigger, layout,
    // plugins and core, finished by scheduling the initial change set.
    _init: function() {
        const that = this;
        // Remove the server-side placeholder (if any) before real rendering.
        that._$element.children('.' + SIZED_ELEMENT_CLASS).remove();
        that.callBase.apply(that, arguments);
        that._changesLocker = 0;
        that._optionChangedLocker = 0;
        that._changes = helpers.changes();
        that._suspendChanges();
        that._themeManager = that._createThemeManager();
        that._themeManager.setCallback(function() {
            that._requestChange(that._themeDependentChanges);
        });
        that._renderElementAttributes();
        that._initRenderer();
        // Shouldn't "_useLinks" be passed to the renderer instead of doing 3 checks here?
        const linkTarget = that._useLinks && that._renderer.root;
        // There is an implicit relation between `_useLinks` and `loading indicator` - it uses links
        // Though this relation is not ensured in code we will immediately know when it is broken - `loading indicator` will break on construction
        linkTarget && linkTarget.enableLinks().virtualLink('core').virtualLink('peripheral');
        that._renderVisibilityChange();
        that._attachVisibilityChangeHandlers();
        that._toggleParentsScrollSubscription(this._isVisible());
        that._initEventTrigger();
        that._incidentOccurred = createIncidentOccurred(that.NAME, that._eventTrigger);
        that._layout = new _Layout();
        // Such solution is used only to avoid writing lots of "after" for all core elements in all widgets
        // May be later a proper solution would be found
        linkTarget && linkTarget.linkAfter('core');
        that._initPlugins();
        that._initCore();
        linkTarget && linkTarget.linkAfter();
        that._change(that._initialChanges);
    },
    _createThemeManager() {
        return new themeManagerModule.BaseThemeManager(this._getThemeManagerOptions());
    },
    _getThemeManagerOptions() {
        return {
            themeSection: this._themeSection,
            fontFields: this._fontFields
        };
    },
    _initialChanges: ['LAYOUT', 'RESIZE_HANDLER', 'THEME', 'DISABLED'],
    _initPlugins: function() {
        const that = this;
        each(that._plugins, function(_, plugin) {
            plugin.init.call(that);
        });
    },
    _disposePlugins: function() {
        const that = this;
        // Dispose in reverse registration order.
        each(that._plugins.slice().reverse(), function(_, plugin) {
            plugin.dispose.call(that);
        });
    },
    // Queue change codes; they are applied when the outermost suspension ends.
    _change: function(codes) {
        this._changes.add(codes);
    },
    _suspendChanges: function() {
        ++this._changesLocker;
    },
    // Counterpart of _suspendChanges: when the lock count drops to zero,
    // apply all queued changes inside a renderer lock, then flush any
    // option writes that were queued while changes were being applied.
    _resumeChanges: function() {
        const that = this;
        if(--that._changesLocker === 0 && that._changes.count() > 0 && !that._applyingChanges) {
            that._renderer.lock();
            that._applyingChanges = true;
            that._applyChanges();
            that._changes.reset();
            that._applyingChanges = false;
            that._renderer.unlock();
            if(that._optionsQueue) {
                that._applyQueuedOptions();
            }
            that._optionChangedLocker++;
            that._notify();
            that._optionChangedLocker--;
        }
    },
    _applyQueuedOptions: function() {
        const that = this;
        const queue = that._optionsQueue;
        that._optionsQueue = null;
        that.beginUpdate();
        each(queue, function(_, action) {
            action();
        });
        that.endUpdate();
    },
    // Apply a change set immediately (suspend + add + resume).
    _requestChange: function(codes) {
        this._suspendChanges();
        this._change(codes);
        this._resumeChanges();
    },
    // Dispatch each pending change code to its `_change_<CODE>` handler,
    // in the priority given by `_totalChangesOrder`.
    _applyChanges: function() {
        const that = this;
        const changes = that._changes;
        const order = that._totalChangesOrder;
        let i;
        const ii = order.length;
        for(i = 0; i < ii; ++i) {
            if(changes.has(order[i])) {
                that['_change_' + order[i]]();
            }
        }
    },
    _optionChangesOrder: ['EVENTS', 'THEME', 'RENDERER', 'RESIZE_HANDLER'],
    _layoutChangesOrder: ['ELEMENT_ATTR', 'CONTAINER_SIZE', 'LAYOUT'],
    _customChangesOrder: ['DISABLED'],
    _change_EVENTS: function() {
        this._eventTrigger.applyChanges();
    },
    _change_THEME: function() {
        this._setThemeAndRtl();
    },
    _change_RENDERER: function() {
        this._setRendererOptions();
    },
    _change_RESIZE_HANDLER: function() {
        this._setupResizeHandler();
    },
    _change_ELEMENT_ATTR: function() {
        this._renderElementAttributes();
        this._change(['CONTAINER_SIZE']);
    },
    _change_CONTAINER_SIZE: function() {
        this._updateSize();
    },
    _change_LAYOUT: function() {
        this._setContentSize();
    },
    // Toggle the disabled look: block pointer events and grayscale the root;
    // on enable, restore the previous pointer-events value.
    _change_DISABLED: function() {
        const renderer = this._renderer;
        const root = renderer.root;
        if(this.option('disabled')) {
            this._initDisabledState = root.attr('pointer-events');
            root.attr({
                'pointer-events': 'none',
                filter: renderer.getGrayScaleFilter().id
            });
        } else {
            if(root.attr('pointer-events') === 'none') {
                root.attr({
                    'pointer-events': typeUtils.isDefined(this._initDisabledState) ? this._initDisabledState : null,
                    'filter': null
                });
            }
        }
    },
    _themeDependentChanges: ['RENDERER'],
    _initRenderer: function() {
        const that = this;
        // Canvas is calculated before the renderer is created in order to capture actual size of the container
        that._canvas = that._calculateCanvas();
        that._renderer = new rendererModule.Renderer({ cssClass: that._rootClassPrefix + ' ' + that._rootClass, pathModified: that.option('pathModified'), container: that._$element[0] });
        that._renderer.resize(that._canvas.width, that._canvas.height);
    },
    _disposeRenderer: function() {
        ///#DEBUG
        // NOTE: This is temporary - until links mechanism is stabilized
        this._useLinks && this._renderer.root.checkLinks();
        ///#ENDDEBUG
        this._renderer.dispose();
    },
    _getAnimationOptions: noop,
    // Public render entry point: recalc the container size and keep the
    // parents' scroll subscription in sync with visibility.
    render: function() {
        this._requestChange(['CONTAINER_SIZE']);
        const visible = this._isVisible();
        this._toggleParentsScrollSubscription(visible);
        !visible && this._stopCurrentHandling();
    },
    // (Un)subscribe a scroll handler on all ancestor elements (and the window
    // on generic platforms) so in-flight gesture handling can be stopped.
    _toggleParentsScrollSubscription: function(subscribe) {
        let $parents = $(this._renderer.root.element).parents();
        const scrollEvents = 'scroll.viz_widgets';
        if(devices.real().platform === 'generic') {
            $parents = $parents.add(windowUtils.getWindow());
        }
        this._proxiedTargetParentsScrollHandler = this._proxiedTargetParentsScrollHandler
            || (function() { this._stopCurrentHandling(); }).bind(this);
        // Drop the previous subscription before (possibly) re-subscribing.
        eventsEngine.off($().add(this._$prevRootParents), scrollEvents, this._proxiedTargetParentsScrollHandler);
        if(subscribe) {
            eventsEngine.on($parents, scrollEvents, this._proxiedTargetParentsScrollHandler);
            this._$prevRootParents = $parents;
        }
    },
    _stopCurrentHandling: noop,
    // Tear down in roughly reverse construction order, then null out the
    // heavyweight references.
    _dispose: function() {
        const that = this;
        that.callBase.apply(that, arguments);
        that._toggleParentsScrollSubscription(false);
        that._removeResizeHandler();
        that._layout.dispose();
        that._eventTrigger.dispose();
        that._disposeCore();
        that._disposePlugins();
        that._disposeRenderer();
        that._themeManager.dispose();
        that._themeManager = that._renderer = that._eventTrigger = null;
    },
    _initEventTrigger: function() {
        const that = this;
        that._eventTrigger = createEventTrigger(that._eventsMap, function(name) { return that._createActionByOption(name); });
    },
    // Compute the drawing canvas from the size/margin options, the element's
    // DOM size and the widget's default size, in that fallback order.
    _calculateCanvas: function() {
        const that = this;
        const size = that.option('size') || {};
        const margin = that.option('margin') || {};
        const defaultCanvas = that._getDefaultSize() || {};
        // Fall back to the element's DOM size only when no valid size option is set.
        const elementWidth = !sizeIsValid(size.width) && windowUtils.hasWindow() ? that._$element.width() : 0;
        const elementHeight = !sizeIsValid(size.height) && windowUtils.hasWindow() ? that._$element.height() : 0;
        let canvas = {
            width: size.width <= 0 ? 0 : _floor(pickPositiveValue([size.width, elementWidth, defaultCanvas.width])),
            height: size.height <= 0 ? 0 : _floor(pickPositiveValue([size.height, elementHeight, defaultCanvas.height])),
            left: pickPositiveValue([margin.left, defaultCanvas.left]),
            top: pickPositiveValue([margin.top, defaultCanvas.top]),
            right: pickPositiveValue([margin.right, defaultCanvas.right]),
            bottom: pickPositiveValue([margin.bottom, defaultCanvas.bottom])
        };
        // This for backward compatibility - widget was not rendered when canvas is empty.
        // Now it will be rendered but because of "width" and "height" of the root both set to 0 it will not be visible.
        if(canvas.width - canvas.left - canvas.right <= 0 || canvas.height - canvas.top - canvas.bottom <= 0) {
            canvas = { width: 0, height: 0 };
        }
        return canvas;
    },
    // Resize the renderer and request a re-layout, but only when the canvas
    // actually changed (or a chart forces it).
    _updateSize: function() {
        const that = this;
        const canvas = that._calculateCanvas();
        that._renderer.fixPlacement();
        if(areCanvasesDifferent(that._canvas, canvas) || that.__forceRender /* for charts */) {
            that._canvas = canvas;
            that._recreateSizeDependentObjects(true);
            that._renderer.resize(canvas.width, canvas.height);
            that._change(['LAYOUT']);
        }
    },
    _recreateSizeDependentObjects: noop,
    _getMinSize: function() {
        return [0, 0];
    },
    _getAlignmentRect: noop,
    // Run the layout pass: content rect = canvas minus margins (empty when
    // the canvas is collapsed), forwarded through the layout and back.
    _setContentSize: function() {
        const canvas = this._canvas;
        const layout = this._layout;
        let rect = canvas.width > 0 && canvas.height > 0 ? [canvas.left, canvas.top, canvas.width - canvas.right, canvas.height - canvas.bottom] : [0, 0, 0, 0];
        rect = layout.forward(rect, this._getMinSize());
        const nextRect = this._applySize(rect) || rect;
        layout.backward(nextRect, this._getAlignmentRect() || nextRect);
    },
    ///#DEBUG
    DEBUG_getCanvas: function() {
        return this._canvas;
    },
    DEBUG_getEventTrigger: function() {
        return this._eventTrigger;
    },
    ///#ENDDEBUG
    // Resolve an option against the theme: scalars - explicit option wins
    // over the theme value; objects - theme and option are deep-merged.
    _getOption: function(name, isScalar) {
        const theme = this._themeManager.theme(name);
        const option = this.option(name);
        return isScalar ? (option !== undefined ? option : theme) : extend(true, {}, theme, option);
    },
    // (Re)install the debounced window-resize handler. With redrawOnResize
    // off, only the renderer placement is fixed on resize.
    _setupResizeHandler: function() {
        const that = this;
        const redrawOnResize = _parseScalar(this._getOption('redrawOnResize', true), true);
        if(that._resizeHandler) {
            that._removeResizeHandler();
        }
        that._resizeHandler = createResizeHandler(function() {
            if(redrawOnResize) {
                that._requestChange(['CONTAINER_SIZE']);
            } else {
                that._renderer.fixPlacement();
            }
        });
        _windowResizeCallbacks.add(that._resizeHandler);
    },
    _removeResizeHandler: function() {
        if(this._resizeHandler) {
            _windowResizeCallbacks.remove(this._resizeHandler);
            this._resizeHandler.dispose();
            this._resizeHandler = null;
        }
    },
    // This is actually added only to make loading indicator pluggable. This is bad but much better than entire loading indicator in BaseWidget.
    _onBeginUpdate: noop,
    // Suspend change processing for the duration of a begin/endUpdate pair.
    beginUpdate: function() {
        const that = this;
        // The "_initialized" flag is checked because first time "beginUpdate" is called in the constructor.
        if(that._initialized && that._updateLockCount === 0) {
            that._onBeginUpdate();
            that._suspendChanges();
        }
        that.callBase.apply(that, arguments);
        return that;
    },
    endUpdate: function() {
        const that = this;
        that.callBase.apply(that, arguments);
        if(that._updateLockCount === 0) {
            that._resumeChanges();
        }
        return that;
    },
    option: function(name) {
        const that = this;
        // NOTE: `undefined` has to be returned because base option setter returns `undefined`.
        // `argument.length` and `isObject` checks are copypaste from Component.
        if(that._initialized && that._applyingChanges && (arguments.length > 1 || _isObject(name))) {
            // Option writes while changes are being applied are postponed and
            // flushed by _applyQueuedOptions after the change pass finishes.
            that._optionsQueue = that._optionsQueue || [];
            that._optionsQueue.push(that._getActionForUpdating(arguments));
        } else {
            return _option.apply(that, arguments);
        }
    },
    _getActionForUpdating: function(args) {
        const that = this;
        return that._deprecatedOptionsSuppressed ? function() { // T479911
            that._suppressDeprecatedWarnings();
            _option.apply(that, args);
            that._resumeDeprecatedWarnings();
        } : function() {
            _option.apply(that, args);
        };
    },
    // For quite a long time the following method were abstract (from the Component perspective).
    // Now they are not but that basic functionality is not required here.
    _clean: noop,
    _render: noop,
    // Translate an option change into change codes: partial-change names win
    // over the plain option map; event options go through the event trigger;
    // anything unrecognized falls back to the base implementation.
    _optionChanged: function(arg) {
        const that = this;
        if(that._optionChangedLocker) {
            return;
        }
        const partialChanges = that.getPartialChangeOptionsName(arg);
        let changes = [];
        if(partialChanges.length > 0) {
            partialChanges.forEach(pc => changes.push(that._partialOptionChangesMap[pc]));
        } else {
            changes.push(that._optionChangesMap[arg.name]);
        }
        changes = changes.filter(c => !!c);
        if(that._eventTrigger.change(arg.name)) {
            that._change(['EVENTS']);
        } else if(changes.length > 0) {
            that._change(changes);
        } else {
            that.callBase.apply(that, arguments);
        }
    },
    _notify: noop,
    // Option name -> change code dispatched when that option changes.
    _optionChangesMap: {
        size: 'CONTAINER_SIZE',
        margin: 'CONTAINER_SIZE',
        redrawOnResize: 'RESIZE_HANDLER',
        theme: 'THEME',
        rtlEnabled: 'THEME',
        encodeHtml: 'THEME',
        elementAttr: 'ELEMENT_ATTR',
        disabled: 'DISABLED'
    },
    _partialOptionChangesMap: { },
    _partialOptionChangesPath: { },
    // Collect the partial-update option names affected by a changed option,
    // walking `_partialOptionChangesPath` and inspecting object/array values.
    getPartialChangeOptionsName: function(changedOption) {
        const that = this;
        const fullName = changedOption.fullName;
        const sections = fullName.split(/[.]/);
        const name = changedOption.name;
        const value = changedOption.value;
        const options = this._partialOptionChangesPath[name];
        const partialChangeOptionsName = [];
        if(options) {
            if(options === true) {
                partialChangeOptionsName.push(name);
            } else {
                options.forEach(op => {
                    fullName.indexOf(op) >= 0 && partialChangeOptionsName.push(op);
                });
                if(sections.length === 1) {
                    if(typeUtils.type(value) === 'object') {
                        that._addOptionsNameForPartialUpdate(value, options, partialChangeOptionsName);
                    } else if(typeUtils.type(value) === 'array') {
                        if(value.length > 0 && value.every(item => that._checkOptionsForPartialUpdate(item, options))) {
                            value.forEach(item => that._addOptionsNameForPartialUpdate(item, options, partialChangeOptionsName));
                        }
                    }
                }
            }
        }
        // De-duplicate while preserving order.
        return partialChangeOptionsName.filter((value, index, self) => self.indexOf(value) === index);
    },
    _checkOptionsForPartialUpdate: function(optionObject, options) {
        // True when every key of the object is a known partial-update option.
        return !Object.keys(optionObject).some((key) => options.indexOf(key) === -1);
    },
    _addOptionsNameForPartialUpdate: function(optionObject, options, partialChangeOptionsName) {
        const optionKeys = Object.keys(optionObject);
        if(this._checkOptionsForPartialUpdate(optionObject, options)) {
            optionKeys.forEach((key) => options.indexOf(key) > -1 && partialChangeOptionsName.push(key));
        }
    },
    _visibilityChanged: function() {
        this.render();
    },
    _setThemeAndRtl: function() {
        this._themeManager.setTheme(this.option('theme'), this.option(OPTION_RTL_ENABLED));
    },
    _getRendererOptions: function() {
        return {
            rtl: this.option(OPTION_RTL_ENABLED),
            encodeHtml: this.option('encodeHtml'),
            animation: this._getAnimationOptions()
        };
    },
    _setRendererOptions: function() {
        this._renderer.setOptions(this._getRendererOptions());
    },
    svg: function() {
        return this._renderer.svg();
    },
    getSize: function() {
        const canvas = this._canvas || {};
        return { width: canvas.width, height: canvas.height };
    },
    isReady: getFalse,
    _dataIsReady: getTrue,
    _resetIsReady: function() {
        this.isReady = getFalse;
    },
    // Called when drawing finishes: the widget flips to "ready" only after
    // all animations have ended, then the "drawn" event is triggered.
    _drawn: function() {
        const that = this;
        that.isReady = getFalse;
        if(that._dataIsReady()) {
            that._renderer.onEndAnimation(function() {
                that.isReady = getTrue;
            });
        }
        that._eventTrigger('drawn', {});
    }
});
helpers.replaceInherit(module.exports);
// Builds the widget's event trigger: a callable that fires named events,
// augmented with `change`/`applyChanges` (to rebuild a handler when its
// option changes) and `dispose`.
function createEventTrigger(eventsMap, callbackGetter) {
    let triggers = {};
    each(eventsMap, function(name, info) {
        if(info.name) {
            createEvent(name);
        }
    });
    let changes;
    // Record that the handler for `name` must be rebuilt; returns whether
    // `name` is a known event option at all.
    triggerEvent.change = function(name) {
        const eventInfo = eventsMap[name];
        if(eventInfo) {
            (changes = changes || {})[name] = eventInfo;
        }
        return !!eventInfo;
    };
    // Rebuild all handlers recorded by `change` since the last apply.
    triggerEvent.applyChanges = function() {
        if(changes) {
            each(changes, function(name, eventInfo) {
                createEvent(eventInfo.newName || name);
            });
            changes = null;
        }
    };
    triggerEvent.dispose = function() {
        eventsMap = callbackGetter = triggers = null;
    };
    return triggerEvent;
    function createEvent(name) {
        const eventInfo = eventsMap[name];
        triggers[eventInfo.name] = callbackGetter(name);
    }
    function triggerEvent(name, arg, complete) {
        triggers[name](arg);
        complete && complete();
    }
}
///#DEBUG
// Test-only hooks: expose internals and allow stubbing createIncidentOccurred.
module.exports.DEBUG_createEventTrigger = createEventTrigger;
module.exports.DEBUG_createIncidentOccurred = createIncidentOccurred;
module.exports.DEBUG_stub_createIncidentOccurred = function(stub) {
    createIncidentOccurred = stub;
};
module.exports.DEBUG_restore_createIncidentOccurred = function() {
    createIncidentOccurred = module.exports.DEBUG_createIncidentOccurred;
};
module.exports.DEBUG_createResizeHandler = createResizeHandler;
///#ENDDEBUG
|
# Copyright Contributors to the Testing Farm project.
# SPDX-License-Identifier: Apache-2.0
import datetime
import inspect
import json
import os
import threading
import traceback
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Union, cast
import dramatiq.broker
import dramatiq.message
import dramatiq.middleware
import dramatiq.middleware.retries
import dramatiq.worker
import gluetool.log
import redis
from dramatiq.common import compute_backoff, current_millis
from gluetool.result import Ok
from .guest import GuestLogger
if TYPE_CHECKING:
from . import ExceptionInfoType
from .tasks import Actor
# Dramatiq does not have a global default for maximal number of retries, the value is only present as a default
# of `Retries` middleware's `retries` keyword parameter.
DEFAULT_MAX_RETRIES = 20
def _actor_arguments(
    logger: gluetool.log.ContextAdapter,
    message: dramatiq.message.Message,
    actor: 'Actor'
) -> Dict[str, Union[str, None]]:
    """
    Map the positional arguments carried by *message* onto the parameter names
    of *actor*'s function.

    Returns an empty mapping (after reporting a failure) when the message
    payload does not match the actor's signature.
    """

    parameter_names = list(inspect.signature(actor.fn).parameters)
    raw_arguments = message._message[2]

    gluetool.log.log_dict(logger.debug, 'raw message data', message._message)

    if len(parameter_names) != len(raw_arguments):
        from . import Failure

        Failure(
            'actor signature parameters does not match message content',
            signature=parameter_names,
            arguments=[repr(arg) for arg in raw_arguments]
        ).handle(logger)

        return {}

    return dict(zip(parameter_names, raw_arguments))
def _get_message_limit(
message: dramatiq.broker.MessageProxy,
actor: 'Actor',
key: str,
default: int
) -> int:
value = cast(Optional[int], message.options.get(key))
if value:
return value
value = cast(Optional[int], actor.options.get(key))
if value:
return value
return default
def _message_max_retries(message: dramatiq.broker.MessageProxy, actor: 'Actor') -> int:
    # Retry limit for this message: message option > actor option > global default.
    return _get_message_limit(message, actor, 'max_retries', DEFAULT_MAX_RETRIES)
def _message_min_backoff(message: dramatiq.broker.MessageProxy, actor: 'Actor') -> int:
    # Lower bound (milliseconds) of the retry backoff window.
    return _get_message_limit(message, actor, 'min_backoff', dramatiq.middleware.retries.DEFAULT_MIN_BACKOFF)
def _message_max_backoff(message: dramatiq.broker.MessageProxy, actor: 'Actor') -> int:
    # Upper bound (milliseconds) of the retry backoff window.
    return _get_message_limit(message, actor, 'max_backoff', dramatiq.middleware.retries.DEFAULT_MAX_BACKOFF)
def _message_backoff(
    message: dramatiq.broker.MessageProxy,
    actor: 'Actor',
    retries: int
) -> int:
    # Exponential backoff (in milliseconds) for the given retry count, bounded
    # by the message's/actor's min and max backoff limits. `compute_backoff`
    # returns a (retries, backoff) tuple - only the delay part is needed here.
    return cast(
        int,
        compute_backoff(
            retries,
            factor=_message_min_backoff(message, actor),
            max_backoff=_message_max_backoff(message, actor)
        )[1]
    )
def _retry_message(
    logger: gluetool.log.ContextAdapter,
    broker: dramatiq.broker.Broker,
    message: dramatiq.message.Message,
    actor: 'Actor',
    exc_info: Optional['ExceptionInfoType'] = None
) -> None:
    """
    Enqueue a given message while increasing its "retried" count by 1.
    """

    updated_retries = message.options.get('retries', 0) + 1
    message.options['retries'] = updated_retries

    # When an exception is known, preserve its (trimmed) traceback on the message.
    if exc_info:
        message.options['traceback'] = '\n'.join(traceback.format_exception(*exc_info, limit=30))

    backoff = _message_backoff(message, actor, updated_retries)
    retry_at = datetime.datetime.utcnow() + datetime.timedelta(milliseconds=backoff)

    logger.info(f'retries: message={message.message_id} retries={updated_retries} backoff={backoff} retrying-at={retry_at}')

    broker.enqueue(message, delay=backoff)
def _fail_message(
    logger: gluetool.log.ContextAdapter,
    message: dramatiq.message.Message,
    error_message: str,
    **details: Any
) -> None:
    """
    Mark the given message as failed.
    """

    from . import Failure

    # Record the failure (with any structured details) before dropping the message.
    failure = Failure(error_message, **details)
    failure.handle(logger)

    message.fail()
def _handle_tails(
    logger: gluetool.log.ContextAdapter,
    message: dramatiq.message.Message,
    actor: 'Actor',
    actor_arguments: Dict[str, Optional[str]]
) -> bool:
    """
    Handle the "tails": when we run out of retries on a task, we cannot just let it fail, but we must take care
    of whatever resources it might have allocated.
    We have different handlers for each chain of tasks, and these handlers take care of their particular cleanup.
    The task here is to dispatch the correct handler.

    :returns: ``True`` when the tail handler succeeded, ``False`` otherwise.
    """
    from .tasks import TailHandler, get_root_db
    # The actor is expected to carry its tail handler in its options; callers
    # check for its presence before dispatching here.
    tail_handler = cast(TailHandler, actor.options['tail_handler'])
    tail_logger = tail_handler.get_logger(logger, actor, actor_arguments)
    db = get_root_db(tail_logger)
    with db.get_session() as session:
        if tail_handler.handle_tail(tail_logger, db, session, actor, actor_arguments):
            return True
    tail_logger.error('failed to handle the chain tail')
    # This would cause the message to be dropped, effectively halting any work towards provisioning
    # of the guest or capturing its logs.
    #
    # In the spirit of "let's try again...", falling through to rescheduling the original task.
    #
    # message.fail()
    return False
class Retries(dramatiq.middleware.retries.Retries): # type: ignore[misc] # cannot subclass 'Retries'
    """
    Custom retries middleware: adds per-actor ``tail_handler`` cleanup and
    message/actor-level limit resolution on top of dramatiq's stock
    ``Retries`` middleware.
    """
    @property
    def actor_options(self) -> Set[str]:
        return {
            # These come from our superclass...
            'max_retries',
            'min_backoff',
            'max_backoff',
            'retry_when',
            'throws',
            # ... and this one is our addition so we could attach tail handler to each actor.
            'tail_handler'
        }
    def after_process_message(
        self,
        broker: dramatiq.broker.Broker,
        message: dramatiq.message.Message,
        *,
        # This is on purpose, our tasks never return anything useful.
        result: None = None,
        exception: Optional[BaseException] = None
    ) -> None:
        # If the task did not raise an exception, there's obviously no need to retry it in the future. We're done.
        if exception is None:
            return
        from . import get_logger
        logger = get_logger()
        actor = cast('Actor', broker.get_actor(message.actor_name))
        # `retries` key is initialized to 0 - while other fields are optional, this one is expected to exist.
        retries = message.options.setdefault('retries', 0)
        max_retries = _message_max_retries(message, actor)
        retry_when = actor.options.get('retry_when', self.retry_when)
        actor_arguments = _actor_arguments(logger, message, actor)
        guestname = actor_arguments['guestname'] if actor_arguments and 'guestname' in actor_arguments else None
        # Attach the guest name to the logging context when available.
        if guestname:
            logger = GuestLogger(logger, guestname)
        logger.info(f'retries: message={message.message_id} actor={actor.actor_name} current-retries={retries} max-retries={max_retries}') # noqa: E501
        # Retries are exhausted either when a custom `retry_when` predicate
        # says so, or - with no predicate - when the counter reached the limit.
        if retry_when is not None and not retry_when(retries, exception) or \
           retry_when is None and max_retries is not None and retries >= max_retries:
            # Kill messages for tasks we don't handle in any better way. After all, they did run out of retires.
            if actor.options.get('tail_handler') is None:
                return _fail_message(
                    logger,
                    message,
                    f'retries exceeded for message {message.message_id}',
                    task_name=actor.actor_name,
                    task_args=actor_arguments,
                    guestname=guestname
                )
            # A successful tail handler ends processing; on failure we fall
            # through and reschedule the original task instead of dropping it.
            if _handle_tails(logger, message, actor, actor_arguments) is True:
                return
        _retry_message(logger, broker, message, actor)
MESSAGE_NOTE_OPTION_KEY = 'artemis_notes'
NOTE_POOLNAME = 'poolname'
# TODO: once circular imports are resolved, use @with_context
def set_message_note(note: str, value: str) -> None:
    """
    Attach a "note" to the current message.

    Tasks may need to expose some additional information that cannot be passed to middleware directly, to extend
    context available to the middleware. Such information lives under a dedicated key of the current message's
    ``options`` mapping.
    """

    from .context import CURRENT_MESSAGE

    message_options = cast(Dict[str, Dict[str, str]], CURRENT_MESSAGE.get().options)
    notes = message_options.setdefault(MESSAGE_NOTE_OPTION_KEY, {})

    notes[note] = value
def get_metric_note(note: str) -> Optional[str]:
    """
    Look up a note attached to the current message, returning ``None`` when unset.
    """

    from .context import CURRENT_MESSAGE

    message_options = cast(Dict[str, Dict[str, str]], CURRENT_MESSAGE.get().options)
    notes = message_options.get(MESSAGE_NOTE_OPTION_KEY, {})

    return notes.get(note)
class Prometheus(dramatiq.middleware.Middleware):  # type: ignore[misc] # cannot subclass 'Middleware'
    """
    Dramatiq middleware feeding task bookkeeping into Prometheus-style metrics:
    overall/retried/rejected/errored message counters, delayed-message and
    in-flight gauges, and per-message processing durations.
    """

    def __init__(self) -> None:
        super(Prometheus, self).__init__()

        # IDs of messages currently sitting in a delay queue - used to pair
        # before_delay_message() with the matching before_process_message().
        self._delayed_messages: Set[str] = set()

        # message ID => timestamp (ms) when its processing started.
        self._message_start_times: Dict[str, int] = {}

    @property
    def actor_options(self) -> Set[str]:
        # Declare the "notes" option as valid so actors may carry it (see set_message_note()).
        return {MESSAGE_NOTE_OPTION_KEY}

    def after_nack(self, broker: dramatiq.broker.Broker, message: dramatiq.message.Message) -> None:
        # A nacked message counts as rejected.
        from .metrics import TaskMetrics

        TaskMetrics.inc_overall_rejected_messages(message.queue_name, message.actor_name)

    def after_enqueue(self, broker: dramatiq.broker.Broker, message: dramatiq.message.Message, delay: int) -> None:
        from .metrics import TaskMetrics

        # NOTE(review): the presence of a `retries` key is taken to mean this enqueue is a
        # retry - confirm against the retries middleware, which initializes the key during
        # processing.
        if "retries" in message.options:
            TaskMetrics.inc_overall_retried_messages(message.queue_name, message.actor_name)

    def before_delay_message(self, broker: dramatiq.broker.Broker, message: dramatiq.message.Message) -> None:
        from .metrics import TaskMetrics

        # Remember the message so the gauge can be decremented once it leaves the delay queue.
        self._delayed_messages.add(message.message_id)

        TaskMetrics.inc_current_delayed_messages(message.queue_name, message.actor_name)

    def before_process_message(self, broker: dramatiq.broker.Broker, message: dramatiq.message.Message) -> None:
        from .metrics import TaskMetrics

        labels = (message.queue_name, message.actor_name)

        # The message may have arrived from a delay queue - move it from "delayed" to "current".
        if message.message_id in self._delayed_messages:
            self._delayed_messages.remove(message.message_id)

            TaskMetrics.dec_current_delayed_messages(message.queue_name, message.actor_name)

        TaskMetrics.inc_current_messages(*labels)

        # Record the start time so after_process_message() can compute the duration.
        self._message_start_times[message.message_id] = current_millis()

    def after_process_message(
        self,
        broker: dramatiq.broker.Broker,
        message: dramatiq.message.Message,
        *,
        # This is on purpose, our tasks never return anything useful.
        result: None = None,
        exception: Optional[BaseException] = None
    ) -> None:
        """
        Update duration, in-flight and outcome metrics for a message that just
        finished - successfully or not.
        """

        from . import get_logger
        from .metrics import TaskMetrics

        logger = get_logger()

        labels = (message.queue_name, message.actor_name)

        actor = broker.get_actor(message.actor_name)
        actor_arguments = _actor_arguments(logger, message, actor)

        # Falling back to "now" yields a zero duration when the start time is
        # unknown, i.e. the message never went through before_process_message().
        message_start_time = self._message_start_times.pop(message.message_id, current_millis())

        message_duration = current_millis() - message_start_time

        # Extract the poolname. `None` is a good starting value, but it turns out that most of the tasks
        # relate to a particular pool in one way or another. Some tasks are given the poolname as a parameter,
        # and some can tell us by attaching a note to the message.
        poolname: Optional[str] = None

        if 'poolname' in actor_arguments:
            poolname = actor_arguments['poolname']

        elif message.options:
            poolname = get_metric_note(NOTE_POOLNAME)

        TaskMetrics.inc_message_durations(
            message.queue_name,
            message.actor_name,
            message_duration,
            poolname
        )

        TaskMetrics.dec_current_messages(*labels)
        TaskMetrics.inc_overall_messages(*labels)

        if exception is not None:
            TaskMetrics.inc_overall_errored_messages(*labels)

    # Skipped messages go through the same bookkeeping as processed ones.
    after_skip_message = after_process_message
class WorkerMetrics(dramatiq.middleware.Middleware):  # type: ignore[misc] # cannot subclass 'Middleware'
    """
    Dramatiq broker middleware spawning a thread to keep refreshing worker metrics.
    """

    def __init__(self, worker_name: str, interval: int) -> None:
        super().__init__()

        self.worker_name = worker_name
        self.interval = interval

        # Populated once the refresher thread is spawned after worker boot.
        self._refresher: Optional[threading.Thread] = None

    def after_worker_boot(self, signal: str, worker: dramatiq.worker.Worker) -> None:
        """Kick off the background thread that refreshes this worker's metrics."""

        from . import get_logger
        from .metrics import WorkerMetrics as _WorkerMetrics

        get_logger().warning('metrics refresher started')

        # NOTE(review): the callback ignores the worker it is handed and closes over
        # the booted `worker` instead - presumably they are the same object; confirm.
        self._refresher = _WorkerMetrics.spawn_metrics_refresher(
            get_logger(),
            self.worker_name,
            self.interval,
            lambda _ignored: Ok((1, len(worker.workers))),
            worker_instance=worker
        )
class WorkerTraffic(dramatiq.middleware.Middleware):  # type: ignore[misc] # cannot subclass 'Middleware'
    """
    Dramatiq middleware recording in a shared cache which task each worker
    thread is processing at the moment.
    """

    # One cache key per (worker, process, thread) triplet.
    KEY_WORKER_TASK = 'tasks.workers.traffic.{worker}.{pid}.{tid}'
    KEY_WORKER_TASK_PATTERN = 'tasks.workers.traffic.*'

    def __init__(
        self,
        logger: gluetool.log.ContextAdapter,
        cache: redis.Redis,
        worker_name: str
    ) -> None:
        super().__init__()

        self.logger = logger
        self.cache = cache
        self.worker_name = worker_name
        self.worker_pid = os.getpid()

    @property
    def current_key(self) -> str:
        """Cache key identifying the current worker process and thread."""

        return self.KEY_WORKER_TASK.format(
            worker=self.worker_name,
            pid=self.worker_pid,
            tid=threading.get_ident()
        )

    def before_process_message(self, broker: dramatiq.broker.Broker, message: dramatiq.message.Message) -> None:
        """Publish a "this thread now works on this task" record into the cache."""

        from .cache import set_cache_value
        from .knobs import KNOB_WORKER_TRAFFIC_METRICS_TTL
        from .metrics import WorkerTrafficTask

        actor = broker.get_actor(message.actor_name)
        actor_arguments = _actor_arguments(self.logger, message, actor)

        task_record = WorkerTrafficTask(
            workername=self.worker_name,
            worker_pid=self.worker_pid,
            worker_tid=threading.get_ident(),
            ctime=datetime.datetime.utcnow(),
            queue=cast(str, message.queue_name),
            actor=cast(str, message.actor_name),
            args=actor_arguments
        )

        # A TTL makes the record expire on its own should the worker die without cleanup.
        set_cache_value(
            self.logger,
            self.cache,
            self.current_key,
            json.dumps(task_record.serialize_to_json()).encode(),
            ttl=KNOB_WORKER_TRAFFIC_METRICS_TTL.value
        )

    def after_process_message(
        self,
        broker: dramatiq.broker.Broker,
        message: dramatiq.message.Message,
        *,
        # This is on purpose, our tasks never return anything useful.
        result: None = None,
        exception: Optional[BaseException] = None
    ) -> None:
        """Drop the "currently processing" record once the task is finished."""

        from .cache import delete_cache_value

        delete_cache_value(self.logger, self.cache, self.current_key)

    # A skipped message must be cleaned up just like a processed one.
    after_skip_message = after_process_message
|
# daily_data_analysis
#
# Analisa is dadus de is imitiduras a sa dii.
# Analyse daily emission data.
#
# Copyright 2021 The Sardinia Sustainability Initiative
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import datetime
import os
import shared
def _reindex_daily(data_series, year):
""" Re-index a data series and fill missing measurements with nans
"""
idx = pd.date_range('01-01-' + str(year), '12-31-' + str(year))
data_series = data_series.reindex(idx, fill_value=np.nan)
return data_series
def _reformat_monthly_data_by_day(montly_data, month_number, year):
""" Reformat the data about a month into a time series by day
"""
import math
measurements = pd.to_numeric(montly_data.array, errors='coerce')
day_of_month = pd.to_numeric(montly_data.index)
new_measurements = []
new_dates = []
for meas, day in zip(measurements, day_of_month):
# Remove the invalid or empty measurements
if not math.isnan(meas):
new_measurements.append(meas)
new_dates.append(datetime.date(year, month_number, day+1))
return pd.Series(new_measurements, index=new_dates)
def _convert_to_timeseries(daily_pollution_data, year):
    """Convert the day-by-month table used by the Sardegna Ambiente dataset
    into a single daily time series.

    The input frame has a leading "day of the month" column followed by one
    column per month (assumed January..December order); the result covers
    every day of *year*, with missing measurements as NaN.
    """
    # Exclude the "day of the month" column
    pollution_data = daily_pollution_data.iloc[:, 1:]

    # One dated sub-series per month column.
    month_series = [
        _reformat_monthly_data_by_day(pollution_data[column], month, year)
        for month, column in enumerate(pollution_data.columns, start=1)
    ]

    # BUGFIX: Series.append() was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat() is the supported equivalent.
    if month_series:
        data_series = pd.concat(month_series)
    else:
        data_series = pd.Series(dtype='float64')

    return _reindex_daily(data_series, year)
def _plot_thresholds(thresholds, ax):
    """Draw one horizontal line on *ax* for every threshold present in *thresholds*."""
    # (key, color, linestyle, label) - drawn in this order so legend order is stable.
    known_levels = (
        ("WhoDaily", 'red', '--', "WHO daily threshold"),
        ("WhoYearly", 'magenta', '--', "WHO yearly threshold"),
        ("ItaDaily", 'red', ':', "Italian daily threshold"),
        ("ItaYearly", 'magenta', ':', "Italian yearly threshold"),
    )

    for key, color, style, label in known_levels:
        if key in thresholds:
            ax.axhline(y=thresholds[key], color=color,
                       linestyle=style, label=label, linewidth=1)
def _plot_daily_pollution_subplot(data_file_name, year, thresholds, ax, title=''):
    """Plot one year of daily measurements from *data_file_name* onto *ax*."""
    # Load the raw table and normalise it into a daily time series.
    measurements = pd.read_csv(
        "../data/daily_emissions/" + data_file_name, comment="#")
    measurements = _convert_to_timeseries(measurements, year)

    # Daily measurements as a line plot.
    measurements.plot(label="Daily measurements", ax=ax, linewidth=1)
    ax.set_ylabel("Concentration (μg/m³)")
    ax.set_xlabel("Date")
    ax.set_title(title)

    # Yearly mean plus the reference threshold lines.
    ax.axhline(y=measurements.mean(), color='black',
               linestyle='-', label="Yearly mean", linewidth=1)
    _plot_thresholds(thresholds, ax)

    # Thin out the x axis: keep every other major tick, plus the last one.
    major_ticks = ax.get_xticks(minor=False)
    ax.set_xticks(np.append(major_ticks[0::2], major_ticks[-1]))
    ax.set_xticks([], minor=True)
def _plot_daily_pollution(data, output_file_name, title=''):
    """Plot daily pollution data, one subplot per entry of *data*.

    Each entry is a dict with 'file', 'year', 'thresholds' and 'title' keys.
    The figure is saved under *output_file_name* via shared.save_figure().
    """
    import math

    num_plots = len(data)
    num_cols = min(2, num_plots)
    num_rows = math.ceil(num_plots/num_cols)

    single_plot_width = 0.48
    single_plot_ratio = 2

    # BUGFIX: squeeze=False guarantees a 2-D array of axes even for a single
    # subplot; without it, a one-entry `data` crashed on the reshape below
    # (a bare Axes has no .reshape()).
    fig, axs = plt.subplots(num_rows, num_cols,
                            squeeze=False,
                            figsize=shared.figure_size(
                                single_plot_width*num_cols,
                                single_plot_ratio/num_rows))

    for curr_data, curr_ax in zip(data, axs.reshape(-1).tolist()):
        _plot_daily_pollution_subplot(
            curr_data['file'], curr_data['year'], curr_data['thresholds'], curr_ax, curr_data['title'])

    # BUGFIX: `title` was accepted (and passed by every caller) but silently
    # ignored - honour it as the overall figure title.
    if title:
        fig.suptitle(title)

    # Save figure
    shared.save_figure(output_file_name)
# WHO / Italian reference levels for PM 2.5, in μg/m³.
# NOTE(review): the +0.1/-0.1 offsets look like a trick to keep otherwise
# coinciding 25 μg/m³ threshold lines visually distinct on the plot - confirm.
pm25_thresholds = {
    "WhoYearly": 10,
    "WhoDaily": 25+0.1,
    "ItaYearly": 25-0.1
}

# WHO / Italian reference levels for PM 10, in μg/m³.
pm10_thresholds = {
    "WhoYearly": 20,
    "WhoDaily": 50+0.1,
    "ItaYearly": 40,
    "ItaDaily": 50-0.1
}

# Apply the project-wide matplotlib font defaults before any plotting.
shared.set_font_defaults()

# Monserrato
monserrato_data = [
    {
        'file': "CENMO1-Anno-2019-PM2.5.csv",
        'year': 2019,
        'thresholds': pm25_thresholds,
        'title': "PM 2.5 emissions 2019"
    }, {
        'file': "CENMO1-Anno-2019-PM10.csv",
        'year': 2019,
        'thresholds': pm10_thresholds,
        'title': "PM 10 emissions in 2019"
    }, {
        'file': "CENMO1-Anno-2020-PM2.5.csv",
        'year': 2020,
        'thresholds': pm25_thresholds,
        'title': "PM 2.5 emissions in 2020"
    }, {
        'file': "CENMO1-Anno-2020-PM10.csv",
        'year': 2020,
        'thresholds': pm10_thresholds,
        'title': "PM 10 emissions in 2020"
    }]
_plot_daily_pollution(monserrato_data, "cenmo1-daily",
                      title='Daily emissions in Monserrato')

# Cagliari
cagliari_data = [
    {
        'file': "CENCA1-Anno-2019-PM2.5.csv",
        'year': 2019,
        'thresholds': pm25_thresholds,
        'title': "PM 2.5 emissions 2019"
    }, {
        'file': "CENCA1-Anno-2019-PM10.csv",
        'year': 2019,
        'thresholds': pm10_thresholds,
        'title': "PM 10 emissions in 2019"
    }, {
        'file': "CENCA1-Anno-2020-PM2.5.csv",
        'year': 2020,
        'thresholds': pm25_thresholds,
        'title': "PM 2.5 emissions in 2020"
    }, {
        'file': "CENCA1-Anno-2020-PM10.csv",
        'year': 2020,
        'thresholds': pm10_thresholds,
        'title': "PM 10 emissions in 2020"
    }]
_plot_daily_pollution(cagliari_data, "cenca1-daily",
                      title='Daily emissions in Cagliari')

# Quartu - only PM 10 data files are available for this station.
quartu_data = [
    {
        'file': "CENQU1-Anno-2019-PM10.csv",
        'year': 2019,
        'thresholds': pm10_thresholds,
        'title': "PM 10 emissions in 2019"
    }, {
        'file': "CENQU1-Anno-2020-PM10.csv",
        'year': 2020,
        'thresholds': pm10_thresholds,
        'title': "PM 10 emissions in 2020"
    }]
_plot_daily_pollution(quartu_data, "cenqu1-daily",
                      title="Daily emissions in Quartu Sant'Elena")
|
import os
import time
import datetime
from uuid import uuid4
class EnvironmentVariables:
    """
    Resolve and hold every configuration parameter of the benchmark runner.

    Values come from environment variables, optionally seeded from local `.env` /
    `.env.generated` files, and are exposed through `environment_variables_dict`.
    """

    def __init__(self):
        self._environment_variables_dict = {}

        # env files override true ENV. Not best order, but easier to write :/
        # .env.generated can be auto-generated (by an external tool) based on the local cluster's configuration.
        for env in ".env", ".env.generated":
            try:
                with open(env) as f:
                    for line in f.readlines():
                        key, found, value = line.strip().partition("=")
                        if not found:
                            # BUGFIX: this message was missing its f-string prefix and printed the
                            # `{env}` / `{line.strip()}` placeholders literally.
                            print(f"ERROR: invalid line in {env}: {line.strip()}")
                            continue
                        if key in os.environ:
                            continue  # prefer env to env file
                        os.environ[key] = value
            except FileNotFoundError:
                pass  # ignore

        ##################################################################################################
        # dynamic parameters - configure for local run
        # parameters for running workload

        # This path is github actions runner path (benchmark-operator should be cloned here)
        self._environment_variables_dict['runner_path'] = os.environ.get('RUNNER_PATH', '/tmp')
        # This path is for vm/pod/prometheus run artifacts
        self._environment_variables_dict['run_artifacts'] = os.environ.get('RUN_ARTIFACTS', os.path.join(self._environment_variables_dict['runner_path'], 'benchmark-runner-run-artifacts'))
        # cluster: 'openshift'(Default)/ 'kubernetes'
        self._environment_variables_dict['cluster'] = os.environ.get('CLUSTER', 'openshift')
        # dynamic parameters - configure for local run
        self._environment_variables_dict['workload'] = os.environ.get('WORKLOAD', '')
        self._environment_variables_dict['kubeadmin_password'] = os.environ.get('KUBEADMIN_PASSWORD', '')
        # PIN=node selector
        self._environment_variables_dict['pin_node_benchmark_operator'] = os.environ.get('PIN_NODE_BENCHMARK_OPERATOR', '')
        self._environment_variables_dict['pin_node1'] = os.environ.get('PIN_NODE1', '')
        self._environment_variables_dict['pin_node2'] = os.environ.get('PIN_NODE2', '')
        # ElasticSearch
        self._environment_variables_dict['elasticsearch'] = os.environ.get('ELASTICSEARCH', '')
        self._environment_variables_dict['elasticsearch_port'] = os.environ.get('ELASTICSEARCH_PORT', '')
        self._environment_variables_dict['elasticsearch_user'] = os.environ.get('ELASTICSEARCH_USER', '')
        self._environment_variables_dict['elasticsearch_password'] = os.environ.get('ELASTICSEARCH_PASSWORD', '')
        # 'http'(Default) / 'https' to use SSL to connect ElasticSearch
        self._environment_variables_dict['elasticsearch_url_protocol'] = os.environ.get('ELASTICSEARCH_URL_PROTOCOL', 'http')
        # Workaround for Kata CPU offline problem in 4.9/4.10 - set to 'True' to enable
        self._environment_variables_dict['kata_cpuoffline_workaround'] = os.environ.get('KATA_CPUOFFLINE_WORKAROUND', '')
        # Scale
        self._environment_variables_dict['scale'] = os.environ.get('SCALE', '')
        # list of nodes per pod/vm, scale number per node, e.g: [ 'master-1', 'master-2' ] - run 1 pod/vm in each node
        self._environment_variables_dict['scale_nodes'] = os.environ.get('SCALE_NODES', "")
        self._environment_variables_dict['redis'] = os.environ.get('REDIS', '')

        # default parameter - change only if needed
        # Parameters below related to 'run_workload()'
        self._environment_variables_dict['workloads'] = ['stressng_pod', 'stressng_vm', 'stressng_kata',
                                                         'uperf_pod', 'uperf_vm', 'uperf_kata',
                                                         'hammerdb_pod_mariadb', 'hammerdb_vm_mariadb', 'hammerdb_kata_mariadb',
                                                         'hammerdb_pod_postgres', 'hammerdb_vm_postgres', 'hammerdb_kata_postgres',
                                                         'hammerdb_pod_mssql', 'hammerdb_vm_mssql', 'hammerdb_kata_mssql',
                                                         'vdbench_pod', 'vdbench_kata', 'vdbench_vm']
        # benchmark-operator workload types
        self._environment_variables_dict['workload_namespaces'] = {
            'stressng': 'benchmark-operator',
            'hammerdb': 'benchmark-operator',
            'uperf': 'benchmark-operator',
            'vdbench': 'benchmark-runner',
        }
        # Choose default namespace: explicit NAMESPACE wins, then the workload's own default.
        base_workload = self._environment_variables_dict['workload'].split('_')[0]
        if os.environ.get('NAMESPACE'):
            self._environment_variables_dict['namespace'] = os.environ.get('NAMESPACE')
        elif base_workload in self._environment_variables_dict['workload_namespaces']:
            default_namespace = self._environment_variables_dict['workload_namespaces'][base_workload]
            self._environment_variables_dict['namespace'] = os.environ.get('NAMESPACE', default_namespace)
        else:
            # TBD if this is not set
            self._environment_variables_dict['namespace'] = 'benchmark-operator'
        # run workload with odf pvc True/False. True=ODF, False=Ephemeral
        self._environment_variables_dict['odf_pvc'] = os.environ.get('ODF_PVC', 'True')
        # Workloads that required ODF
        self._environment_variables_dict['workloads_odf_pvc'] = ['vdbench', 'hammerdb']
        # This parameter get from Test_CI.yml file
        self._environment_variables_dict['build_version'] = os.environ.get('BUILD_VERSION', '1.0.0')
        # collect system metrics True/False - required by benchmark-operator;
        # defaults to True only when an ElasticSearch target is configured.
        if self._environment_variables_dict['elasticsearch']:
            self._environment_variables_dict['system_metrics'] = os.environ.get('SYSTEM_METRICS', 'True')
        else:
            self._environment_variables_dict['system_metrics'] = os.environ.get('SYSTEM_METRICS', 'False')
        # CI status update once at the end of CI pass/failed
        self._environment_variables_dict['ci_status'] = os.environ.get('CI_STATUS', '')
        # Valid run types
        self._environment_variables_dict['run_types'] = ['test_ci', 'func_ci', 'perf_ci']
        # Run type test_ci/func_ci/perf_ci, default test_ci same environment as func_ci
        self._environment_variables_dict['run_type'] = os.environ.get('RUN_TYPE', 'test_ci')
        self._environment_variables_dict['runner_type'] = os.environ.get('RUNNER_TYPE')
        self._environment_variables_dict['config_from_args'] = os.environ.get('CONFIG_FROM_ARGS')
        self._environment_variables_dict['template_in_workload_dir'] = os.environ.get('TEMPLATE_IN_WORKLOAD_DIR')
        # Run uuid
        self._environment_variables_dict['uuid'] = os.environ.get('UUID', str(uuid4()))
        self._environment_variables_dict['trunc_uuid'] = self._environment_variables_dict['uuid'].split('-')[0]
        # Benchmark runner IBM Cloud Object Storage run artifacts hierarchy, not part of a POSIX path ('/' a key separator, '-' file name convention)
        self._environment_variables_dict['date_key'] = datetime.datetime.now().strftime("%Y/%m/%d")
        # Time stamp used in artifact names.
        # (os.path.join() on a single argument was a no-op here - dropped.)
        self._environment_variables_dict['time_stamp_format'] = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d-%H-%M-%S')
        # Benchmark runner local run artifacts path with time stamp format
        self._environment_variables_dict['run_artifacts_path'] = os.environ.get('RUN_ARTIFACTS_PATH')
        if not self._environment_variables_dict['run_artifacts_path']:
            self._environment_variables_dict['run_artifacts_path'] = os.path.join(self._environment_variables_dict['run_artifacts'], f"{self._environment_variables_dict['workload'].replace('_', '-')}-{self._environment_variables_dict['time_stamp_format']}")
        # None(Default)/ 'True' to save local(/tmp) artifacts files
        self._environment_variables_dict['save_artifacts_local'] = os.environ.get('SAVE_ARTIFACTS_LOCAL', None)
        # None/ 'True'(Default) to enable prometheus snapshot
        self._environment_variables_dict['enable_prometheus_snapshot'] = os.environ.get('ENABLE_PROMETHEUS_SNAPSHOT', 'True')
        # end dynamic parameters - configure for local run
        ##################################################################################################

        # ** DO NOT CHANGE THE PARAMETERS BELOW **
        # Constant parameters
        # Parameters below related to 'azure_cluster_start_stop()'
        # Azure details
        self._environment_variables_dict['azure_cluster_stop'] = os.environ.get('AZURE_CLUSTER_STOP', '')
        self._environment_variables_dict['azure_cluster_start'] = os.environ.get('AZURE_CLUSTER_START', '')
        self._environment_variables_dict['azure_clientid'] = os.environ.get('AZURE_CLIENTID', '')
        self._environment_variables_dict['azure_secret'] = os.environ.get('AZURE_SECRET', '')
        self._environment_variables_dict['azure_tenantid'] = os.environ.get('AZURE_TENANTID', '')
        self._environment_variables_dict['azure_subscriptionid'] = os.environ.get('AZURE_SUBSCRIPTIONID', '')
        self._environment_variables_dict['azure_resource_group_name'] = os.environ.get('AZURE_RESOURCE_GROUP_NAME', '')
        self._environment_variables_dict['azure_vm_name'] = os.environ.get('AZURE_VM_NAME', '')
        # IBM details
        self._environment_variables_dict['region_name'] = os.environ.get('IBM_REGION_NAME', '')
        # None(default) - must for unittest
        self._environment_variables_dict['endpoint_url'] = os.environ.get('IBM_ENDPOINT_URL', None)
        self._environment_variables_dict['access_key_id'] = os.environ.get('IBM_ACCESS_KEY_ID', '')
        self._environment_variables_dict['secret_access_key'] = os.environ.get('IBM_SECRET_ACCESS_KEY', '')
        self._environment_variables_dict['bucket'] = os.environ.get('IBM_BUCKET', '')
        self._environment_variables_dict['key'] = os.environ.get('IBM_KEY', '')
        # Parameters below related to 'install_ocp()'
        # MANDATORY for OCP install: install ocp version - insert version to install i.e. 'latest-4.8' : https://mirror.openshift.com/pub/openshift-v4/clients/ocp
        self._environment_variables_dict['install_ocp_version'] = os.environ.get('INSTALL_OCP_VERSION', '')
        # There are 2 steps run_ibm_ocp_ipi_installer/verify_install_complete
        self._environment_variables_dict['install_step'] = os.environ.get('INSTALL_STEP', '')
        # dev or ga (/ocp-dev-preview/ or /ocp/ )
        self._environment_variables_dict['ocp_version_build'] = os.environ.get('OCP_VERSION_BUILD', '')
        # github repository
        self._environment_variables_dict['github_repository_short'] = os.environ.get('GITHUB_REPOSITORY_SHORT', '')
        # Parameters below related to 'install_resource()'
        # MANDATORY for OCP resource install: 'True' for install resources
        self._environment_variables_dict['install_ocp_resources'] = os.environ.get('INSTALL_OCP_RESOURCES', '')
        # cnv version
        self._environment_variables_dict['cnv_version'] = os.environ.get('CNV_VERSION', '')
        # QUAY_USERNAME for nightly build
        self._environment_variables_dict['quay_username'] = os.environ.get('QUAY_USERNAME', '')
        # QUAY_PASSWORD for nightly build
        self._environment_variables_dict['quay_password'] = os.environ.get('QUAY_PASSWORD', '')
        # odf version
        self._environment_variables_dict['odf_version'] = os.environ.get('ODF_VERSION', '')
        # number of odf disks from ['sdb', 'sdc', 'sdd', 'sde']
        self._environment_variables_dict['num_odf_disk'] = os.environ.get('NUM_ODF_DISK', 1)
        # install resources list
        self._environment_variables_dict['install_resources_list'] = os.environ.get('INSTALL_RESOURCES_LIST', '')
        # Parameters below related to 'install_ocp()' and 'install_resource()'
        # Mandatory: OCP environment flavor PERF or FUNC
        self._environment_variables_dict['ocp_env_flavor'] = os.environ.get('OCP_ENV_FLAVOR', 'FUNC')
        # IBM details
        self._environment_variables_dict['ibm_api_key'] = os.environ.get('IBM_API_KEY', '')
        # github token
        self._environment_variables_dict['github_token'] = os.environ.get('GITHUB_TOKEN', '')

        # Flavor-prefixed provisioning parameters, e.g. FUNC_PROVISION_IP / PERF_PROVISION_IP.
        self.__ocp_env_flavor = self._environment_variables_dict['ocp_env_flavor']
        self._environment_variables_dict['worker_ids'] = os.environ.get(f'{self.__ocp_env_flavor}_WORKER_IDS', "")
        self._environment_variables_dict['provision_ip'] = os.environ.get(f'{self.__ocp_env_flavor}_PROVISION_IP', '')
        # Placed on secret only
        self._environment_variables_dict['provision_private_key'] = os.environ.get(f'{self.__ocp_env_flavor}_PROVISION_PRIVATE_KEY', '')
        # For internal private key path
        self._environment_variables_dict['provision_private_key_path'] = os.environ.get(f'{self.__ocp_env_flavor}_PROVISION_PRIVATE_KEY_PATH', '')
        self._environment_variables_dict['container_private_key_path'] = os.environ.get('CONTAINER_PRIVATE_KEY_PATH', '')
        self._environment_variables_dict['provision_user'] = os.environ.get(f'{self.__ocp_env_flavor}_PROVISION_USER', '')
        self._environment_variables_dict['provision_oc_user'] = os.environ.get(f'{self.__ocp_env_flavor}_PROVISION_OC_USER', '')
        self._environment_variables_dict['provision_port'] = os.environ.get(f'{self.__ocp_env_flavor}_PROVISION_PORT', '')
        self._environment_variables_dict['provision_kubeadmin_password_path'] = os.environ.get(f'{self.__ocp_env_flavor}_PROVISION_KUBEADMIN_PASSWORD_PATH', '')
        self._environment_variables_dict['provision_kubeconfig_path'] = os.environ.get(f'{self.__ocp_env_flavor}_PROVISION_KUBECONFIG_PATH', '')
        self._environment_variables_dict['container_kubeconfig_path'] = os.environ.get('CONTAINER_KUBECONFIG_PATH', '')
        self._environment_variables_dict['provision_installer_path'] = os.environ.get(f'{self.__ocp_env_flavor}_PROVISION_INSTALLER_PATH', '')
        self._environment_variables_dict['provision_installer_cmd'] = os.environ.get(f'{self.__ocp_env_flavor}_PROVISION_INSTALLER_CMD', '')
        self._environment_variables_dict['provision_installer_log'] = os.environ.get(f'{self.__ocp_env_flavor}_PROVISION_INSTALLER_LOG', '')
        # remote ssh timeout - 3 hours for installation time
        self._environment_variables_dict['provision_timeout'] = os.environ.get(f'{self.__ocp_env_flavor}_PROVISION_TIMEOUT', '10800')
        # General timeout - 1 hour wait for pod/vm/upload data to elasticsearch
        # (comment previously said "1.5 hours", which did not match the 3600 s value)
        self._environment_variables_dict['timeout'] = os.environ.get(f'{self.__ocp_env_flavor}_TIMEOUT', '3600')
        # Benchmark runner run artifacts url
        self._environment_variables_dict['run_artifacts_url'] = os.environ.get(f'{self.__ocp_env_flavor}_RUN_ARTIFACTS_URL', '')
        # Parameters below related to 'update_ci_status()' - no need to configure, updated automatically by CI
        # CI run time
        self._environment_variables_dict['ci_minutes_time'] = os.environ.get('CI_MINUTES_TIME', 0)
        # Get this parameter from install resource process
        self._environment_variables_dict['ocp_resource_install_minutes_time'] = os.environ.get('OCP_RESOURCE_INSTALL_MINUTES_TIME', 0)
        # benchmark-operator last commit id
        self._environment_variables_dict['benchmark_operator_id'] = os.environ.get('BENCHMARK_OPERATOR_ID', '')
        # benchmark-wrapper last commit id
        self._environment_variables_dict['benchmark_wrapper_id'] = os.environ.get('BENCHMARK_WRAPPER_ID', '')

        # Node Selector functionality
        if self._environment_variables_dict['pin_node1']:
            self._environment_variables_dict['pin'] = 'true'
        else:
            self._environment_variables_dict['pin'] = 'false'
        # if pin_node2 does not exist, fall back to pin_node1
        if self._environment_variables_dict['pin_node1'] and not self._environment_variables_dict['pin_node2']:
            self._environment_variables_dict['pin_node2'] = self._environment_variables_dict['pin_node1']

        # ElasticSearch url - credentials are embedded only when a password is configured.
        if self._environment_variables_dict.get('elasticsearch_password', ''):
            self._environment_variables_dict['elasticsearch_url'] = f"{self._environment_variables_dict['elasticsearch_url_protocol']}://{self._environment_variables_dict.get('elasticsearch_user', '')}:{self._environment_variables_dict.get('elasticsearch_password', '')}@{self._environment_variables_dict.get('elasticsearch', '')}:{self._environment_variables_dict.get('elasticsearch_port', '')}"
        else:
            if self._environment_variables_dict['elasticsearch'] and self._environment_variables_dict.get('elasticsearch_port', ''):
                self._environment_variables_dict['elasticsearch_url'] = f"{self._environment_variables_dict['elasticsearch_url_protocol']}://{self._environment_variables_dict.get('elasticsearch', '')}:{self._environment_variables_dict.get('elasticsearch_port', '')}"
            else:
                self._environment_variables_dict['elasticsearch_url'] = ''

        # OpenShift or kubernetes support, OpenShift: oc, kubectl || kubernetes: kubectl
        if self._environment_variables_dict['cluster'] == 'kubernetes':
            self._environment_variables_dict['cli'] = 'kubectl'
            self._environment_variables_dict['odf_pvc'] = 'False'
            self._environment_variables_dict['enable_prometheus_snapshot'] = None
        else:
            self._environment_variables_dict['cli'] = os.environ.get('CLI', 'oc')

    @property
    def workloads_list(self):
        """Names of all supported workloads."""
        return self._environment_variables_dict['workloads']

    @property
    def environment_variables_dict(self):
        """All resolved configuration parameters."""
        return self._environment_variables_dict

    @property
    def run_types_list(self):
        """Valid CI run types."""
        return self._environment_variables_dict['run_types']

    @environment_variables_dict.setter
    def environment_variables_dict(self, value: dict):
        """Replace the entire parameters dictionary."""
        self._environment_variables_dict = value

    def get_workload_namespace(self, workload: str):
        """
        Return the namespace for a known workload, or None for unknown ones.
        """
        if workload in self._environment_variables_dict['workloads'] and workload.split('_')[0] in self._environment_variables_dict['workload_namespaces']:
            return self._environment_variables_dict['workload_namespaces'][workload.split('_')[0]]
        else:
            return None
# Module-level singleton: importers use this shared instance rather than
# constructing their own.
environment_variables = EnvironmentVariables()
|
"""
Split, Join, enumerate em Python
*Split - Dividir uma string #str
*Join - Juntar uma lista # Str
% count(variável) server para ver quantas vezes a variavel dentro dos pareteses aparece
* Enumerate - Enumerar elementos da lista # interáveis
* strip() Remove o espaço no inicio e no fim do string
"""
# The snippets below are earlier exercises from this lesson, deliberately
# disabled: the triple-quoted strings and '#' lines are never executed.

# Exercise: find the most frequent word in a sentence with split() and count().
'''string = "O Brasil é o o o o o país do futebal,o Brasil é Penta"
lista1 = string.split(' ')
lista2 = string.split(',')
# print(lista1)
# print(lista2)
palavra =''
contagem = 0
for valor in lista1:
    qtd_vezes = lista1.count(valor)
    if qtd_vezes > contagem:
        contagem = qtd_vezes
        palavra = valor
print(f' A palavra que aparece mais vezes é {palavra} (`{contagem} x)')'''

# Exercise: split a sentence and re-join it with '.' as the separator.
'''string = 'Acesse o site jw.org e aprenda sobre Deus'
lista = string.split(' ')
string2 = '.'.join(lista)
print(string2)'''

# Exercise: enumerate the words of a sentence.
# string = 'Acesse o site jw.org e aprenda sobre Deus'
# lista = string.split(' ')
# for indice, nome in enumerate(lista):
#     print(indice, nome)

# Exercise: unpack a list into individual variables.
# lista = ['João ','Lucas','Alcãntara','Moreira','dos','Santos']
# n1,n2,n3,n4,n5,n6 = lista
# print(n1,n2,n6)
#
# Exercise: index into a nested list and materialize an enumerate() object.
# lista = [
#     ['João ','Lucas','Alcântara'],
#     ['Moreira','dos','Santos'],
#     ['Lasla','do','Rosario','Brito'],
# ]
# print(lista[0][2])  # index 0 of the outer list, index 2 of the inner list
# enumerada = enumerate(lista)
# print(list(enumerada))
# Nested list: each element is itself a list of name parts.
lista = [
    ['João ', 'Lucas', 'Alcântara'],
    ['Moreira', 'dos', 'Santos'],
    ['Lasla', 'do', 'Rosario', 'Brito'],
]

# enumerate() may start counting at any value - here at 50.
for v1, v2 in enumerate(lista, 50):
    print(v1, v2)
|