text string | size int64 | token_count int64 |
|---|---|---|
import torch
import torch.utils.data as data
from torchsummary import summary
from datasets import get_dataset, HyperX,sHyperX
from models import get_model, train,test
from utils import sample_gt,get_device,metrics,compute_imf_weights
import visdom
import argparse
import numpy as np
import cv2
from models_liu import train_liu
# Command-line interface for the hyperspectral experiment runner.
# BUG FIXES below: the --cuda help text claimed a default of -1 while the
# actual default is 0; the --batch_size help was missing its closing paren;
# minor grammar fix in --download.
parser = argparse.ArgumentParser(description="Run deep learning experiments on"
                                 " various hyperspectral datasets")
parser.add_argument('--dataset', type=str, default=None,
                    help="Dataset to use.")
parser.add_argument('--model', type=str, default=None,
                    help="Model to train. Available:\n"
                    "SVM (linear), "
                    "SVM_grid (grid search on linear, poly and RBF kernels), "
                    "baseline (fully connected NN), "
                    "hu (1D CNN), "
                    "hamida (3D CNN + 1D classifier), "
                    "lee (3D FCN), "
                    "chen (3D CNN), "
                    "li (3D CNN), "
                    "he (3D CNN), "
                    "luo (3D CNN), "
                    "sharma (2D CNN), "
                    "boulch (1D semi-supervised CNN), "
                    "liu (3D semi-supervised CNN), "
                    "mou (1D RNN)")
parser.add_argument('--folder', type=str, help="Folder where to store the "
                    "datasets (defaults to the current working directory).",
                    default="./Datasets/")
parser.add_argument('--cuda', type=int, default=0,
                    help="Specify CUDA device (defaults to 0; use -1 to learn on CPU)")
parser.add_argument('--runs', type=int, default=1, help="Number of runs (default: 1)")
parser.add_argument('--restore', type=str, default=None,
                    help="Weights to use for initialization, e.g. a checkpoint")

# Dataset options
group_dataset = parser.add_argument_group('Dataset')
group_dataset.add_argument('--training_sample', type=float, default=10,
                           help="Percentage of samples to use for training (default: 10%%)")
group_dataset.add_argument('--sampling_mode', type=str, default='random', help="Sampling mode"
                           " (random sampling or disjoint, default= random)"
                           )
group_dataset.add_argument('--train_set', type=str, default=None,
                           help="Path to the train ground truth (optional, this "
                           "supersedes the --sampling_mode option)")
group_dataset.add_argument('--test_set', type=str, default=None,
                           help="Path to the test set (optional, by default "
                           "the test_set is the entire ground truth minus the training)")

# Training options
group_train = parser.add_argument_group('Training')
group_train.add_argument('--epoch', type=int, help="Training epochs (optional, if"
                         " absent will be set by the model)")
group_train.add_argument('--patch_size', type=int,
                         help="Size of the spatial neighbourhood (optional, if "
                         "absent will be set by the model)")
group_train.add_argument('--lr', type=float,
                         help="Learning rate, set by the model if not specified.")
group_train.add_argument('--class_balancing', action='store_true',
                         help="Inverse median frequency class balancing (default = False)")
group_train.add_argument('--batch_size', type=int,
                         help="Batch size (optional, if absent will be set by the model)")
group_train.add_argument('--test_stride', type=int, default=1,
                         help="Sliding window step stride during inference (default = 1)")

# Data augmentation parameters
group_da = parser.add_argument_group('Data augmentation')
group_da.add_argument('--flip_augmentation', action='store_true',
                      help="Random flips (if patch_size > 1)")
group_da.add_argument('--radiation_augmentation', action='store_true',
                      help="Random radiation noise (illumination)")
group_da.add_argument('--mixture_augmentation', action='store_true',
                      help="Random mixes between spectra")

parser.add_argument('--with_exploration', action='store_true',
                    help="See data exploration visualization")
parser.add_argument('--download', type=str, default=None, nargs='+',
                    help="Download the specified datasets and quit.")

args = parser.parse_args()
# Resolve the compute device from the --cuda flag (-1 -> CPU).
CUDA_DEVICE = get_device(args.cuda)
# % of training samples
SAMPLE_PERCENTAGE = args.training_sample
# Data augmentation ?
FLIP_AUGMENTATION = args.flip_augmentation
RADIATION_AUGMENTATION = args.radiation_augmentation
MIXTURE_AUGMENTATION = args.mixture_augmentation
# Dataset name
DATASET = args.dataset
# Model name
MODEL = args.model
# Number of runs (for cross-validation)
N_RUNS = args.runs
# Spatial context size (number of neighbours in each spatial direction)
PATCH_SIZE = args.patch_size
# Add some visualization of the spectra ?
DATAVIZ = args.with_exploration
# Target folder to store/download/load the datasets
FOLDER = args.folder
# Number of epochs to run
EPOCH = args.epoch
# Sampling mode, e.g random sampling
SAMPLING_MODE = args.sampling_mode
# Pre-computed weights to restore
CHECKPOINT = args.restore
# Learning rate for the SGD
LEARNING_RATE = args.lr
# Automated class balancing
CLASS_BALANCING = args.class_balancing
# Training ground truth file
TRAIN_GT = args.train_set
# Testing ground truth file
TEST_GT = args.test_set
TEST_STRIDE = args.test_stride
# NOTE(review): the four assignments below unconditionally override the
# command-line values of --model and --dataset parsed above — the CLI flags
# are effectively dead. Confirm whether this hard-coding is intentional
# (it looks like a local experiment shortcut).
MODEL = 'hamida'
DATASET = 'train2'
# Separate dataset used for validation when ttest is True.
vDATASET = 'test2'
MODE = 'disjoint'
# ttest=True: train on the whole 'train2' ground truth and validate on
# 'test2'; ttest=False: split 'train2' 20/80 with sample_gt instead.
ttest = True
if ttest == False:
    SAMPLE_PERCENTAGE = 0.2
else:
    SAMPLE_PERCENTAGE = 1
# CHECKPOINT="checkpoints/hamida_et_al/name of the dataset used/True2020-07-02 02:12:54.440903_epoch18_0.72.pth"
# Visdom dashboard, one environment per dataset/model pair.
viz = visdom.Visdom(env=DATASET + ' ' + MODEL)
hyperparams = vars(args)
# Load the hyperspectral cube, its ground truth and per-dataset metadata.
img, gt, LABEL_VALUES, IGNORED_LABELS, RGB_BANDS, palette = get_dataset(DATASET,
                                                                        FOLDER)
# top, bottom, left, right = [30]*4
# img=cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0])
# gt=cv2.copyMakeBorder(gt, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0])
# qmin=[]
# qmax=[]
img = img.astype('float32')
# Per-band min/max computed offline and stored on disk so that train and
# test cubes are normalised with the same statistics.
# NOTE(review): assumes maxint16.npy/minint16.npy exist in the CWD and have
# one entry per band — TODO confirm; also divides by (qmax - qmin), which
# fails for any constant band (qmax == qmin).
qmax = np.load("maxint16.npy")
qmin = np.load("minint16.npy")
for i in range(img.shape[-1]):
    # qmin.append(np.min(img[:,:,i]))
    # qmax.append(np.max(img[:,:,i]))
    # Min-max scale each band to [0, 1] using the stored statistics.
    img[:, :, i] = (img[:, :, i] - qmin[i]) / (qmax[i] - qmin[i])
N_CLASSES = len(LABEL_VALUES)
# N_CLASSES = 24
N_BANDS = img.shape[-1]
hyperparams = vars(args)
hyperparams.update({'n_classes': N_CLASSES, 'n_bands': N_BANDS, 'ignored_labels': IGNORED_LABELS, 'device': CUDA_DEVICE})
# Drop unset (None) CLI options so model defaults apply in get_model.
hyperparams = dict((k, v) for k, v in hyperparams.items() if v is not None)
if SAMPLE_PERCENTAGE != 1:
    # Split the ground truth into train/test according to MODE.
    test_gt, train_gt = sample_gt(gt, SAMPLE_PERCENTAGE, mode=MODE)
    #######
    # test_gt, train_gt = sample_gt(test_gt, SAMPLE_PERCENTAGE, mode=MODE)
    # test_gt, train_gt = sample_gt(test_gt, SAMPLE_PERCENTAGE, mode=MODE)
    # test_gt, train_gt = sample_gt(test_gt, SAMPLE_PERCENTAGE, mode=MODE)
    ########
else:
    # Use the full ground truth for both (validation comes from vDATASET).
    train_gt = test_gt = gt
#######
# weights = compute_imf_weights(train_gt, N_CLASSES, IGNORED_LABELS)
# hyperparams['weights'] = torch.from_numpy(weights).float()
##########
# Build the model, optimizer and loss; get_model also fills in defaults
# for any hyperparameters left unset above.
model, optimizer, loss, hyperparams = get_model(MODEL, **hyperparams)
# Training patches sampled from the (normalised) cube and train ground truth.
train_dataset = HyperX(img, train_gt, **hyperparams)
# NOTE(review): pin_memory expects a bool but receives the device object
# here (and in both val loaders below); it is merely truthy — confirm intent.
train_loader = data.DataLoader(train_dataset,
                               batch_size=hyperparams['batch_size'],
                               pin_memory=hyperparams['device'],
                               shuffle=True)
if ttest == True:
    # Validate on a separate dataset; free the training cube first to
    # bound peak memory.
    del img
    vimg, vgt, vLABEL_VALUES, vIGNORED_LABELS, vRGB_BANDS, vpalette = get_dataset(vDATASET,
                                                                                  FOLDER)
    # top, bottom, left, right = [30]*4
    # vimg=cv2.copyMakeBorder(vimg, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0])
    # vgt=cv2.copyMakeBorder(vgt, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0])
    # vqmin,vqmax=[],[]
    vimg = vimg.astype('float32')
    for i in range(vimg.shape[-1]):
        # vqmin.append(np.min(vimg[:,:,i]))
        # vqmax.append(np.max(vimg[:,:,i]))
        # Reuse the training statistics so both cubes share one scale.
        vimg[:, :, i] = (vimg[:, :, i] - qmin[i]) / (qmax[i] - qmin[i])
    # vtrain_gt, vtest_gt = sample_gt(vgt, 0.1, mode='random')
    val_dataset = HyperX(vimg, vgt, **hyperparams)
    # del vimg
    val_loader = data.DataLoader(val_dataset,
                                 pin_memory=hyperparams['device'],
                                 batch_size=hyperparams['batch_size'], drop_last=True)
    del vimg
else:
    # Validate on the held-out portion of the training ground truth.
    val_dataset = HyperX(img, test_gt, **hyperparams)
    val_loader = data.DataLoader(val_dataset,
                                 pin_memory=hyperparams['device'],
                                 batch_size=hyperparams['batch_size'], drop_last=True)
# del img
# Sanity prints: labelled-pixel counts for each split.
print(np.count_nonzero(train_gt))
print(np.count_nonzero(test_gt))
print(np.count_nonzero(gt))
print(hyperparams)
print("Network :")
with torch.no_grad():
    # Grab one batch only to learn the input shape for torchsummary.
    for input, _ in train_loader:
        break
    summary(model.to(hyperparams['device']), input.size()[1:])
if CHECKPOINT is not None:
    # Warm-start from previously saved weights.
    model.load_state_dict(torch.load(CHECKPOINT))
# NOTE(review): `klepsia` is never defined anywhere in this script, so both
# training calls below raise NameError as soon as they run — confirm where
# it was supposed to come from (it is passed straight through to
# train/train_liu).
if MODEL != "liu":
    try:
        train(model, optimizer, loss, train_loader, hyperparams['epoch'],
              scheduler=hyperparams['scheduler'], device=hyperparams['device'],
              supervision=hyperparams['supervision'], val_loader=val_loader,
              display=viz, klepsia=klepsia)
    except KeyboardInterrupt:
        # Allow the user to stop the training
        pass
if MODEL == "liu":
    # The semi-supervised "liu" model additionally needs an sHyperX loader.
    strain_dataset = sHyperX(img, train_gt, **hyperparams)
    strain_loader = data.DataLoader(strain_dataset,
                                    batch_size=hyperparams['batch_size'],
                                    pin_memory=hyperparams['device'],
                                    shuffle=True, drop_last=True)
    try:
        train_liu(model, optimizer, loss, train_loader, hyperparams['epoch'], val_loader, klepsia, strain_loader,
                  scheduler=hyperparams['scheduler'], device=hyperparams['device'],
                  display=viz)
    except KeyboardInterrupt:
        # Allow the user to stop the training
        pass
# probabilities = test(model, vimg, hyperparams)
# prediction = np.argmax(probabilities, axis=-1)
# run_results = metrics(prediction, vgt, ignored_labels=[0], n_classes=N_CLASSES)
# cm=run_results['Confusion matrix']
# cmr = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# cmp = cm.astype('float') / cm.sum(axis=0)[np.newaxis,:]
# np.savetxt(MODEL+'cmr.csv',cmr,delimiter=',')
# np.savetxt(MODEL+'cm.csv',cm,delimiter=',')
# np.savetxt(MODEL+'cmp.csv',cmp,delimiter=',')
# rep=run_results['report']
# np.savetxt(MODEL+'rep.csv',rep,delimiter=',')
# import cv2
# cv2.imwrite(MODEL+'pred.tif',prediction)
| 11,432 | 3,765 |
import torch
import matplotlib.pyplot as plt
import numpy as np
import time
from torch import optim, nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from models.DecomposableAttention import DecomposableAttention
from models.BiLSTMMaxPooling import BiLSTMMaxPooling
from modules.Utils import utils
from modules.EarlyStopping import EarlyStopping
class Trainer:
    """Training harness for MultiNLI sentence-pair models.

    Wraps a model (DecomposableAttention or BiLSTMMaxPooling), its Adam
    optimizer, the train/dev dataloaders, early stopping, and TensorBoard
    logging. Call `load()` before `train()`.

    Modernised for PyTorch >= 0.4 / Python >= 3.7:
    - `.cuda(async=True)` was a SyntaxError on Python 3.7+ (`async` became a
      keyword); the PyTorch parameter is `non_blocking`.
    - `loss.data[0]` -> `loss.item()`.
    - `Variable(..., volatile=True)` -> plain tensors under `torch.no_grad()`.
    - `size_average=False` -> `reduction='sum'`.
    """

    def __init__(self, params, train_data, dev_data,
                 dev_mismatched_data, embedding):
        self.params = params
        self.train_data = train_data
        self.dev_data = dev_data
        self.dev_mismatched_data = dev_mismatched_data
        self.epochs = params.epochs
        print("Creating dataloaders")
        self.cuda_available = torch.cuda.is_available()
        self.train_loader = DataLoader(dataset=train_data,
                                       shuffle=True,
                                       batch_size=params.batch_size,
                                       pin_memory=self.cuda_available,
                                       collate_fn=(lambda x:
                                                   utils.collate_batch(
                                                       x,
                                                       self.params)))
        self.dev_loader = DataLoader(dataset=dev_data,
                                     shuffle=False,
                                     batch_size=params.batch_size,
                                     pin_memory=self.cuda_available)
        self.dev_mismatched_loader = DataLoader(dataset=dev_mismatched_data,
                                                shuffle=False,
                                                batch_size=params.batch_size,
                                                pin_memory=self.cuda_available)
        self.string_fixer = "=========="
        self.embedding = embedding
        if self.params.log_dir:
            self.writer = SummaryWriter(self.params.log_dir)
        else:
            self.writer = SummaryWriter()
        self.writer_step = 0

    def load(self, model_name="decomposable"):
        """Build the model, optimizer, metric histories and early stopping;
        optionally resume all of them from `params.resume`."""
        print("Loading model")
        if model_name == "decomposable":
            self.model = DecomposableAttention(self.params, self.embedding)
        else:
            self.model = BiLSTMMaxPooling(self.params, self.embedding)
        # Optimize only trainable parameters (frozen embeddings excluded).
        self.optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                           self.model.parameters()),
                                    lr=self.params.lr)
        self.start_time = time.time()
        self.histories = {
            "train_loss": np.empty(0, dtype=np.float32),
            "train_acc": np.empty(0, dtype=np.float32),
            "dev_matched_loss": np.empty(0, dtype=np.float32),
            "dev_matched_acc": np.empty(0, dtype=np.float32),
            "dev_mismatched_loss": np.empty(0, dtype=np.float32),
            "dev_mismatched_acc": np.empty(0, dtype=np.float32)
        }
        self.early_stopping = EarlyStopping(
            self.model, self.optimizer, patience=self.params.patience,
            minimize=False)
        if self.params.resume:
            checkpoint = utils.load_checkpoint(self.params.resume)
            if checkpoint is not None:
                self.model.load_state_dict(checkpoint['state_dict'])
                self.optimizer.load_state_dict(checkpoint['optimizer'])
                self.histories.update(checkpoint)
                self.early_stopping.init_from_checkpoint(checkpoint)
                print("Loaded model, Best Loss: %.8f, Best Acc: %.2f" %
                      (checkpoint['best'], checkpoint['best_acc']))
        if self.cuda_available:
            self.model = self.model.cuda()
        print("Model loaded")

    def train(self):
        """Run the training loop with per-epoch validation and early stopping."""
        criterion = nn.CrossEntropyLoss()
        start_epoch = 0
        best_prec = 0
        is_best = False
        self.model.train()
        print("Starting training")
        self.print_info()
        for epoch in range(start_epoch, self.params.epochs):
            for i, (premise, hypo, labels) in enumerate(self.train_loader):
                # Tensors are autograd-aware since PyTorch 0.4; the old
                # Variable wrapper is a no-op and has been dropped.
                premise_batch = premise.long()
                hypo_batch = hypo.long()
                labels_batch = labels
                if self.cuda_available:
                    premise_batch = premise_batch.cuda()
                    hypo_batch = hypo_batch.cuda()
                    # FIX: was `.cuda(async=True)` — a SyntaxError on
                    # Python >= 3.7; PyTorch renamed it to non_blocking.
                    labels_batch = labels_batch.cuda(non_blocking=True)
                self.optimizer.zero_grad()
                output = self.model(premise_batch, hypo_batch)
                loss = criterion(output, labels_batch.long())
                loss.backward()
                self.optimizer.step()
                if self.params.extra_debug and \
                        (i + 1) % (self.params.batch_size * 4) == 0:
                    # Log gradient histograms for all parameters that
                    # received a gradient this step.
                    for n, p in filter(lambda np: np[1].grad is not None,
                                       self.model.named_parameters()):
                        self.writer.add_histogram(
                            n, p.grad.data.cpu().numpy(),
                            global_step=self.writer_step)
                    self.writer_step += 1
                    # FIX: loss.data[0] was removed in PyTorch 0.4.
                    print(('Epoch: [{0}/{1}], Step: [{2}/{3}], Loss: {4},')
                          .format(epoch + 1,
                                  self.params.epochs,
                                  i + 1,
                                  len(self.train_loader),
                                  loss.item()))
            train_acc, train_loss = self.validate_model(self.train_loader,
                                                        self.model)
            dev_matched_acc, dev_matched_loss = self.validate_model(
                self.dev_loader, self.model)
            dev_mismatched_acc, dev_mismatched_loss = self.validate_model(
                self.dev_mismatched_loader, self.model)
            self.histories['train_loss'] = np.append(
                self.histories['train_loss'],
                [train_loss])
            self.histories['train_acc'] = np.append(
                self.histories['train_acc'],
                [train_acc])
            self.histories['dev_matched_loss'] = np.append(
                self.histories['dev_matched_loss'], [dev_matched_loss])
            self.histories['dev_matched_acc'] = np.append(
                self.histories['dev_matched_acc'], [dev_matched_acc])
            self.histories['dev_mismatched_loss'] = np.append(
                self.histories['dev_mismatched_loss'], [dev_mismatched_loss])
            self.histories['dev_mismatched_acc'] = np.append(
                self.histories['dev_mismatched_acc'], [dev_mismatched_acc])
            # Early stopping tracks dev-matched metrics; returning True
            # means patience was exhausted.
            if not self.early_stopping(dev_matched_loss, dev_matched_acc,
                                       epoch, self.histories):
                self.print_train_info(epoch, train_acc, train_loss,
                                      dev_matched_acc, dev_matched_loss,
                                      dev_mismatched_acc,
                                      dev_mismatched_loss)
            else:
                print("Early stopping activated")
                print("Restoring earlier state and stopping")
                self.early_stopping.print_info()
                self.plot_learning_curves(self.histories, "dev_matched_acc")
                plt.show()
                break

    def validate_model(self, loader, model):
        """Return (accuracy in percent, mean per-example loss) of `model`
        over `loader`. Restores the model to train mode before returning."""
        model.eval()
        correct = 0
        total = 0
        total_loss = 0
        # torch.no_grad() replaces the removed Variable(volatile=True).
        with torch.no_grad():
            for premise, hypo, labels in loader:
                premise_batch = premise.long()
                hypo_batch = hypo.long()
                labels_batch = labels.long()
                if self.cuda_available:
                    premise_batch = premise_batch.cuda()
                    hypo_batch = hypo_batch.cuda()
                    labels_batch = labels_batch.cuda()
                output = model(premise_batch, hypo_batch)
                # FIX: reduction='sum' replaces deprecated size_average=False.
                loss = nn.functional.cross_entropy(output, labels_batch.long(),
                                                   reduction='sum')
                total_loss += loss.item()
                total += len(labels_batch)
                # .item() works on both CPU and CUDA tensors, so the old
                # cuda/non-cuda branches are unified.
                correct += (labels_batch ==
                            output.max(1)[1]).sum().item()
        model.train()
        average_loss = total_loss / total
        return correct / total * 100, average_loss

    def print_info(self):
        """Print dataset sizes and the hyperparameter configuration."""
        print(self.string_fixer + " Data " + self.string_fixer)
        print("Training set: %d examples" % (len(self.train_data)))
        print("Validation set: %d examples" % (len(self.dev_data)))
        print("Timestamp: %s" % utils.get_time_hhmmss())
        print(self.string_fixer + " Params " + self.string_fixer)
        print("Learning Rate: %f" % self.params.lr)
        print("Dropout (p): %f" % self.params.dropout)
        print("Batch Size: %d" % self.params.batch_size)
        print("Epochs: %d" % self.params.epochs)
        print("Patience: %d" % self.params.patience)
        print("Resume: %s" % self.params.resume)
        print("GRU Encode: %s" % str(self.params.gru_encode))
        print("Cuda: %s" % str(torch.cuda.is_available()))
        print("Batch Optimizations: %s" % str(self.params.use_optimizations))
        print("Intra Attention: %s" % str(self.params.use_intra_attention))
        print("Model Structure:")
        print(self.model)

    def print_train_info(self, epoch, train_acc, train_loss,
                         dev_acc, dev_loss,
                         dev_mismatched_acc, dev_mismatched_loss):
        """Print one epoch's train/dev metrics and timing."""
        print((self.string_fixer + " Epoch: {0}/{1} " + self.string_fixer)
              .format(epoch + 1, self.params.epochs))
        print("Train Loss: %.8f, Train Acc: %.2f" % (train_loss, train_acc))
        print("Dev Matched Loss: %.8f, Dev Matched Acc: %.2f" %
              (dev_loss, dev_acc))
        print("Dev Mismatched Loss: %.8f, Dev Mismatched Acc: %.2f" %
              (dev_mismatched_loss, dev_mismatched_acc))
        self.early_stopping.print_info()
        print("Elapsed Time: %s" % (utils.get_time_hhmmss(self.start_time)))
        print("Current timestamp: %s" % (utils.get_time_hhmmss()))

    def plot_learning_curves(self, histories, key):
        """Plot the history stored under `key` on the current figure."""
        plt.cla()
        plt.grid()
        plt.plot(np.arange(histories[key].shape[0]), histories[key], c="green",
                 label=key)
        plt.legend()
        plt.xlabel("Epochs")
        plt.ylabel(key)
| 10,743 | 3,144 |
import scrapy
class GovSbirNewsSpider(scrapy.Spider):
    """Scrape the SBIR.gov news listing (pages 0-22), yielding one item per
    news entry with title, link, date, body snippet and type tags."""
    name = 'gov.sbir.news'
    start_urls = ["https://www.sbir.gov/news?page=0"]

    def __init__(self, *args, **kwargs):
        # FIX: call the Spider base initializer (the original skipped it),
        # and build start_urls as an instance attribute. The original
        # appended to the shared class-level list, so every instantiation
        # grew it with 22 duplicate URLs.
        super().__init__(*args, **kwargs)
        self.start_urls = ["https://www.sbir.gov/news?page=%d" % page_num
                           for page_num in range(23)]

    def parse(self, response):
        """Extract every news entry on one listing page."""
        for div in response.css('div.news-view-rest-wrapper'):
            types = [div_type.css('span::text').extract_first()
                     for div_type in div.css('div.news-type > span')]
            yield {
                'title': div.css('div.news-title > a::text').extract_first(),
                'href': 'https://www.sbir.gov' + div.css('div.news-title > a::attr(href)').extract_first(),
                'date': div.css('div.news-date > strong > span::attr(content)').extract_first(),
                'body': div.css('div.news-body::text').extract_first(),
                'type': types
            }
| 954 | 307 |
from django import forms
from django.forms.models import inlineformset_factory
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from schedule.models import *
class PersonForm(forms.ModelForm):
    """Minimal form for creating/renaming a Person (name only)."""
    class Meta:
        model = Person
        fields = ['name']
class AvailabilityForm(forms.ModelForm):
    """Edit all Person fields except its conference link.

    NOTE(review): despite the name, this is bound to the Person model, not
    an Availability model — confirm that is intentional.
    """
    class Meta:
        model = Person
        exclude = ['conference']
# Inline formset to edit a Person's OtherCommitment date ranges, with two
# blank extra rows for adding new commitments.
OtherCommitmentFormset = inlineformset_factory(Person,
                                               OtherCommitment,
                                               extra=2,
                                               fields=['start_date', 'end_date'])
class ProspectiveAdminField(forms.ModelMultipleChoiceField):
    """ModelMultipleChoiceField that also accepts raw email addresses.

    Values containing "@" are treated as emails and get-or-created in the
    queryset; all other values are validated as primary keys. Returns the
    combined list of pk-matched and email-created model instances.
    """

    def clean(self, value):
        if not value:
            return self.queryset.none()
        if not isinstance(value, (list, tuple)):
            raise ValidationError(self.error_messages['list'], code='list')
        models = []
        id_values = []
        for val in value:
            if "@" in val:
                try:
                    model, created = self.queryset.get_or_create(email=val)
                except ValueError:
                    # BUG FIX: the original *returned* this ValidationError,
                    # silently handing the exception object back to the form
                    # instead of failing validation. It must be raised.
                    raise ValidationError(
                        "Invalid email address",
                        code='invalid_email',
                        params={"pk": val})
                models.append(model)
            else:
                try:
                    # Building the filter coerces val to the pk type; an
                    # unparsable pk raises ValueError here.
                    self.queryset.filter(pk=val)
                except ValueError:
                    raise ValidationError(
                        self.error_messages['invalid_pk_value'],
                        code='invalid_pk_value',
                        params={'pk': val},
                    )
                id_values.append(val)
        qs = list(self.queryset.filter(pk__in=id_values))
        # Any requested pk missing from the queryset is an invalid choice.
        pks = set(str(o.pk) for o in qs)
        for val in id_values:
            if str(val) not in pks:
                raise ValidationError(
                    self.error_messages['invalid_choice'],
                    code='invalid_choice',
                    params={'value': val},
                )
        self.run_validators(value)
        return qs + models
class CreateConferenceForm(forms.ModelForm):
    """Create a Conference; admins are chosen via hidden inputs managed by
    client-side UI, prospective admins may be raw email addresses."""
    # Existing users selected as admins (hidden, populated client-side).
    admins = forms.ModelMultipleChoiceField(User.objects.all(),
                                            widget=forms.MultipleHiddenInput)
    # Not-yet-registered admins, accepted by pk or by email address.
    prospective_admins = ProspectiveAdminField(ProspectiveAdmin.objects.all(),
                                               widget=forms.MultipleHiddenInput)
    class Meta:
        model = Conference
        fields = ['name', 'public', 'admins', 'prospective_admins']
class UpdateConferenceForm(CreateConferenceForm):
    """Same as CreateConferenceForm, plus the ability to archive."""
    class Meta(CreateConferenceForm.Meta):
        fields = ['name', 'public', 'admins', 'prospective_admins', 'archived']
class VenueForm(forms.ModelForm):
    """Minimal form for creating/renaming a Venue (name only)."""
    class Meta:
        model = Venue
        fields = ['name']
class RoleTypeForm(forms.ModelForm):
    """Minimal form for creating a RoleType (role only)."""
    class Meta:
        model = RoleType
        fields = ['role']
class EventForm(forms.ModelForm):
    """Create/edit an Event; may simultaneously create a new Period from the
    event's start/end dates when `add_period` is checked."""
    add_period = forms.BooleanField(required=False,
                                    label="Add new period",
                                    help_text='Add these start and end dates as a new "period" (e.g. a course block, track, etc)?')
    period_name = forms.CharField(required=False)

    def __init__(self, *args, **kwargs):
        super(EventForm, self).__init__(*args, **kwargs)
        instance = kwargs.get('instance', None)
        # Restrict the period choices to the event's own conference; a new
        # (unsaved) event has no conference yet, so offer none.
        if instance is None:
            self.fields['period'].queryset = Period.objects.none()
        else:
            self.fields['period'].queryset = instance.conference.period_set.all()

    def clean_period_name(self):
        period_name = self.cleaned_data.get('period_name')
        if self.cleaned_data.get('add_period'):
            if not period_name:
                raise ValidationError("Period name is required to add a period.")
            # BUG FIX: the uniqueness check previously ran even when the user
            # was not adding a period, rejecting forms over an irrelevant
            # leftover name.
            if Period.objects.filter(period=period_name,
                                     conference=self.instance.conference).exists():
                raise ValidationError("That name is already in use.")
        return period_name

    def clean(self):
        data = super(EventForm, self).clean()
        # BUG FIX: use .get() — these keys are absent from cleaned_data when
        # the fields themselves failed validation, and the original indexing
        # raised KeyError instead of a form error.
        if not (data.get('start_date') or data.get('period')):
            raise ValidationError("One of start date or period is required.")
        return data

    def save(self, commit=True):
        # FIX: accept ModelForm's standard `commit` argument (default True
        # preserves the original always-save behaviour).
        event = super(EventForm, self).save(commit=False)
        if self.cleaned_data['add_period']:
            event.period = Period.objects.create(
                conference=self.instance.conference,
                period=self.cleaned_data['period_name'],
                start_date=event.start_date,
                end_date=event.end_date)
        if commit:
            event.save()
        return event

    class Meta:
        model = Event
        fields = ['title', 'start_date', 'end_date', 'period', 'description']
| 4,697 | 1,268 |
# noinspection PyPep8
# noinspection PyArgumentList
"""
AUTO-GENERATED BY `scripts/generate_protocol.py` using `data/browser_protocol.json`
and `data/js_protocol.json` as inputs! Please do not modify this file.
"""
import logging
from typing import Any, Optional, Union
from chromewhip.helpers import PayloadMixin, BaseEvent, ChromeTypeBase
log = logging.getLogger(__name__)
# StorageId: DOM Storage identifier.
class StorageId(ChromeTypeBase):
    """DOM Storage identifier: a security origin plus a local/session flag.

    Auto-generated protocol type — do not hand-edit.
    """
    def __init__(self,
                 securityOrigin: Union['str'],
                 isLocalStorage: Union['bool'],
                 ):
        self.securityOrigin = securityOrigin
        self.isLocalStorage = isLocalStorage
# Item: DOM Storage item. Generated type alias: a DOM Storage entry is a
# list of strings (key/value pair in protocol responses).
Item = [str]
class DOMStorage(PayloadMixin):
    """ Query and modify DOM storage.

    Auto-generated protocol domain — do not hand-edit. Each classmethod
    returns a (send_payload, response_converter) tuple; the converter is
    None for commands with no typed response.
    """
    @classmethod
    def clear(cls,
              storageId: Union['StorageId'],
              ):
        """
        :param storageId:
        :type storageId: StorageId
        """
        return (
            cls.build_send_payload("clear", {
                "storageId": storageId,
            }),
            None
        )

    @classmethod
    def disable(cls):
        """Disables storage tracking, prevents storage events from being sent to the client.
        """
        return (
            cls.build_send_payload("disable", {
            }),
            None
        )

    @classmethod
    def enable(cls):
        """Enables storage tracking, storage events will now be delivered to the client.
        """
        return (
            cls.build_send_payload("enable", {
            }),
            None
        )

    @classmethod
    def getDOMStorageItems(cls,
                           storageId: Union['StorageId'],
                           ):
        """
        :param storageId:
        :type storageId: StorageId
        """
        return (
            cls.build_send_payload("getDOMStorageItems", {
                "storageId": storageId,
            }),
            # Response carries a required "entries" list of Item values.
            cls.convert_payload({
                "entries": {
                    "class": [Item],
                    "optional": False
                },
            })
        )

    @classmethod
    def removeDOMStorageItem(cls,
                             storageId: Union['StorageId'],
                             key: Union['str'],
                             ):
        """
        :param storageId:
        :type storageId: StorageId
        :param key:
        :type key: str
        """
        return (
            cls.build_send_payload("removeDOMStorageItem", {
                "storageId": storageId,
                "key": key,
            }),
            None
        )

    @classmethod
    def setDOMStorageItem(cls,
                          storageId: Union['StorageId'],
                          key: Union['str'],
                          value: Union['str'],
                          ):
        """
        :param storageId:
        :type storageId: StorageId
        :param key:
        :type key: str
        :param value:
        :type value: str
        """
        return (
            cls.build_send_payload("setDOMStorageItem", {
                "storageId": storageId,
                "key": key,
                "value": value,
            }),
            None
        )
class DomStorageItemAddedEvent(BaseEvent):
    """Event: a DOM Storage item was added. Auto-generated — do not hand-edit.

    Hashable on storageId so handlers can subscribe per storage area.
    """
    js_name = 'Domstorage.domStorageItemAdded'
    hashable = ['storageId']
    is_hashable = True

    def __init__(self,
                 storageId: Union['StorageId', dict],
                 key: Union['str', dict],
                 newValue: Union['str', dict],
                 ):
        if isinstance(storageId, dict):
            storageId = StorageId(**storageId)
        self.storageId = storageId
        # NOTE(review): generated coercion — str(**key) would raise TypeError
        # if a dict were ever passed for a plain-str parameter; presumably the
        # dict branch never fires for str fields. Left as generated.
        if isinstance(key, dict):
            key = str(**key)
        self.key = key
        if isinstance(newValue, dict):
            newValue = str(**newValue)
        self.newValue = newValue

    @classmethod
    def build_hash(cls, storageId):
        # Build a stable subscription key "<js_name>:storageId=<value>".
        kwargs = locals()
        kwargs.pop('cls')
        serialized_id_params = ','.join(['='.join([p, str(v)]) for p, v in kwargs.items()])
        h = '{}:{}'.format(cls.js_name, serialized_id_params)
        log.debug('generated hash = %s' % h)
        return h
class DomStorageItemRemovedEvent(BaseEvent):
    """Event: a DOM Storage item was removed. Auto-generated — do not hand-edit."""
    js_name = 'Domstorage.domStorageItemRemoved'
    hashable = ['storageId']
    is_hashable = True

    def __init__(self,
                 storageId: Union['StorageId', dict],
                 key: Union['str', dict],
                 ):
        if isinstance(storageId, dict):
            storageId = StorageId(**storageId)
        self.storageId = storageId
        # NOTE(review): generated str(**key) coercion — see note on
        # DomStorageItemAddedEvent; left as generated.
        if isinstance(key, dict):
            key = str(**key)
        self.key = key

    @classmethod
    def build_hash(cls, storageId):
        # Build a stable subscription key "<js_name>:storageId=<value>".
        kwargs = locals()
        kwargs.pop('cls')
        serialized_id_params = ','.join(['='.join([p, str(v)]) for p, v in kwargs.items()])
        h = '{}:{}'.format(cls.js_name, serialized_id_params)
        log.debug('generated hash = %s' % h)
        return h
class DomStorageItemUpdatedEvent(BaseEvent):
    """Event: a DOM Storage item's value changed. Auto-generated — do not hand-edit."""
    js_name = 'Domstorage.domStorageItemUpdated'
    hashable = ['storageId']
    is_hashable = True

    def __init__(self,
                 storageId: Union['StorageId', dict],
                 key: Union['str', dict],
                 oldValue: Union['str', dict],
                 newValue: Union['str', dict],
                 ):
        if isinstance(storageId, dict):
            storageId = StorageId(**storageId)
        self.storageId = storageId
        # NOTE(review): generated str(**...) coercions — see note on
        # DomStorageItemAddedEvent; left as generated.
        if isinstance(key, dict):
            key = str(**key)
        self.key = key
        if isinstance(oldValue, dict):
            oldValue = str(**oldValue)
        self.oldValue = oldValue
        if isinstance(newValue, dict):
            newValue = str(**newValue)
        self.newValue = newValue

    @classmethod
    def build_hash(cls, storageId):
        # Build a stable subscription key "<js_name>:storageId=<value>".
        kwargs = locals()
        kwargs.pop('cls')
        serialized_id_params = ','.join(['='.join([p, str(v)]) for p, v in kwargs.items()])
        h = '{}:{}'.format(cls.js_name, serialized_id_params)
        log.debug('generated hash = %s' % h)
        return h
class DomStorageItemsClearedEvent(BaseEvent):
    """Event: a DOM Storage area was cleared. Auto-generated — do not hand-edit."""
    js_name = 'Domstorage.domStorageItemsCleared'
    hashable = ['storageId']
    is_hashable = True

    def __init__(self,
                 storageId: Union['StorageId', dict],
                 ):
        if isinstance(storageId, dict):
            storageId = StorageId(**storageId)
        self.storageId = storageId

    @classmethod
    def build_hash(cls, storageId):
        # Build a stable subscription key "<js_name>:storageId=<value>".
        kwargs = locals()
        kwargs.pop('cls')
        serialized_id_params = ','.join(['='.join([p, str(v)]) for p, v in kwargs.items()])
        h = '{}:{}'.format(cls.js_name, serialized_id_params)
        log.debug('generated hash = %s' % h)
        return h
| 6,922 | 1,951 |
import argparse
import csv
import dataset
# SQLAlchemy engine options for the SQLite connection: recycle pooled
# connections hourly, allow a long busy-timeout, and permit cross-thread use.
engine_kwargs = {"pool_recycle": 3600, "connect_args": {'timeout': 300, "check_same_thread": False}}
def sample_stories(args):
    """Randomly sample stories from the SQLite database and write them to CSV.

    Picks up to args["num_samples"] stories whose sentence count lies in
    [min_sentences, max_sentences], joins each story's sentences (ordered by
    id, "<newline>" markers replaced by spaces) into one text field, and
    writes (story_id, text) rows to args["target"].
    """
    print(args)
    database = args["database"]
    dataset_db = f"sqlite:///{database}"
    csv_lines = []
    with dataset.connect(dataset_db, engine_kwargs=engine_kwargs) as db:
        # Values are interpolated into SQL; argparse declares them as ints,
        # and the explicit int() casts below make that guarantee local, so
        # no injection is possible through these f-strings.
        stories = db.query(
            f'SELECT * FROM story WHERE sentence_num >= {int(args["min_sentences"])} '
            f'AND sentence_num <= {int(args["max_sentences"])} ORDER BY RANDOM() LIMIT {int(args["num_samples"])}')
        for story in stories:
            sentences = db.query(
                f'SELECT * FROM sentence WHERE story_id = {int(story["id"])} ORDER BY id ')
            sentence_text = [s["text"].replace("<newline>", " ") for s in sentences]
            story_text = " ".join(sentence_text)
            csv_lines.append((int(story["id"]), story_text))
    with open(args["target"], 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
        writer.writerow(["story_id", "text"])
        # writerows replaces the original per-row loop.
        writer.writerows(csv_lines)
# CLI entry point: parse arguments and run the sampler.
parser = argparse.ArgumentParser(
    description='Randomly sample from the database stories')
# BUG FIX: the --database help text was a copy-pasted message about topic
# model weights; it actually takes the path of the story SQLite database.
parser.add_argument('--database', required=True, type=str,
                    help="Path to the SQLite story database to sample from")
parser.add_argument('--target', required=True, type=str, help="Output CSV file")
parser.add_argument('--min-sentences', type=int, default=25, help="The min number of sentences.")
parser.add_argument('--max-sentences', type=int, default=75, help="The max number of sentences.")
parser.add_argument('--num-samples', type=int, default=500,
                    help="Number of stories to sample from. Should be less than the size of the dataset.")

args = parser.parse_args()
sample_stories(vars(args))
| 1,909 | 579 |
"""pdjn - the CLI tool of Pidjn"""
from setuptools import setup
setup(
name='pdjn',
version='0.1',
description="CLI tool for managing Pydjn sites",
url='https://github.com/hipikat/hpk.io/tree/master/src/cli',
license='ISC',
author='Ada Wright',
author_email='ada@hpk.io',
py_modules=['pdjn'],
install_requires=['click',],
entry_points='''
[console_scripts]
pdjn=pdjn:cli
''',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: ISC License (ISCL)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.9',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
| 967 | 310 |
#!/usr/bin/env python
import sys
import linktap
import time
import logging
try:
import polyinterface
CLOUD = False
except ImportError:
import pgc_interface as polyinterface
CLOUD = True
LOGGER = polyinterface.LOGGER
logging.getLogger('urllib3').setLevel(logging.INFO)
class Controller(polyinterface.Controller):
def __init__(self, polyglot):
    """Initialise the LinkTap controller node with empty credentials;
    real values are loaded from custom params in start()/check_params()."""
    super(Controller, self).__init__(polyglot)
    self.name = 'LinkTap Controller'
    # self.poly.onConfig(self.process_config)
    self.username = ''       # LinkTap account user, from customParams
    self.apiKey = ''         # LinkTap API key, from customParams
    self.data = None         # last get_all_devices() payload
    self.ready = False       # True once devices were fetched successfully
    self.retry_count = 1     # discover() retry counter
def start(self):
    """Node server entry point: load credentials, then discover devices."""
    LOGGER.info('Started LinkTap NodeServer')
    if self.check_params():
        self.discover()
def get_link_tap_devices(self):
    """Fetch all devices from the LinkTap API into self.data.

    Returns True and sets self.ready on success; on rate limiting or a
    failed call, clears self.data/self.ready and returns False.
    """
    api = linktap.LinkTap(self.username, self.apiKey)
    devices = api.get_all_devices()
    if devices == 'error':
        LOGGER.info("get_link_tap_devices: The minimum interval of calling this API is 5 minutes.")
    elif devices is None:
        LOGGER.info("Get all devices failed")
    else:
        self.data = devices
        self.ready = True
        return True
    # Shared failure path for both error cases above.
    self.data = None
    self.ready = False
    return False
def shortPoll(self):
    # Update Watering Status
    """Refresh per-taplinker watering drivers (GV1-GV4) on the short poll.

    Walks the cached device tree and, for every connected taplinker node,
    queries the live watering status from the LinkTap API.
    """
    if self.ready:
        # LOGGER.info("Updating Watering Status")
        for node in self.nodes:
            # Skip the controller node itself.
            if self.nodes[node].address != self.address:
                for gw in self.data['devices']:
                    for tl in gw['taplinker']:
                        # Node addresses are the first 8 hex chars of the id.
                        if tl['taplinkerId'][0:8].lower() == self.nodes[node].address:
                            if tl['status'] == 'Connected':
                                link_tap = linktap.LinkTap(self.username, self.apiKey)
                                watering_status = link_tap.get_watering_status(tl['taplinkerId'])
                                # print(watering_status)
                                try:
                                    if watering_status['status'] is not None:
                                        # GV1 watering flag, GV2 remaining
                                        # duration, GV3 total, GV4 elapsed.
                                        if watering_status['status']['onDuration']:
                                            self.nodes[node].setDriver('GV1', 1)
                                            self.nodes[node].setDriver('GV2', watering_status['status']['onDuration'])
                                        if watering_status['status']['total']:
                                            self.nodes[node].setDriver('GV3', watering_status['status']['total'])
                                            watering_total = int(watering_status['status']['total'])
                                            watering_duration = int(watering_status['status']['onDuration'])
                                            watering_elapsed = watering_total - watering_duration
                                            self.nodes[node].setDriver('GV4', watering_elapsed)
                                    else:
                                        # Not watering: zero out all drivers.
                                        self.nodes[node].setDriver('GV1', 0)
                                        self.nodes[node].setDriver('GV2', 0)
                                        self.nodes[node].setDriver('GV3', 0)
                                        self.nodes[node].setDriver('GV4', 0)
                                except TypeError:
                                    # Malformed/absent status payload; skip
                                    # this cycle rather than crash the poll.
                                    pass
    else:
        pass
def longPoll(self):
    """Long-poll handler: re-fetch the device tree and push driver updates."""
    if not self.ready:
        return
    if self.get_link_tap_devices():
        self.update()
    else:
        LOGGER.info("LinkTap Devices API returned None")
def update(self):
    """Push cached device state (connectivity, battery, signal, watering)
    to every gateway and taplinker node's drivers."""
    if self.ready:
        for node in self.nodes:
            # Skip the controller node itself.
            if self.nodes[node].address != self.address:
                for gw in self.data['devices']:
                    # Gateway node: ST reflects gateway connectivity.
                    if gw['gatewayId'][0:8].lower() == self.nodes[node].address:
                        if gw['status'] == 'Connected':
                            self.nodes[node].setDriver('ST', 1, force=False)
                        else:
                            self.nodes[node].setDriver('ST', 0, force=False)
                    for tl in gw['taplinker']:
                        # Taplinker node: connectivity, battery and signal.
                        if tl['taplinkerId'][0:8].lower() == self.nodes[node].address:
                            if tl['status'] == 'Connected':
                                self.nodes[node].setDriver('ST', 1, force=False)
                            else:
                                self.nodes[node].setDriver('ST', 0, force=False)
                            self.nodes[node].setDriver('BATLVL', tl['batteryStatus'].strip('%'), force=False)
                            # self.nodes[node].setDriver('GV0', tl['signal'].strip('%'), force=False)
                            self.nodes[node].setDriver('GV0', tl['signal'], force=False)
                            if tl['watering'] is not None:
                                # Actively watering: publish remaining/total.
                                self.nodes[node].setDriver('GV1', 1, force=False)
                                for key in tl['watering']:
                                    if key == 'remaining':
                                        self.nodes[node].setDriver('GV2', tl['watering'][key], force=False)
                                    if key == 'total':
                                        self.nodes[node].setDriver('GV3', tl['watering'][key], force=False)
                            else:
                                self.nodes[node].setDriver('GV1', 0, force=False)
                                self.nodes[node].setDriver('GV2', 0, force=False)
                                self.nodes[node].setDriver('GV3', 0, force=False)
def query(self):
    """Re-validate the configuration, then report every driver of every node."""
    if self.ready:
        self.check_params()
        for node_address in self.nodes:
            self.nodes[node_address].reportDrivers()
def discover_retry(self):
    """Retry discovery after a failure, giving up after 3000 attempts."""
    if self.retry_count > 3000:
        LOGGER.info("discover_retry: Failed to start after 3000 retries. Aborting")
        polyglot.stop()  # module-level Interface created in __main__
        return
    LOGGER.info("discover_retry: Failed to start. Retrying attempt: " + str(self.retry_count))
    self.retry_count += 1
    self.discover()
def discover(self, *args, **kwargs):
    """Create one node per gateway and one per taplinker from the LinkTap API.

    On failure, sleeps 5 minutes and retries via discover_retry().
    NOTE(review): the 300s sleep blocks this thread, and the
    discover -> discover_retry -> discover recursion can nest up to 3000
    frames (beyond CPython's default recursion limit) -- confirm intended.
    """
    if self.get_link_tap_devices():
        for ctl in self.data['devices']:
            gw_name = ctl['name']
            # ISY node addresses use only the first 8 chars of the device id.
            gw_address = ctl['gatewayId'][0:8].lower()
            self.addNode(GatewayNode(self, gw_address, gw_address, gw_name))
            time.sleep(2)  # pace node creation for the ISY
            for tl in ctl['taplinker']:
                tl_name = tl['taplinkerName']
                tl_address = tl['taplinkerId'][0:8].lower()
                # Taplinkers are children of their gateway (primary=gw_address).
                self.addNode(TapLinkNode(self, gw_address, tl_address, tl_name))
                time.sleep(2)
        self.ready = True
        self.update()
    else:
        LOGGER.info("Failed to get devices. Will retry in 5 minutes")
        self.ready = False
        time.sleep(300)
        self.discover_retry()
def delete(self):
    """Called by Polyglot when this nodeserver is removed."""
    LOGGER.info('LinkTap Nodeserver: Deleted')
def stop(self):
    """Called by Polyglot on shutdown."""
    LOGGER.debug('NodeServer stopped.')
def process_config(self, config):
    """Log configuration changes pushed by Polyglot.

    Observed to fire twice per change; the config payload's exact meaning
    is still being investigated, so this only logs it.
    """
    LOGGER.info("process_config: Enter config={}".format(config))
    LOGGER.info("process_config: Exit")
def check_params(self):
    """Load username/apiKey from Polyglot custom params, falling back to
    placeholder values, and nag with a notice until real values are set.

    Returns:
        bool: True when both values look customized, False otherwise.
    """
    default_username = "YourUserName"
    default_api_key = "YourApiKey"
    if 'username' in self.polyConfig['customParams']:
        self.username = self.polyConfig['customParams']['username']
    else:
        self.username = default_username
        LOGGER.error('check_params: user not defined in customParams, please add it. '
                     'Using {}'.format(self.username))
    if 'apiKey' in self.polyConfig['customParams']:
        self.apiKey = self.polyConfig['customParams']['apiKey']
    else:
        self.apiKey = default_api_key
        LOGGER.error('check_params: apiKey not defined in customParams, please add it. '
                     'Using {}'.format(self.apiKey))
    # Write the (possibly default) values back so they show on the config page.
    self.addCustomParam({'username': self.username, 'apiKey': self.apiKey})
    time.sleep(2)
    if self.username == default_username or self.apiKey == default_api_key:
        # Still placeholders: surface a persistent notice and report failure.
        self.addNotice({'params_notice': 'Please set proper user and apiKey in '
                        'configuration page, and restart this nodeserver'})
        return False
    else:
        self.remove_notices_all()
        return True
def remove_notice_test(self, command):
    """Debug helper: drop the notice registered under the name 'test'."""
    LOGGER.info('remove_notice_test: notices={}'.format(self.poly.config['notices']))
    self.removeNotice('test')
def remove_notices_all(self):
    """Clear every notice currently shown on the Polyglot dashboard."""
    LOGGER.info('remove_notices_all: notices={}'.format(self.poly.config['notices']))
    self.removeNoticesAll()
def update_profile(self, command):
    """Push the node profile files to the ISY; returns the install status."""
    LOGGER.info('update_profile:')
    return self.poly.installprofile()
# Static node definition consumed by polyinterface / the ISY.
id = 'controller'
# Map ISY command names to the handler methods defined above.
commands = {
    'QUERY': query,
    'DISCOVER': discover,
    'UPDATE_PROFILE': update_profile
}
# ST = nodeserver online (uom 2 = boolean).
drivers = [{'driver': 'ST', 'value': 1, 'uom': 2}]
class GatewayNode(polyinterface.Node):
    """A LinkTap gateway node; ST reflects its cloud connection state."""

    def __init__(self, controller, primary, address, name):
        super(GatewayNode, self).__init__(controller, primary, address, name)
        # Shared device payload fetched by the controller.
        # NOTE(review): this keeps a reference to the dict present at node
        # creation; confirm the controller mutates (not rebinds) self.data.
        self.data = controller.data

    def _report_status(self):
        # Our node address is the first 8 hex chars of the full gatewayId.
        for gw in self.data['devices']:
            if gw['gatewayId'][0:8].lower() == self.address:
                if gw['status'] == 'Connected':
                    self.setDriver('ST', 1)
                else:
                    self.setDriver('ST', 0)

    def start(self):
        """Initial driver report when the node is added.

        start() and update() were duplicated verbatim; both now share
        _report_status() so the matching logic lives in one place.
        """
        self._report_status()

    def update(self):
        """Refresh ST from the latest controller data."""
        self._report_status()

    def setOn(self, command):
        self.setDriver('ST', 1)

    def setOff(self, command):
        self.setDriver('ST', 0)

    def query(self):
        self.reportDrivers()

    # "Hints See: https://github.com/UniversalDevicesInc/hints"
    # hint = [1,2,3,4]
    drivers = [{'driver': 'ST', 'value': 0, 'uom': 2}]
    id = 'gateway'
    commands = {
        'DON': setOn, 'DOF': setOff
    }
class TapLinkNode(polyinterface.Node):
    """A LinkTap taplinker (valve) child node.

    Drivers: ST=connected, BATLVL=battery %, GV0=signal, GV1=watering active,
    GV2=minutes remaining, GV3=total minutes of the current cycle.
    """
    def __init__(self, controller, primary, address, name):
        super(TapLinkNode, self).__init__(controller, primary, address, name)
        # Shared device payload fetched by the controller.
        self.data = controller.data
        self.primary = primary
        # Node addresses keep only the first 8 chars of the device id; the
        # LinkTap API wants the full id, so this suffix is re-appended.
        # NOTE(review): assumes every device id ends in '004B1200' -- confirm.
        self.dev_suffix = '004B1200'
    def start(self):
        """Initial driver report: locate our taplinker record and push state."""
        for gw in self.data['devices']:
            for tl in gw['taplinker']:
                if tl['taplinkerId'][0:8].lower() == self.address:
                    if tl['status'] == 'Connected':
                        self.setDriver('ST', 1, force=True)
                    else:
                        self.setDriver('ST', 0, force=True)
                    # batteryStatus arrives as e.g. '85%'.
                    self.setDriver('BATLVL', tl['batteryStatus'].strip('%'), force=True)
                    # self.setDriver('GV0', tl['signal'].strip('%'), force=True)
                    self.setDriver('GV0', tl['signal'], force=True)
                    if tl['watering'] is not None:
                        # Active watering session: report remaining/total.
                        self.setDriver('GV1', 1, force=True)
                        for key in tl['watering']:
                            if key == 'remaining':
                                self.setDriver('GV2', tl['watering'][key], force=True)
                            if key == 'total':
                                self.setDriver('GV3', tl['watering'][key], force=True)
                    else:
                        # Idle: zero the watering drivers.
                        self.setDriver('GV1', 0, force=True)
                        self.setDriver('GV2', 0, force=True)
                        self.setDriver('GV3', 0, force=True)
    def setOn(self, command):
        self.setDriver('ST', 1)
    def setOff(self, command):
        self.setDriver('ST', 0)
    def query(self):
        self.reportDrivers()
    def instantOn(self, command):
        """Start instant watering for the requested number of minutes."""
        val = command.get('value')  # duration (minutes) from the ISY command
        taplinker = command.get('address') + self.dev_suffix
        gateway = self.primary + self.dev_suffix
        duration = int(val)
        # if duration == 0:
        #     action = False
        # else:
        #     action = True
        action = True
        eco = False
        lt = linktap.LinkTap(self.controller.username, self.controller.apiKey)
        lt.activate_instant_mode(gateway, taplinker, action, duration, eco)
        # Optimistically reflect the new state; the next poll reconciles it.
        self.setDriver('GV1', 1)
        self.setDriver('GV2', duration)
        self.setDriver('GV3', duration)
    def instantOff(self, command):
        """Stop instant watering immediately."""
        taplinker = command.get('address') + self.dev_suffix
        gateway = self.primary + self.dev_suffix
        duration = 0
        action = False
        eco = False
        lt = linktap.LinkTap(self.controller.username, self.controller.apiKey)
        lt.activate_instant_mode(gateway, taplinker, action, duration, eco)
        self.setDriver('GV1', 0)
        self.setDriver('GV2', duration)
        self.setDriver('GV3', duration)
    def intervalMode(self, command):
        """Switch the taplinker to its stored interval schedule."""
        taplinker = command.get('address') + self.dev_suffix
        gateway = self.primary + self.dev_suffix
        lt = linktap.LinkTap(self.controller.username, self.controller.apiKey)
        lt.activate_interval_mode(gateway, taplinker)
    def oddEvenMode(self, command):
        """Switch the taplinker to odd/even-day scheduling."""
        taplinker = command.get('address') + self.dev_suffix
        gateway = self.primary + self.dev_suffix
        lt = linktap.LinkTap(self.controller.username, self.controller.apiKey)
        lt.activate_odd_even_mode(gateway, taplinker)
    def sevenDayMode(self, command):
        """Switch the taplinker to the 7-day schedule."""
        taplinker = command.get('address') + self.dev_suffix
        gateway = self.primary + self.dev_suffix
        lt = linktap.LinkTap(self.controller.username, self.controller.apiKey)
        lt.activate_seven_day_mode(gateway, taplinker)
    def monthMode(self, command):
        """Switch the taplinker to the monthly schedule."""
        taplinker = command.get('address') + self.dev_suffix
        gateway = self.primary + self.dev_suffix
        lt = linktap.LinkTap(self.controller.username, self.controller.apiKey)
        lt.activate_month_mode(gateway, taplinker)
    # "Hints See: https://github.com/UniversalDevicesInc/hints"
    # hint = [1,2,3,4]
    drivers = [
        {'driver': 'ST', 'value': 0, 'uom': 2},
        {'driver': 'BATLVL', 'value': 0, 'uom': 51},
        {'driver': 'GV0', 'value': 0, 'uom': 51},  # Signal
        {'driver': 'GV1', 'value': 0, 'uom': 2},  # Watering
        {'driver': 'GV2', 'value': 0, 'uom': 44},  # Remaining
        {'driver': 'GV3', 'value': 0, 'uom': 44},  # Total
        {'driver': 'GV4', 'value': 0, 'uom': 44},  # Elapsed
        {'driver': 'GV5', 'value': 0, 'uom': 44},  # Instant On Minutes
    ]
    id = 'taplinker'
    # ISY command ids -> handlers (GV5 doubles as the instant-on command id).
    commands = {
        'GV5': instantOn, 'GV10': instantOff, 'GV6': intervalMode, 'GV7': oddEvenMode,
        'GV8': sevenDayMode, 'GV9': monthMode
    }
if __name__ == "__main__":
    try:
        # 'Template' must match the nodeserver profile name in Polyglot.
        polyglot = polyinterface.Interface('Template')
        polyglot.start()
        control = Controller(polyglot)
        control.runForever()
    except (KeyboardInterrupt, SystemExit):
        # Catch SIGTERM or Control-C and exit cleanly.
        polyglot.stop()
        sys.exit(0)
    # The original stray docstring below is a no-op expression; kept as-is.
    """
    Catch SIGTERM or Control-C and exit cleanly.
    """
| 16,088 | 4,708 |
#!/usr/bin/env python
# encoding: utf-8
"""Note passing from Python"""
from __future__ import absolute_import
from __future__ import print_function
from mpi4py import MPI
import numpy
# MPI setup: one communicator spanning all ranks.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
tag = 42  # arbitrary message tag shared by every Send/Recv below
N = 10  # length of the float64 "note" passed down the chain
if rank == 0:
    # Head of the chain: create the note and pass it to rank 1.
    # NOTE(review): with a single process (size == 1) this sends to a
    # nonexistent rank 1 -- confirm the script always runs with >= 2 ranks.
    data = numpy.arange(N, dtype=numpy.float64)
    print("Process %s note = %s" % (rank, data))
    # Note here that MPI datatype discovery is automatic
    comm.Send(data, dest=rank + 1, tag=tag)
elif rank < size - 1:
    # Middle of the chain: receive from the left neighbor, forward right.
    data = numpy.empty(N, dtype=numpy.float64)
    comm.Recv(data, source=rank - 1, tag=tag)
    print("Process %s note = %s" % (rank, data))
    comm.Send(data, dest=rank + 1, tag=tag)
elif rank == size - 1:
    # Tail of the chain: receive and stop.
    data = numpy.empty(N, dtype=numpy.float64)
    comm.Recv(data, source=rank - 1, tag=tag)
    print("Process %s note = %s" % (rank, data))
else:
    # Unreachable for any nonnegative rank; kept as a defensive guard.
    raise Exception("Invalid rank.")
from metanic.settings.defaults import INSTALLED_APPS
from metanic.settings.defaults import MIDDLEWARE
from metanic.settings.defaults import REST_FRAMEWORK
from metanic.settings.defaults import cache_url
from metanic.settings.defaults import env_value
from metanic.settings.defaults import project_path
# We specifically allow `import *` in this case to pull in expected settings
from metanic.settings.defaults import * # noqa
# Development-only Django settings layered on top of the defaults.
DEBUG = True
DEFAULT_FROM_EMAIL = 'services@metanic.local'
FRONTEND_URL = env_value('frontend_url', 'http://localhost:3030/')
MEDIA_ROOT = project_path('media')
MEDIA_URL = '/media/'
METANIC_REDIRECT_URL = 'http://localhost:3030/'
ROOT_URLCONF = 'metanic.core.urls.development'
STATIC_ROOT = project_path('static')
STATIC_URL = '/static/'
# Anymail/Mailgun: a dummy key is fine in development.
MAILGUN_API_KEY = env_value('mailgun_api_key', default='TEST')
ANYMAIL['MAILGUN_API_KEY'] = MAILGUN_API_KEY
# Hard-coded fallback is acceptable only because this is the dev settings file.
SECRET_KEY = env_value(
    'secret_key',
    'diagonal stunning powder ledge employ dealer',
)
# NOTE(review): this assignment is dead -- ACCESS_CONTROL_ALLOW_ORIGINS is
# unconditionally reassigned further down; confirm which list is intended.
ACCESS_CONTROL_ALLOW_ORIGINS = [
    'localhost:3030',
]
# Relax throttling and allow session auth for local API browsing.
REST_FRAMEWORK['DEFAULT_THROTTLE_CLASSES'] = []
REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'
] += ('rest_framework.authentication.SessionAuthentication',)
REST_FRAMEWORK['DEFAULT_THROTTLE_RATES'] = {
    'anon': env_value('anon_throttle_rate', default='100/second'),
    'sensitive': env_value('sensitive_throttle_rate', default='100/second'),
    'user': env_value('user_throttle_rate', default='100/second'),
}
# Dev-only tooling.
INSTALLED_APPS += [
    'debug_toolbar',
    'django_extensions',
]
# NOTE(review): overriding this drops Django's default FileSystemFinder --
# confirm STATICFILES_DIRS is not relied on in development.
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
MIDDLEWARE += [
    'debug_toolbar.middleware.DebugToolbarMiddleware',
]
CACHES = {
    'default': cache_url('redis://localhost:6379/0'),
}
# Local SQLite database; filename overridable via DATABASE_FILENAME.
DATABASES = {
    'default':
        {
            'ENGINE':
                'django.db.backends.sqlite3',
            'NAME':
                project_path(
                    env_value('DATABASE_FILENAME', 'metanic.sqlite3')
                ),
        },
}
ALLOWED_HOSTS = [
    'localhost',
    'metanic.local',
]
# This reassignment wins over the earlier, shorter list above.
ACCESS_CONTROL_ALLOW_ORIGINS = [
    '::1:',
    '127.0.0.1',
    '127.0.0.1:*',
    'localhost',
    'localhost:*',
    'metanic.local',
    'metanic.local:*',
]
# Required for debug_toolbar to activate.
INTERNAL_IPS = [
    '127.0.0.1',
]
| 2,287 | 880 |
from sensor.models import Sensors
from sensor.serializers import TempSerializer
from rest_framework import generics
from django.shortcuts import render
from django.http import JsonResponse
# Create your views here.
class sensors_api(generics.ListCreateAPIView):
    """REST endpoint: GET lists all sensor rows, POST creates one."""
    queryset = Sensors.objects.all()
    serializer_class = TempSerializer
def sensors(request):
    """Render sensors_index.html with every stored sensor reading.

    NOTE(review): the template receives locals(), so it sees not only `res`
    but every loop temporary (tt, ax, ...); renaming any local here could
    silently break the template -- confirm which names the template uses.
    """
    data = Sensors.objects.all()
    res = []
    if data:
        for i in data:
            tt = i.captime
            ax = i.accx
            ay = i.accy
            az = i.accz
            mx = i.magx
            my = i.magy
            mz = i.magz
            gx = i.gyrx
            gy = i.gyry
            gz = i.gyrz
            tx = i.longitude
            ty = i.latitude
            # Row: [iso timestamp, acc xyz, mag xyz, gyro xyz, lon, lat]
            res.append([tt.isoformat(), float(ax), float(ay), float(az), float(mx), float(my), float(mz), float(gx),
                        float(gy), float(gz), float(tx), float(ty)])
    return render(request, 'sensors_index.html', locals())
# Supplies the GPS/IMU data needed by the HTML page as JSON.
def get_sensors(request):
    """Return every stored sensor reading as JSON under the key 's1'."""
    readings = []
    for row in Sensors.objects.all():
        readings.append({
            "tt": row.captime.isoformat(),
            "ax": float(row.accx), "ay": float(row.accy), "az": float(row.accz),
            "mx": float(row.magx), "my": float(row.magy), "mz": float(row.magz),
            "gx": float(row.gyrx), "gy": float(row.gyry), "gz": float(row.gyrz),
            "longitude": float(row.longitude), "latitude": float(row.latitude),
        })
    return JsonResponse({'s1': readings})
| 1,848 | 652 |
"""
Make sure to run with `python -m pytest` instead of `pytest`. That
way the root project directory is added to sys.path.
"""
import pytest
from encase import Encase
def test_get_dot_notation():
    """Keys of the wrapped dict are readable via attribute access."""
    box = Encase({'test': 'value'})
    assert box.test == 'value'
def test_set_dot_notation():
    """Attribute assignment is readable back via item access."""
    box = Encase()
    box.new_value = 'New Value'
    assert box['new_value'] == 'New Value'
def test_get_method():
    """The dict-style get() accessor still works."""
    box = Encase({'test': 'value'})
    assert box.get('test') == 'value'
def test_set_method():
    """The explicit set() mutator stores a retrievable value."""
    box = Encase()
    box.set('test_key', 'Example Value')
    assert box['test_key'] == 'Example Value'
def test_set_value_as_list():
    """List values survive attribute assignment with order intact."""
    box = Encase()
    values = ['value1', 'value2', 'value3']
    box.new_list = values
    for idx, expected in enumerate(values):
        assert box.new_list[idx] == expected
def test_method_overwrite_prevention():
    """Built-in method names cannot be clobbered by attribute assignment."""
    box = Encase()
    with pytest.raises(AttributeError):
        box.copy = 'Some Value'
| 950 | 352 |
from functools import wraps
# Easy interceptor for tracing
def trace_activity(fn, trace=True):
    """Wrap *fn* so each call logs its name and arguments before delegating."""
    @wraps(fn)
    def traced(*args, **kwargs):
        if trace:
            print(f'TRACE: calling {fn.__name__}(), positional args: {args}, named args: {kwargs}')
        return fn(*args, **kwargs)
    return traced
# Error handlers
class AppError(Exception):
    """Base application error carrying a message and an HTTP-style status code."""

    def __init__(self, error, status_code):
        self.error = error
        self.status_code = status_code
class DatabaseError(AppError):
    """Raised for database-layer failures.

    Bug fix: the original called ``super.__init__(self, error, status_code)``
    -- invoking ``__init__`` on the ``super`` *type itself*, not on a
    ``super()`` proxy -- which raised ``TypeError`` every time a
    DatabaseError was constructed. ``super().__init__`` delegates correctly.
    """

    def __init__(self, error, status_code):
        super().__init__(error, status_code)
# Copyright (c) 2018 Copyright holder of the paper Generative Adversarial Model Learning
# submitted to NeurIPS 2019 for review
# All rights reserved.
class TorchModel(object):
    """Abstract model interface: subclasses must expose normalized I/O."""
    @property
    def normalized_input(self):
        # Concrete models return their input in normalized form.
        raise NotImplementedError
    @property
    def normalized_output(self):
        # Concrete models return their output in normalized form.
        raise NotImplementedError
from engagement.engager import Engager
from engagement.engagement_exception import EngagementException
from entities.acurerate_attributes import P, C
from utils.acurerate_utils import AcureRateUtils
import clearbit
class ClearbitEngager(Engager):
    """Engager backed by the Clearbit enrichment API.

    Enriches an AcureRatePerson (keyed by email) or an AcureRateCompany
    (keyed by domain).
    """
    # NOTE(review): a live-looking API key is committed in source; it should
    # move to configuration / secret storage.
    ACURATE_TRIAL_KEY = "sk_2a34f937f031587cb2bf4f6ee84a3c70"  # AcureRate - Trial
    #ACURATE_PRODUCTION_KEY = "no production key yet" # AcureRate - Production
    THE_KEY = ACURATE_TRIAL_KEY
    def __init__(self):
        super().__init__()
        # The clearbit client is configured via a module-level key.
        clearbit.key = ClearbitEngager.THE_KEY
    def __str__(self):
        return 'Clearbit Engager'
    def __repr__(self):
        return 'Clearbit Engager'
    def get_provider_name(self):
        return 'Clearbit'
    def get_short_symbol(self):
        return 'clb'
    def get_api_key(self):
        return ClearbitEngager.THE_KEY
    def set_enrich_key(self):
        """Choose the lookup key: email for a person, domain for a company.

        Raises:
            EngagementException: if no usable key exists or the entity type
                is unknown.
        """
        t = self.enriched_entity.__class__.__name__
        if t == 'AcureRatePerson':
            email = self.get_pivot_email()
            if email is None:
                raise EngagementException("Clearbit - cannot engage. No email available as enrich key")
            self.enrich_key = email
        elif t == 'AcureRateCompany':
            if C.DOMAIN not in self.enriched_entity.deduced:
                raise EngagementException("Clearbit - cannot engage - no domain property to use as key")
            self.enrich_key = self.enriched_entity.deduced.get(C.DOMAIN)
        else:
            raise EngagementException("Clearbit - cannot engage - cannot generate enrich key. Unknown entity type")
    def enrich_person(self):
        """Copy Clearbit person fields into the entity.

        Returns:
            list: the properties considered primary for this provider.
        Raises:
            EngagementException: on pending/missing results or when no field
                was added.
        """
        result_obj = self._get_person_info()
        if 'pending' in result_obj and result_obj['pending']:
            msg = 'Failed to get information on person %s. Pending (202)' % self.enrich_key
            raise EngagementException(msg)
        if 'person' not in result_obj or result_obj['person'] is None:
            msg = 'Failed to get information on person %s. Not Found (404)' % self.enrich_key
            raise EngagementException(msg)
        enriched = False
        person_data = result_obj['person']
        # Get the name properties (deliberately do not count toward `enriched`).
        if 'name' in person_data:
            self.set_data(P.FIRST_NAME, person_data['name']['givenName'])
            self.set_data(P.LAST_NAME, person_data['name']['familyName'])
            self.set_data(P.FULL_NAME, person_data['name']['fullName'])
        # Only record the email when it differs from the lookup key.
        if 'email' in person_data and person_data['email'] != self.enrich_key:
            self.set_data(P.EMAIL, person_data['email'])
            self.add_data(P.EMAILS, person_data['email'])
        if 'gender' in person_data and person_data['gender']:
            #enriched = True
            self.add_data(P.GENDER, person_data['gender'])
        if 'bio' in person_data and person_data['bio']:
            enriched = True
            self.add_data(P.SHORT_DESCRIPTION, person_data['bio'])
        if 'location' in person_data and person_data['location']:
            enriched = True
            self.add_data(P.LOCATIONS, person_data['location'])
        if 'facebook' in person_data and person_data['facebook']['handle']:
            enriched = True
            self.add_data(P.FACEBOOK_URL, person_data['facebook']['handle'])
        # NOTE(review): the three branches below store the whole sub-dict
        # (e.g. result_obj['person']['linkedin']) rather than its 'handle',
        # unlike the facebook branch above -- confirm which is intended.
        if 'linkedin' in person_data and person_data['linkedin']['handle']:
            enriched = True
            self.add_data(P.LINKEDIN_URL, result_obj['person']['linkedin'])
        if 'twitter' in person_data and person_data['twitter']['handle']:
            enriched = True
            self.add_data(P.TWITTER_URL, result_obj['person']['twitter'])
        if 'googleplus' in person_data and person_data['googleplus']['handle']:
            enriched = True
            self.add_data(P.GOOGLEPLUS_URL, result_obj['person']['googleplus'])
        if 'employment' in person_data:
            # Build a job record only from the fields that are present.
            job = {}
            if person_data['employment'].get('name', None) is not None:
                job[P.JOB_NAME] = person_data['employment'].get('name', [])
            if person_data['employment'].get('title', None) is not None:
                job[P.JOB_TITLE] = person_data['employment'].get('title', [])
            if person_data['employment'].get('role', None) is not None:
                job[P.JOB_ROLE] = person_data['employment'].get('role', [])
            if job != {}:
                enriched = True
                self.add_data(P.JOBS, job)
        # TODO: gravatar, aboutme, github
        if not enriched:
            msg = 'Failed: no information added to person %s' % self.enrich_key
            raise EngagementException(msg)
        return [P.JOBS]
    def enrich_company(self):
        """Validate a Clearbit company lookup.

        NOTE(review): `enriched` and `company_data` are computed but never
        used and no field is copied onto the entity -- appears unfinished.
        """
        result_obj = self._get_company_info()
        if 'pending' in result_obj and result_obj['pending']:
            msg = 'Failed to get information on person %s. Pending (202)' % self.enrich_key
            raise EngagementException(msg)
        if 'company' not in result_obj or result_obj['company'] is None:
            msg = 'Failed to get information on company %s. Not Found (404)' % self.enrich_key
            raise EngagementException(msg)
        enriched = False
        company_data = result_obj['company']
        return [C.NAME]
    def _get_person_info(self):
        # Wrap any transport failure in an EngagementException.
        try:
            response = clearbit.Enrichment.find(email=self.enrich_key)
        except EngagementException as e:
            raise e
        except Exception as e:
            raise EngagementException(e, True)
        return response
    def _get_company_info(self):
        # stream=True returns the result synchronously instead of 202-pending.
        try:
            response = clearbit.Company.find(domain=self.enrich_key, stream=True)
        except EngagementException as e:
            raise e
        except Exception as e:
            raise EngagementException(e, True)
        return response
from knapsack import Knapsack
class Ratio(Knapsack):
    """
    Greedy knapsack heuristic: consider items in descending price/weight
    ratio and pack each one that still fits.
    """

    def evaluate(self):
        """Return (total_price, configuration); configuration[i] is 1 when
        item i was packed, 0 otherwise."""
        remaining = self.capacity
        total_price = 0
        chosen = [0] * len(self.items)
        for item in sorted(self.items, key=lambda it: it.ratio, reverse=True):
            if item.weight <= remaining:
                remaining -= item.weight
                total_price += item.price
                chosen[item.index] = 1
        return total_price, chosen
| 666 | 176 |
# Look up the CODATA entry "nuclear magneton in inverse meters per tesla".
# NOTE(review): `constants` is presumably scipy.constants, whose
# physical_constants table maps names to (value, unit, uncertainty) -- the
# import is outside this view; confirm.
constants.physical_constants["nuclear magneton in inverse meters per tesla"]
import logging
from hdx.location.country import Country
from hdx.scraper.base_scraper import BaseScraper
# Module-level logger, stdlib convention.
logger = logging.getLogger(__name__)
class UNHCR(BaseScraper):
    """Scraper for UNHCR refugee counts: per-country values, a regional
    total, and a time-series output tab."""
    def __init__(self, datasetinfo, today, outputs, countryiso3s):
        # Output schema: national gets per-country count + date, regional
        # gets only the summed count.
        super().__init__(
            "unhcr",
            datasetinfo,
            {
                "national": (
                    ("NoRefugees", "RefugeesDate"),
                    ("#affected+refugees", "#affected+date+refugees"),
                ),
                "regional": (
                    ("NoRefugees",),
                    ("#affected+refugees",),
                ),
            },
        )
        self.today = today
        self.outputs = outputs
        self.countryiso3s = countryiso3s
    def run(self):
        """Download per-country totals, then write the time-series tab."""
        url = self.datasetinfo["url"]
        valuedicts = self.get_values("national")
        reader = self.get_reader()
        json = reader.download_json(url)  # NOTE: local name shadows the json module
        total_refugees = 0
        for data in json["data"]:
            individuals = int(data["individuals"])
            total_refugees += individuals
            date = data["date"]
            # Fuzzy-match the free-text country name to an ISO3 code.
            countryiso3, _ = Country.get_iso3_country_code_fuzzy(data["geomaster_name"])
            if countryiso3 in self.countryiso3s:
                valuedicts[0][countryiso3] = individuals
                valuedicts[1][countryiso3] = date
        # Regional total sums every country, not only the tracked ones.
        self.get_values("regional")[0]["value"] = total_refugees
        url = self.datasetinfo["url_series"]
        json = reader.download_json(url)
        # Header rows: human-readable names, then HXL tags.
        rows = [
            ("RefugeesDate", "NoRefugees"),
            ("#affected+date+refugees", "#affected+refugees"),
        ]
        for data in json["data"]["timeseries"]:
            rows.append((data["data_date"], data["individuals"]))
        tabname = "refugees_series"
        for output in self.outputs.values():
            output.update_tab(tabname, rows)
        self.datasetinfo["source_date"] = self.today
| 1,945 | 573 |
from os.path import join
import torch
import config
from models.attention_model import ZReader as ZReader_attn
from models.fnet_model import ZReader as ZReader_fnet
if __name__ == "__main__":
    # Reference plaintext for manually checking model output.
    # NOTE(review): str_true is never used below -- confirm it is only a
    # visual reference.
    str_true = "thehighwayroundsacurveandtransitionstondstreetrunningeastwardthroughmorefarmlandasthetrunklineappro" \
               "achesneway"
    # A single noisy/ciphered sample to decode.
    str_ = "twer hvb rye hfj idf g fdhh ghw kja ghjy r rtyo u nfgh dhjk s a cghfhf u r vfgh e a fn d t r afgh n s i " \
           "tfgh i ghjo n srt t o nghj d smn t rkl e edfg t fdr u fdn n iret hn rtyg e adfsg s t wdfg a r vbd t " \
           "hvbcnv r o u iopg xcvh zxm sdo qwr dfge frety a dfgr m l kla ern drt auio jks vbnt bvnh e fght r u dsfn" \
           " ikk bnl gfi kbn fe ea hgp dsfp feir bnco ajkl etc dfh ehjd s dgn e dfw dfka yghp"
    batch_ = [str_]
    # NOTE(review): `batch` read from ztext.txt is never used; z_read gets the
    # hard-coded `batch_` -- confirm which input was intended.
    with open(file="ztext.txt", encoding="utf-8", mode="r") as file:
        batch = file.readlines()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Attention-based ZReader with pre-trained weights.
    model = ZReader_attn(*ZReader_attn.get_parameters()).to(device)
    model.load_parameters(join(config.weights_path, '0529_1629_72'), device=device)
    model.z_read(batch_)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
meza
~~~~
Provides methods for reading and processing data from tabular formatted files
Attributes:
CURRENCIES [tuple(unicode)]: Currency symbols to remove from decimal
strings.
ENCODING (str): Default file encoding.
DEFAULT_DATETIME (obj): Default datetime object
"""
from datetime import datetime as dt
from os import path as p
# Package metadata consumed by setup.py and documentation tooling.
__version__ = '0.42.5'
__title__ = 'meza'
__package_name__ = 'meza'
__author__ = 'Reuben Cummings'
__description__ = 'A Python toolkit for processing tabular data'
__email__ = 'reubano@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Reuben Cummings'
# Currency symbols stripped from decimal strings during parsing.
CURRENCIES = ('$', '£', '€')
ENCODING = 'utf-8'
# "Far future" sentinel used when a datetime value is missing.
DEFAULT_DATETIME = dt(9999, 12, 31, 0, 0, 0)
# Unicode byte-order mark, stripped from the head of decoded text.
BOM = '\ufeff'
# Repository-relative location of the bundled test data.
PARENT_DIR = p.abspath(p.dirname(p.dirname(__file__)))
DATA_DIR = p.join(PARENT_DIR, 'data', 'test')
| 907 | 351 |
from functional_groups import SMARTS_ATOM_MAP
from typing import List, Tuple
import numpy as np
# Type alias: a 3-D coordinate as (x, y, z) floats.
xyzcoord = Tuple[float, float, float]
class iFG():
    '''
    iFG class to store information about ligand atoms that make up an iFG, mapping to amino acid residues,
    as well as the protein residues that interact with the iFG
    '''
    def __init__(self, name:str, cfg_entry:dict):
        # Names look like '<canonical>-<variant>'; keep the part before '-'.
        self.canonical_name = name.split('-')[0]
        self.name = name
        # cfg_entry maps atom name -> coordinate triple.
        atom_list = []
        coord_list = []
        for atm in cfg_entry:
            atom_list.append(atm)
            coord_list.append(cfg_entry[atm])
        self.lig_atoms = atom_list
        # (n_atoms, 3) array of heavy-atom coordinates.
        self.heavy_atom_coords = np.vstack(coord_list)
        self.aa_atoms = None  # filled by get_aa_atom_names()
        self.contacts = dict()  # resid -> contact record (see add_interacting_residue)
        # Fixed 3x3 reference coordinates.
        # NOTE(review): presumably an alanine backbone reference frame for
        # superposition -- confirm.
        self.ala_ref = np.array([[ 0.11800566, -2.45811057,  0.        ], \
                                 [-0.77976233, -1.30825949,  0.        ], \
                                 [ 0.        ,  0.        ,  0.        ]])
    def __repr__(self):
        return self.name
    def get_aa_atom_names(self, SMARTS_ATOM_MAP):
        """Build self.aa_atoms: residue type -> {ligand atom: aa atom}.

        NOTE: the parameter shadows the module-level SMARTS_ATOM_MAP import.
        """
        lig_aa_mapping = {}
        for restype in SMARTS_ATOM_MAP[self.canonical_name]:
            atoms = SMARTS_ATOM_MAP[self.canonical_name][restype]
            lig_aa_mapping[restype] = dict(zip(self.lig_atoms, atoms))
        self.aa_atoms = lig_aa_mapping
    def add_interacting_residue(self, interaction_type:str, resid:str, name2:str, name1:str, idx_coord_map:dict):
        # Earlier per-type (cc/hb) bookkeeping, superseded by the unified
        # `contacts` structure below; kept for reference.
        '''
        if interaction_type == 'cc':
            if resid not in self.ccs:
                self.ccs.update({resid: {'atoms': idx_coord_map[resid]['names'], 'coords': idx_coord_map[resid]['coords'], 'interaction': [(name2 ,name1)]}})
            else:
                self.ccs[resid]['interaction'].append((name2, name1))
        elif interaction_type == 'hb':
            if resid not in self.hbs:
                self.hbs.update({resid: {'atoms': idx_coord_map[resid]['names'], 'coords': idx_coord_map[resid]['coords'], 'interaction': [(name2 ,name1)]}})
            else:
                self.hbs[resid]['interaction'].append((name2, name1))
        '''
        # Lazily create the residue record, then register the atom pair under
        # its interaction type ('hb' or 'cc').
        if resid not in self.contacts:
            self.contacts.update({resid: {'residue_atoms': idx_coord_map[resid]['names'], \
                                          'residue_coords': np.array(idx_coord_map[resid]['coords']),
                                          'interactions': {'hb': set(), 'cc': set()}}})
        self.contacts[resid]['interactions'][interaction_type].add((name2,name1))
class VDM():
    """A vdM record: named atoms with coordinates, plus residue mappings and
    contact bookkeeping."""

    def __init__(
        self,
        name: str,
        atom_names: List[str],
        atom_coords: List[Tuple[float, float, float]],
    ):
        self.name = name
        # Names look like '<canonical>_<variant>'; keep the part before '_'.
        self.canonical_name = name.split('_')[0]
        self.atoms = atom_names
        self.coords = np.array(atom_coords)
        self.aa_atoms = None  # filled by set_aa_atoms()
        self.contacts = dict()

    def __repr__(self):
        return self.name

    def get_aa_atoms(self):
        return self.aa_atoms

    def set_aa_atoms(self):
        """Build self.aa_atoms: residue type -> {vdM atom: aa atom}, using the
        module-level SMARTS_ATOM_MAP."""
        mapping = dict()
        per_canonical = SMARTS_ATOM_MAP[self.canonical_name]
        for restype in per_canonical:
            mapping[restype] = dict(zip(self.atoms, per_canonical[restype]))
        self.aa_atoms = mapping

    def update_contacts(self, new_contact: dict):
        self.contacts.update(new_contact)
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 11 13:35:09 2019
@author: Petronium
"""
# Import system modules
import rasterio
from rasterio.plot import show
# Input rasters, assumed clipped to the same extent/grid.
fp1 = r'Bathymetry_Clip.tif'
fp2 = r'Shipping_Clip.tif'
fp3 = r'WindMap_Clip.tif'
bathymetry = rasterio.open(fp1)
shipping = rasterio.open(fp2)
windspeed = rasterio.open(fp3)
# Band 1 of each raster as a numpy array.
bathymetry_b = bathymetry.read(1)
shipping_b = shipping.read(1)
windspeed_b = windspeed.read(1)
def raster_calculation(raster_list, weight_list):
    """
    Compute the weighted sum of rasters.

    Args:
        raster_list (sequence): input raster bands (numpy arrays or scalars)
        weight_list (sequence): weight applied to each raster

    Returns:
        The element-wise weighted sum (0 for empty inputs).

    Raises:
        ValueError: if the two sequences differ in length. (The original
            used ``assert``, which is silently stripped under ``python -O``,
            so mismatched inputs could slip through; zip would then drop the
            extra entries without warning.)
    """
    if len(raster_list) != len(weight_list):
        raise ValueError("Both list should have the same length!")
    result_map = 0
    for band, weight in zip(raster_list, weight_list):
        result_map += band * weight
    return result_map
def saving_to_file(ras_name):
    """
    Save a raster array to 'ResultMap.tif' as float32 with LZW compression.

    Args:
        ras_name: the raster array to write.

    Returns:
        None

    NOTE(review): reads the module-level `bathymetry` dataset for its profile
    and always writes to the hard-coded 'ResultMap.tif' -- confirm both are
    intended for all callers.
    """
    with rasterio.Env():
        # Write an array as a raster band to a new 8-bit file. For
        # the new file's profile, we start with the profile of the source
        profile = bathymetry.profile
        # And then change the band count to 1, set the
        # dtype to uint8, and specify LZW compression.
        profile.update(
            dtype=rasterio.float32,
            count=1,
            compress='lzw')
        with rasterio.open('ResultMap.tif', 'w', **profile) as dst:
            dst.write(ras_name.astype(rasterio.float32), 1)
# Weighted-overlay suitability map: 35% depth, 20% shipping, 45% wind.
result_map = raster_calculation((bathymetry_b, shipping_b, windspeed_b), (0.35,0.2,0.45))
saving_to_file(result_map)
| 1,806 | 655 |
#!/usr/bin/env python
# encoding: utf-8
"""
@version: ??
@author: liangliangyy
@license: MIT Licence
@contact: liangliangyy@gmail.com
@site: https://www.lylinux.net/
@software: PyCharm
@file: viewmodels.py
@time: 2018/6/9 上午1:28
"""
from rest_framework import serializers
from accounts.models import UserModel, WxUserModel
from orders.models import OrderModel, BlessingModel
from xiaobiaobai.utils import get_systemconfigs, convert_to_uuid, check_words_spam
from django.core.exceptions import ObjectDoesNotExist
from accounts.viewmodels import UserModelSerializer
import logging
logger = logging.getLogger(__name__)
class BlessingSerializer(serializers.Serializer):
    """Links a blessing to its user and order by UUID primary key."""
    usermodel = serializers.PrimaryKeyRelatedField(queryset=UserModel.objects.all(),
                                                   pk_field=serializers.UUIDField())
    ordermodel = serializers.PrimaryKeyRelatedField(queryset=OrderModel.objects.all(),
                                                    pk_field=serializers.UUIDField())
class PostLoveSerializer(serializers.Serializer):
    """Input payload for creating a love/confession order."""
    usermodel = serializers.PrimaryKeyRelatedField(queryset=UserModel.objects.all(),
                                                   pk_field=serializers.UUIDField()
                                                   )
    username = serializers.CharField(required=True)
    target_username = serializers.CharField(required=True)
    background_img = serializers.CharField(required=False, allow_blank=True)
    candies_count = serializers.IntegerField(required=True, min_value=0)
    order_content = serializers.CharField(required=True, max_length=200)
    city = serializers.CharField(required=True, max_length=100)

    # Spam check on order_content, currently disabled; kept for reference.
    # def validate(self, attrs):
    #     order_content = attrs['order_content']
    #     if not check_words_spam(order_content):
    #         raise serializers.ValidationError("order_content", "订单内容非法")
    #     return super(PostLoveSerializer, self).validate(attrs)
class OrderSerializer(serializers.Serializer):
    """Read model for an order, including blockchain confirmation fields."""
    id = serializers.UUIDField(format='hex')
    usermodel = UserModelSerializer(read_only=True)
    username = serializers.CharField()
    target_username = serializers.CharField(required=True)
    background_img = serializers.CharField(required=False, allow_blank=True)
    candies_count = serializers.IntegerField(required=True, min_value=0)
    order_content = serializers.CharField(required=True, max_length=200)
    city = serializers.CharField(required=True, max_length=100)
    # WeChat Pay prepay id issued when the order was created.
    wx_prepayid = serializers.CharField(required=True, max_length=100)
    # blessings = BlessingSerializer(read_only=True, many=True, source='blessingmodel_set')
    # Blockchain transaction state for the paid order.
    confirmations = serializers.IntegerField(required=False)
    block_height = serializers.IntegerField(required=False)
    txid = serializers.CharField(required=False)
    block_chain_url = serializers.CharField(required=False)
    blessing_count = serializers.IntegerField(required=False, default=0)
    show_confession_wall = serializers.BooleanField(default=True)
    created_time = serializers.DateTimeField(required=False)
    pay_time = serializers.DateTimeField(required=False)
class ConfessionWallSerializer(serializers.Serializer):
    """Toggle payload for showing/hiding an order on the confession wall."""
    usermodel = serializers.PrimaryKeyRelatedField(queryset=UserModel.objects.all(),
                                                   pk_field=serializers.UUIDField()
                                                   )
    ordermodel = serializers.PrimaryKeyRelatedField(queryset=OrderModel.objects.all(),
                                                    pk_field=serializers.UUIDField())
    status = serializers.BooleanField(required=True)
#!/usr/bin/env python
import requests
from PIL import Image
from .image_utils import image_to_base64
from .results import ClassificationResult
def send_image_predict_request(image: Image.Image, predict_url: str) -> ClassificationResult:
    """POST *image* (base64-encoded) to *predict_url* and parse the JSON reply."""
    body = {
        "inputs": {"Image": image_to_base64(image)},
    }
    reply = requests.post(predict_url, json=body)
    reply.raise_for_status()
    return ClassificationResult.from_json(reply.text)
| 460 | 135 |
# 2678 - Discagem por Voz
# https://www.urionlinejudge.com.br/judge/pt/problems/view/2678
def decode(x):
    """Map a character to its phone-keypad digit.

    Digits, '*' and '#' pass through unchanged; uppercase letters map to
    their keypad digit (ABC->2 ... WXYZ->9); anything else yields ''.

    Bug fix: the original repeated the ``elif x in "DEF"`` and
    ``elif x in "GHI"`` branches verbatim -- dead code that could mask
    future edits. The mapping is now a single table.
    """
    if x.isdigit() or x in "*#":
        return x
    for digit, letters in (
        ("2", "ABC"), ("3", "DEF"), ("4", "GHI"), ("5", "JKL"),
        ("6", "MNO"), ("7", "PQRS"), ("8", "TUV"), ("9", "WXYZ"),
    ):
        if x in letters:
            return digit
    return ""
def main():
    """Read phone strings from stdin until EOF, printing the keypad digits
    for each line."""
    while True:
        try:
            line = input().upper()
        except EOFError:
            return
        print("".join(decode(ch) for ch in line))
# Script entry point.
if __name__ == "__main__":
    main()
| 793 | 294 |
import json
from .base_test import BaseTestCase
class TestMeetups(BaseTestCase):
    """Integration tests for the /api/v1/meetups endpoints."""
    def test_post_meetups(self):
        """Creating a meetup returns 201."""
        with self.client:
            response = self.client.post(
                'api/v1/meetups', data=json.dumps(dict(
                    meetup_id=2,
                    createdOn='12 SEP 2019',
                    location='nairobi',
                    images='img.url',
                    topic='interesting topic',
                    happeningOn='15 SEP 2019',
                    tags='INTERESTING'
                )),
                content_type='application/json'
            )
            self.assertEqual(response.status_code, 201)
    def test_view_meetups(self):
        """Listing with no stored meetups returns 404 with a message."""
        with self.client:
            response = self.client.get('/api/v1/meetups')
            x = json.loads(response.data)
            self.assertEqual(response.status_code, 404)
            self.assertEqual("meetups not found", x["message"])
    def test_get_meetup_by_id(self):
        """Fetching a nonexistent meetup id returns 404 with an error."""
        with self.client:
            response = self.client.get('/api/v1/meetups/3')
            x = json.loads(response.data)
            self.assertEqual(response.status_code, 404)
            self.assertEqual("Meetup with the given id not found", x["error"])
    def test_view_upcoming_meetups(self):
        """NOTE(review): duplicates test_get_meetup_by_id (hits the same
        endpoint) -- presumably meant to call an /upcoming route; confirm."""
        with self.client:
            response = self.client.get('/api/v1/meetups/3')
            x = json.loads(response.data)
            self.assertEqual(response.status_code, 404)
            self.assertEqual("Meetup with the given id not found", x["error"])
    def test_delete_meetup(self):
        """Deleting a missing id 404s; deleting the created one succeeds."""
        with self.client:
            self.client.post(
                'api/v1/meetups', data=json.dumps(dict(
                    meetup_id=2,
                    createdOn='12 SEP 2019',
                    location='nairobi',
                    images='img.url',
                    topic='interesting topic',
                    happeningOn='15 SEP 2019',
                    tags='INTERESTING'
                )),
                content_type='application/json'
            )
            response = self.client.delete('/api/v1/meetups/3')
            res1 = json.loads(response.data)
            self.assertEqual(response.status_code, 404)
            self.assertEqual("Meetup with the id provided was not found", res1["error"])
            response2 = self.client.delete('/api/v1/meetups/2')
            res = json.loads(response2.data)
            self.assertEqual(response2.status_code, 200)
            self.assertEqual("delete successful", res["message"])
    def test_create_rsvp(self):
        """Posting an RSVP for a meetup returns 201."""
        with self.client:
            response = self.client.post('api/v1/meetups/3/rsvps', data=json.dumps(dict(
                id= 13,
                meetup= 1,
                user= 1,
                response= 'maybe'
            )), content_type='application/json')
            self.assertEqual(response.status_code, 201)
| 2,949 | 888 |
import setuptools
import pyside2_style_test as testkit
with open("README.md", "r") as fh:
long_description = fh.read()
with open("requirements.txt", "r") as req:
dependencies = req.read()
setuptools.setup(
name="m3-pyside2-style-test",
version=testkit.__version__,
author=testkit.__author__,
author_email="cplusplusook@gmail.com",
license="MIT",
description="A Qt-5 interactive stylesheet preview script",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/m3tior/pyside2-style-test",
packages=setuptools.find_packages(),
install_requires=dependencies,
entry_points={
"console_scripts": "pyside2-style-test=pyside2_style_test.cli:_main"
},
classifiers=[
"Programming Language :: Python :: 3.5",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 917 | 313 |
from presidio_anonymizer.entities import AnonymizerResult
from presidio_anonymizer.entities.anonymized_entity import AnonymizedEntity
def test_when_no_params_then_object_initialised_correctly():
    """A default-constructed AnonymizerResult has no text and no items."""
    result = AnonymizerResult()
    assert result.text is None
    assert result.items == []
def test_when_correct_params_then_object_initialised_correctly():
    """Constructor arguments are stored verbatim on the result."""
    entity = AnonymizedEntity("an", "b", 1, 2, "c")
    result = AnonymizerResult("a", [entity])
    assert result.text == "a"
    assert result.items[0] == entity
def test_when_normalized_items_called_then_idexes_are_normalized():
    """normalize_item_indexes() rebases item offsets onto the result text."""
    entity = AnonymizedEntity("a", "b", 1, 2, "cd")
    result = AnonymizerResult("*****", [entity])
    result.normalize_item_indexes()
    # "cd" (length 2) should now sit at the end of the 5-char text.
    assert result.items[0].start == 3
    assert result.items[0].end == 5
def test_when_set_text_then_text_is_set():
    """set_text stores the given string on the result."""
    result = AnonymizerResult()
    result.set_text("a")
    assert result.text == "a"
def test_when_add_item_the_item_added():
    """add_item appends the entity to the items list."""
    result = AnonymizerResult()
    entity = AnonymizedEntity("a", "b", 1, 2, "cd")
    result.add_item(entity)
    assert result.items[0] == entity
def test_when_eq_called_then_instances_are_equal():
    """Two results holding identical text compare equal via __eq__."""
    left = AnonymizerResult()
    left.set_text("a")
    right = AnonymizerResult()
    right.set_text("a")
    assert left.__eq__(right)
def test_when_not_eq_called_then_instances_are_not_equal():
    """Results holding different text do not compare equal via __eq__."""
    left = AnonymizerResult()
    left.set_text("a")
    right = AnonymizerResult()
    right.set_text("b")
    assert left.__eq__(right) is False
| 1,446 | 544 |
import time
import torch
# Project imports
from utils import *
from options import TrainOptions
from data_loaders import create_dataloader
from methods import create_method
class Trainer:
    """
    Trains any of the model methods in methods/ using any of the losses
    in losses/.
    Initialize the trainer using a set of parsed arguments from options/.
    """
    def __init__(self, args):
        self.args = args
        # Retrieve train and validation data loaders.
        self.loader = create_dataloader(self.args, 'train')
        self.val_loader = create_dataloader(self.args, 'val')
        num_iterations_per_epoch = len(self.loader)
        # Expose the per-epoch iteration count to the model factory via args.
        setattr(args, 'num_iterations', num_iterations_per_epoch)
        # Initialize a model.
        self.model = create_method(args, self.loader)
        # We keep track of the aggregated losses per epoch in a dict. For now the pre-training
        # train loss is set to zero.
        self.best_val_loss = float('Inf')
        # Run one validation pass before any training (recorded under epoch -1),
        # so a pre-training baseline val loss exists.
        self.validate(-1)
        # assumes model.losses is a sequence of per-epoch dicts with a 'val'
        # key and that validate() appended the baseline entry — TODO confirm
        pre_validation_update(self.model.losses[-1]['val'])
    def train(self):
        """ Main function for training any of the methods, given an input parse.
        """
        for epoch in range(self.args.epochs):
            # Let the model adjust its LR schedule for this epoch.
            self.model.update_learning_rate(epoch, self.args.learning_rate)
            c_time = time.time()
            self.model.to_train()
            # Run a single training epoch.
            self.model.run_epoch(epoch)
            # Perform a validation pass each epoch.
            self.validate(epoch)
            # Print an update of training, val losses.
            print_epoch_update(epoch, time.time() - c_time, self.model.losses)
            # Make a checkpoint, so training can be resumed.
            running_val_loss = self.model.losses[epoch]['val']
            is_best = running_val_loss < self.best_val_loss
            if is_best:
                self.best_val_loss = running_val_loss
                self.model.save_network('best')
        print('Finished Training. Best validation loss:\t{:.4f}'.format(self.best_val_loss))
        # Save the model of the final epoch. If another model was better, also save it separately as best.
        self.model.save_network('final')
        self.model.save_losses()
    def validate(self, epoch):
        """Run a full pass over the validation set and store the mean loss
        under *epoch* (may be -1 for the pre-training baseline)."""
        self.model.to_eval()
        val_loss = 0.0
        for data in self.val_loader:
            # Get the losses for the model for this epoch.
            self.model.set_input(data)
            self.model.forward()
            # assumes get_untrained_loss returns a plain scalar — TODO confirm
            iter_loss = self.model.get_untrained_loss()
            val_loss += iter_loss
        # Compute the loss over this validation set.
        val_loss /= len(self.val_loader)
        # Store the running loss for the validation images.
        self.model.store_val_loss(val_loss, epoch)
    def verify_data(self):
        """ Verifies whether all data has been downloaded and correctly put in the data directory.
        """
        check_if_all_images_are_present('eigen', self.args.data_dir)
        check_if_all_images_are_present('cityscapes', self.args.data_dir)
def main():
    """Parse CLI options, pin the CUDA device, and launch training."""
    parser = TrainOptions()
    args = parser.parse()
    # NOTE(review): mode is unconditionally forced to 'train' here, so the
    # 'verify-data' branch below is unreachable from this entry point — confirm intent.
    args.mode = 'train'
    # Print CUDA version.
    print("Running code using CUDA {}".format(torch.version.cuda))
    # assumes args.device ends in a single-digit GPU index (e.g. 'cuda:0') — TODO confirm
    gpu_id = int(args.device[-1])
    torch.cuda.set_device(gpu_id)
    print('Training on device cuda:{}'.format(gpu_id))
    trainer = Trainer(args)
    if args.mode == 'train':
        trainer.train()
    elif args.mode == 'verify-data':
        trainer.verify_data()
if __name__ == '__main__':
    main()
    print("YOU ARE TERMINATED!")
| 3,637 | 1,093 |
import numpy as np
import cv2
# -------- Task 3.4 -------- #
# Vector storing, for each sub-image, its left boundary (column) and width
vec = np.zeros((5, 2))
crop = ["crop1", "crop2", "crop3", "crop4", "crop5"]
# Read the image and convert it to grayscale
image = cv2.imread('data/korrigiertes_bild2.png', cv2.IMREAD_GRAYSCALE)
# Boundary and width values for image 1
vec[0, 0] = 0
vec[0, 1] = 105
# Boundary and width values for image 2
vec[1, 0] = 111
vec[1, 1] = 135
# Boundary and width values for image 3
vec[2, 0] = 249
vec[2, 1] = 137
# Boundary and width values for image 4
vec[3, 0] = 389
vec[3, 1] = 132
# Boundary and width values for image 5
vec[4, 0] = 529
vec[4, 1] = 111
# Process all 5 sub-images
for z in range(1, 6):
    # Starting row (y) of the crop
    y = 0
    # Starting column (x) of the crop
    x = int(vec[z-1, 0])
    # Crop height in pixels
    h = 480
    # Crop width in pixels
    w = int(vec[z-1, 1])
    # Slice the image using the values above
    crop[z - 1] = image[y:y + h, x:x + w]
    # Display the cropped image
    cv2.imshow("Crop" + str(z), crop[z - 1])
    # Export the cropped image
    cv2.imwrite("korrigiert" + str(z) + ".png", crop[z-1])
cv2.waitKey(0)
| 1,272 | 584 |
# NO_TEEN_SUM
def no_teen_sum(a, b, c):
    """Return a + b + c, where any "teen" value (13-19, except 15 and 16) counts as 0."""
    return fix_teen(a) + fix_teen(b) + fix_teen(c)

def fix_teen(n):
    """Return 0 if n is a teen (13-19) other than 15 or 16; otherwise n unchanged.

    (Original line was corrupted by fused extraction junk after `return 0`;
    the condition chain is also collapsed into a single range test.)
    """
    # 15 and 16 are explicitly exempt from the teen rule.
    if 13 <= n <= 19 and n not in (15, 16):
        return 0
    return n
'''This code is written MTH Junaidi github: Miranjunaidi on 26th Oct 2019 at 7:15 PM IST'''
#POP function removes the last element of the list
def pop(stack):
    """Remove and discard the top (last) element of *stack*; return the stack.

    Raises IndexError if the stack is empty (same as the original `del`).
    """
    stack.pop()
    return stack
#push function adds a new number num to the list at the end.
def push(stack, num):
    """Add *num* on top of (i.e. at the end of) *stack*; return the stack."""
    stack += [num]
    return stack
#print function prints all the element of the stack sequestially
def prints(stack):
    """Print all stack elements on one line, bottom to top, space-separated."""
    line = "".join(str(element) + " " for element in stack)
    print(line)
#This is where the main funtions start
print("\n welcome, a new empty stack is created. \n press index numbers to do the opertations \n")
stack = []
a=0
while a!=4:
print(" 1.push \n 2.pop \n 3.see the stack \n 4.quit")
a = int(input())
if a == 1:
num = int(input("Enter the number you want to push"))
push(stack, num)
elif a == 2:
pop(stack)
elif a==3:
prints(stack)
elif a>4 or a<0:
print("enter a valid operation") | 944 | 318 |
import os
from collections import defaultdict
from typing import Any, Dict, Iterable, List, Set, Union
def validate_and_read_file(filepath: str) -> List[str]:
    """Return the lines of *filepath* (newlines kept).

    Raises FileNotFoundError with a friendly message if the file is absent.
    """
    if not os.path.exists(filepath):
        raise FileNotFoundError(f"Error: no file found at {filepath}")
    # Use a context manager so the handle is closed deterministically
    # (the original left the file object to the garbage collector).
    with open(filepath) as f:
        return f.readlines()
def get_default_input_filename(day: str, year: str) -> str:
    """Conventional path of the puzzle input for the given day/year."""
    return "inputs/{0}/day_{1}.txt".format(year, day)
def get_default_solution_filename(day: str, year: str) -> str:
    """Conventional path of the solution module for the given day/year."""
    return "daily_solutions/year_{0}/day_{1}.py".format(year, day)
def get_default_test_filename(day: str, year: str) -> str:
    """Conventional path of the test module for the given day/year."""
    return "daily_solutions/year_{0}/tests/test_day_{1}.py".format(year, day)
def group_entries_by_line_break(input_lines: List[str]) -> List[List[str]]:
    """Split lines into groups separated by blank lines.

    Each line is stripped; a blank line closes the current group (which may
    itself be empty). A trailing non-empty group is kept.
    """
    groups: List[List[str]] = []
    block: List[str] = []
    for raw in input_lines:
        stripped = raw.strip()
        if not stripped:
            groups.append(block)
            block = []
        else:
            block.append(stripped)
    if block:
        groups.append(block)
    return groups
def get_frequency_counts(input_list: Iterable[Any]) -> Dict[Any, int]:
    """Return a mapping of each element of *input_list* to its occurrence count.

    Uses collections.Counter (a dict subclass; like the previous defaultdict,
    missing keys read as 0) instead of a hand-rolled counting loop.
    """
    # Local import: the module header only imports defaultdict from collections.
    from collections import Counter
    return Counter(input_list)
def get_unique_entries(entry: Iterable[Any]) -> Set[Any]:
    """Return the distinct elements of *entry* as a set.

    `set(entry)` replaces the redundant identity comprehension `{e for e in entry}`.
    """
    return set(entry)
| 1,390 | 465 |
from django.conf.urls import url
from lotus_auth import forms
from django.contrib.auth import views as django_auth_views
from lotus_auth import views as lotus_auth_views
urlpatterns = [
    # MWC Custom Auth Views
    # Route /login/ to the project's custom login view, passing the template
    # and the custom authentication form as extra view kwargs.
    url(r'^login/$',
        lotus_auth_views.login, {
            'template_name': 'lotus_auth/login.html',
            'authentication_form': forms.LotusAuthenticationForm
        }, name='account_login'),
]
| 426 | 130 |
# -*- coding: cp1251 -*
import socket
import time
import sys
from PyQt5 import QtWidgets
from threading import Thread
from PyQt5 import QtCore
from PyQt5.QtWidgets import (QVBoxLayout, QSplitter, QTextEdit, QWidget)
from PyQt5.QtGui import QTextCursor
import MySQLdb
class Settings_server:
    """Shared server state used as a namespace (never instantiated).

    NOTE(review): the UDP socket is created and bound while this class body is
    evaluated, i.e. as a side effect of importing the module — confirm intent.
    """
    # Local machine address and fixed UDP port of the chat server.
    host = socket.gethostbyname(socket.gethostname())
    port = 6046
    # Addresses of clients seen so far (filled in as datagrams arrive).
    clients = []
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("",port))
class ServerThread(Thread):
    """UDP chat server loop: relays datagrams between clients and serves a
    small text protocol backed by MySQL (all payloads cp1251-encoded).

    Message prefixes handled below:
      'A' + "name,password" -> authenticate, reply "ATrue"/"AFalse"
      'R' + "name,password" -> register,     reply "RTrue"/"RFalse"
      'M' + "name,text"     -> persist a chat message
      'P'                   -> send the full message history back
    """
    def __init__(self, window):
        Thread.__init__(self)
        # NOTE(review): run() below uses the module-level global `window`,
        # not self.window — it only works because __main__ defines `window`.
        self.window = window
    def run(self):
        # Per-thread DB connection; autocommit so INSERTs need no commit().
        db = MySQLdb.connect("localhost", "root", "protozerg", "datachat1", charset='cp1251')
        db.autocommit(True)
        cursor = db.cursor()
        window.chat.append("Server started")
        while True:
            data, adr = Settings_server.sock.recvfrom(1024)
            # First datagram from a new address registers it as a client.
            if adr not in Settings_server.clients:
                Settings_server.clients.append(adr)
                times = time.strftime("%Y-%m-%d-%H.-%M.-%S", time.localtime())
                window.chat.append("[" + adr[0] + "]=[" + str(adr[1]) + "]=[" + times + "]\n")
            # Relay the raw datagram to every other known client.
            for client in Settings_server.clients:
                if adr != client:
                    Settings_server.sock.sendto(data, client)
            window.chat.append(data.decode("cp1251") + "\n")
            data_dec = str(data.decode("cp1251"))
            # NOTE(review): all replies below are sent to `client`, the loop
            # variable left over from the relay loop above — not necessarily
            # the sender `adr`. Confirm this is intended.
            if data_dec[0] == "A":
                # Authentication request: "A<name>,<password>".
                data1 = data_dec[1:]
                res = [element for element in data1.split(",")]
                # NOTE(review): 'name' in quotes is an SQL string literal,
                # not the column — this still works as an existence check only.
                cursor.execute("Select 'name', `password` from `Users` Where `name` = %s and `password` = %s;", res)
                rows = cursor.fetchone()
                if rows is None:
                    data_to_client = "A" + "False"
                    Settings_server.sock.sendto(data_to_client.encode("cp1251"), client)
                else:
                    data_to_client = "A" + "True"
                    Settings_server.sock.sendto(data_to_client.encode("cp1251"), client)
            elif data_dec[0] == "R":
                # Registration request: "R<name>,<password>"; refuse duplicates.
                data1 = data_dec[1:]
                res = [element for element in data1.split(",")]
                cursor.execute("Select 'name' from `Users` Where `name` = %s;", [res[0]])
                rows = cursor.fetchone()
                if rows is None:
                    cursor.execute("INSERT Users(name, password) VALUES (%s, %s);", res)
                    data_to_client = "R" + "True"
                    Settings_server.sock.sendto(data_to_client.encode("cp1251"), client)
                elif rows is not None:
                    data_to_client = "R" + "False"
                    Settings_server.sock.sendto(data_to_client.encode("cp1251"), client)
            elif data_dec[0] == "M":
                # Store message: "M<name>,<text>" -> Messages row with NOW().
                data1 = data_dec[1:]
                res = [element for element in data1.split(",")]
                cursor.execute("Select `id`, `name` from `Users` Where `name` = %s", [res[0]])
                row = cursor.fetchone()
                values = []
                values.append(res[1])
                values.append(int(row[0]))
                cursor.execute("INSERT INTO Messages(text, User_id, date) VALUES (%s, %s, NOW());", values)
            elif data_dec[0] == "P":
                # History pull: send every stored message, one datagram each.
                cursor.execute(
                    "Select M.text, M.date, U.name from `Messages` M INNER JOIN `Users` U ON U.id = M.User_id ORDER BY M.date;")
                row = cursor.fetchall()
                l = []
                for rall in row:
                    new = []
                    new.append(rall[2])
                    new.append(str(rall[1]))
                    new.append(rall[0])
                    l.append(new)
                for d in l:
                    Settings_server.sock.sendto(("P" + str(d)).encode("cp1251"), client)
        # NOTE(review): unreachable — the `while True` above never exits.
        db.close()
class Window(QWidget):
    """Read-only chat log window for the server."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        """Build the widget tree: a single read-only QTextEdit in splitters."""
        self.setObjectName("Server")
        self.resize(371, 500)
        # NOTE(review): maximum size (300x700) is smaller than the minimum
        # (400x700) in width — the two calls look swapped; confirm intent.
        self.setMaximumSize(300, 700)
        self.setMinimumSize(400, 700)
        self.setStyleSheet("\n""background-color: rgb(167, 198, 255);")
        self.chat = QTextEdit()
        self.chat.setStyleSheet("background-color: rgb(255, 255, 255);\n"
                                "font: 75 italic 15pt \"MS Shell Dlg 2\";")
        # Keep the cursor pinned to the end so new messages stay visible.
        self.chat.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor)
        self.chat.setReadOnly(True)
        self.chatBody = QVBoxLayout(self)
        splitter = QSplitter(QtCore.Qt.Vertical)
        splitter.addWidget(self.chat)
        splitter.setSizes([400, 100])
        splitter2 = QSplitter(QtCore.Qt.Vertical)
        splitter2.addWidget(splitter)
        splitter2.setSizes([200, 10])
        self.chatBody.addWidget(splitter2)
        self.setWindowTitle("Chat Application")
        self.resize(500, 500)
if __name__ == '__main__':
    # Start the Qt app and the background UDP server thread.
    app = QtWidgets.QApplication(sys.argv)
    window = Window()
    ServerThread = ServerThread(window)
    ServerThread.start()
    # NOTE(review): show() is immediately followed by hide(), so the log
    # window never stays visible — confirm whether hide() is intentional.
    window.show()
    window.hide()
    app.exec_()
    ServerThread.join()
    Settings_server.sock.close()
| 5,165 | 1,651 |
""" Command Line Interface Module """
import optparse
import sys
import os
from ansibledocgen.parser.dir import DirParser
from ansibledocgen.formatter.markup import MarkupFormatter
class Cli(object):
    """ Command Line Interface for ansible-docgen """
    def __init__(self):
        """ Setup Arguments and Options for CLI """
        # Parse CLI Arguments
        parser = optparse.OptionParser()
        parser.add_option("-p", "--project", dest="project",
                          help="Path to Ansible project",
                          metavar="PROJECT",
                          default="./")
        parser.add_option("-s", "--style", dest="style",
                          help="Choose the format for the documentation.\
 Default is markup. Example: --style=[markup]",
                          metavar="STYLE",
                          default="markup")
        parser.add_option("-n", "--no-tags", dest="show_tags",
                          action='store_false',
                          help="This option disables show tags in the documentation",
                          metavar="TAGS",
                          default=True)
        (options, args) = parser.parse_args()
        # Make sure there is a trailing /
        self.project = os.path.join(options.project, "")
        self.style = options.style
        # Extra knobs forwarded to the formatter.
        self.params = {}
        self.params['show_tags'] = options.show_tags
        # Used to Parse Roles and Playbooks
        self.dirparser = None
        self.formatter = None
    def run(self):
        """ EntryPoint Of Application """
        # Parse Project for Roles and Playbooks
        self.dirparser = DirParser(self.project)
        # Based on chosen style, use the associated formatter
        if self.style == "markup":
            self.formatter = MarkupFormatter(
                self.dirparser.get_parserdata(), self.project, self.params)
            self.formatter.parse_data()
            self.formatter.write_files()
        else:
            # Unknown style: report and exit non-zero.
            print("Error: Use of an unsupported style.\
 The supported styles are: markup")
            sys.exit(1)
| 2,147 | 541 |
#
# -*- coding: utf-8 -*-
#
# This file is part of reclass (http://github.com/madduck/reclass)
#
# Copyright © 2007–14 martin f. krafft <madduck@madduck.net>
# Released under the terms of the Artistic Licence 2.0
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from reclass.settings import Settings
from reclass.storage.memcache_proxy import MemcacheProxy
from reclass.storage import NodeStorageBase
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
class TestMemcacheProxy(unittest.TestCase):
    """Verifies MemcacheProxy's caching flags by counting calls to the
    wrapped storage mock: with caching off every call reaches the backend,
    with caching on repeated lookups hit the backend only once per key."""
    def setUp(self):
        # A strict mock of the storage interface; call_args_list records hits.
        self._storage = mock.MagicMock(spec_set=NodeStorageBase)
    def test_no_nodes_caching(self):
        # cache_nodes=False: every get_node call must reach the backend.
        p = MemcacheProxy(self._storage, cache_nodes=False)
        NAME = 'foo'; NAME2 = 'bar'; RET = 'baz'; SETTINGS = Settings()
        self._storage.get_node.return_value = RET
        self.assertEqual(p.get_node(NAME, SETTINGS), RET)
        self.assertEqual(p.get_node(NAME, SETTINGS), RET)
        self.assertEqual(p.get_node(NAME2, SETTINGS), RET)
        self.assertEqual(p.get_node(NAME2, SETTINGS), RET)
        expected = [mock.call(NAME, SETTINGS), mock.call(NAME, SETTINGS),
                    mock.call(NAME2, SETTINGS), mock.call(NAME2, SETTINGS)]
        self.assertListEqual(self._storage.get_node.call_args_list, expected)
    def test_nodes_caching(self):
        # cache_nodes=True: repeated lookups hit the backend once per name.
        p = MemcacheProxy(self._storage, cache_nodes=True)
        NAME = 'foo'; NAME2 = 'bar'; RET = 'baz'; SETTINGS = Settings()
        self._storage.get_node.return_value = RET
        self.assertEqual(p.get_node(NAME, SETTINGS), RET)
        self.assertEqual(p.get_node(NAME, SETTINGS), RET)
        self.assertEqual(p.get_node(NAME2, SETTINGS), RET)
        self.assertEqual(p.get_node(NAME2, SETTINGS), RET)
        expected = [mock.call(NAME, SETTINGS), mock.call(NAME2, SETTINGS)] # called once each
        self.assertListEqual(self._storage.get_node.call_args_list, expected)
    def test_no_classes_caching(self):
        # cache_classes=False: every get_class call must reach the backend.
        p = MemcacheProxy(self._storage, cache_classes=False)
        NAME = 'foo'; NAME2 = 'bar'; RET = 'baz'; SETTINGS = Settings()
        self._storage.get_class.return_value = RET
        self.assertEqual(p.get_class(NAME, None, SETTINGS), RET)
        self.assertEqual(p.get_class(NAME, None, SETTINGS), RET)
        self.assertEqual(p.get_class(NAME2, None, SETTINGS), RET)
        self.assertEqual(p.get_class(NAME2, None, SETTINGS), RET)
        expected = [mock.call(NAME, None, SETTINGS), mock.call(NAME, None, SETTINGS),
                    mock.call(NAME2, None, SETTINGS), mock.call(NAME2, None, SETTINGS)]
        self.assertListEqual(self._storage.get_class.call_args_list, expected)
    def test_classes_caching(self):
        # cache_classes=True: repeated lookups hit the backend once per name.
        p = MemcacheProxy(self._storage, cache_classes=True)
        NAME = 'foo'; NAME2 = 'bar'; RET = 'baz'; SETTINGS = Settings()
        self._storage.get_class.return_value = RET
        self.assertEqual(p.get_class(NAME, None, SETTINGS), RET)
        self.assertEqual(p.get_class(NAME, None, SETTINGS), RET)
        self.assertEqual(p.get_class(NAME2, None, SETTINGS), RET)
        self.assertEqual(p.get_class(NAME2, None, SETTINGS), RET)
        expected = [mock.call(NAME, None, SETTINGS), mock.call(NAME2, None, SETTINGS)] # called once each
        self.assertListEqual(self._storage.get_class.call_args_list, expected)
    def test_nodelist_no_caching(self):
        # cache_nodelist=False: each enumerate_nodes() reaches the backend.
        p = MemcacheProxy(self._storage, cache_nodelist=False)
        p.enumerate_nodes()
        p.enumerate_nodes()
        expected = [mock.call(), mock.call()]
        self.assertListEqual(self._storage.enumerate_nodes.call_args_list, expected)
    def test_nodelist_caching(self):
        # cache_nodelist=True: the node list is fetched only once.
        p = MemcacheProxy(self._storage, cache_nodelist=True)
        p.enumerate_nodes()
        p.enumerate_nodes()
        expected = [mock.call()] # once only
        self.assertListEqual(self._storage.enumerate_nodes.call_args_list, expected)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 4,067 | 1,381 |
from fastapi import FastAPI
from elasticapm.contrib.starlette import ElasticAPM
from starlette_exporter import PrometheusMiddleware, handle_metrics
from app.api import database, engine, metadata
from app.api.auth import session
from app.api.reports import reports, apm
# Create all tables bound to this metadata (no-op for tables that exist).
metadata.create_all(engine)
app = FastAPI(openapi_url="/api/reports/openapi.json", docs_url="/api/reports/docs")
# Prometheus metrics middleware plus a scrape endpoint for this service.
app.add_middleware(PrometheusMiddleware)
app.add_route("/api/reports/metrics", handle_metrics)
# Elastic APM instrumentation; `apm` client is configured in app.api.reports.
app.add_middleware(ElasticAPM, client=apm)
@app.on_event("startup")
async def startup():
    # Open the database connection pool when the service starts.
    await database.connect()
@app.on_event("shutdown")
async def shutdown():
    # Close the DB pool and the shared auth HTTP session on shutdown.
    await database.disconnect()
    await session.close()
app.include_router(reports, prefix='/api/reports', tags=['reports'])
| 780 | 259 |
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 15 16:20:49 2016
@author: Philippe
"""
import numpy as np
from scipy.sparse.linalg import svds
from functools import partial
def em_svd(Y, k=None, tol=1e-3, maxiter=None):
    """
    Approximate SVD on data with missing values via expectation-maximization
    Parameters
    ----------
    Y: (nobs, ndim) data matrix, missing values denoted by NaN/Inf
    k: number of singular values/vectors to find (default: k=ndim)
    tol: convergence tolerance on change in trace norm
    maxiter: maximum number of EM steps to perform (default: no limit)
    Returns
    -------
    Y_hat: (nobs, ndim) reconstructed data matrix
    mu_hat: (ndim,) estimated column means for reconstructed data
    U, s, Vt: singular values and vectors (see np.linalg.svd and
        scipy.sparse.linalg.svds for details)
    """
    if k is None:
        svdmethod = partial(np.linalg.svd, full_matrices=False)
    else:
        svdmethod = partial(svds, k=k)
    if maxiter is None:
        maxiter = np.inf
    # initialize the missing values to their respective column means
    mu_hat = np.nanmean(Y, axis=0, keepdims=1)
    valid = np.isfinite(Y)
    Y_hat = np.where(valid, Y, mu_hat)
    halt = False
    ii = 1
    v_prev = 0
    U, s, Vt = None, None, None
    while not halt:
        # E-step surrogate: SVD on the filled-in, centered data
        U, s, Vt = svdmethod(Y_hat - mu_hat)
        # impute missing values from the low-rank reconstruction
        Y_hat[~valid] = (U.dot(np.diag(s)).dot(Vt) + mu_hat)[~valid]
        # update bias parameter
        mu_hat = Y_hat.mean(axis=0, keepdims=1)
        # test convergence using relative change in trace norm; on the first
        # pass v_prev == 0, so treat the change as "not yet converged" rather
        # than dividing by zero (the original printed the raw ratio, which was
        # a leftover debug statement and relied on numpy's inf-on-zero-division).
        v = s.sum()
        rel_change = (v - v_prev) / v_prev if v_prev != 0 else np.inf
        if ii >= maxiter or rel_change < tol:
            halt = True
        ii += 1
        v_prev = v
    return Y_hat, mu_hat, U, s, Vt
| 1,904 | 679 |
from picraftzero.log import logger
from picraftzero.interfaces.hardware.providers import MotorProvider
from picraftzero.utils import constrain
from picraftzero.utils import dedupe
from picraftzero.thirdparty import piconzero as pz
class PiconzeroMotor(MotorProvider):
    """Motor backed by the Picon Zero board (channel selected by motor_id)."""
    def __init__(self, motor_id):
        self.motor_id = motor_id
        # Last speed actually sent; used to suppress duplicate commands.
        self._last_speed = None
        pz.init()
    def begin(self):
        pass
    def end(self):
        pass
    #@dedupe
    def set_speed(self, speed):
        """Send *speed* to the board, skipping duplicates and None values.

        NOTE(review): _last_speed stores the value before clamping, so two
        different out-of-range inputs clamping to the same value are both sent.
        """
        msg = "set_speed({}, {})".format(self.motor_id, speed)
        logger.debug(msg)
        if speed == self._last_speed or speed is None:
            return
        self._last_speed = speed
        logger.info(msg)
        # TODO: scale -100..100 to -128..127
        speed = constrain(speed, -128, 127)
        pz.setMotor(self.motor_id, speed)
| 862 | 297 |
# example of an average forecast
from numpy import mean
from numpy import median
# one-step average forecast
# one-step average forecast
def average_forecast(history, config):
    """One-step forecast: average of the last n observations.

    Parameters
    ----------
    history: sequence of past observations
    config: (n, avg_type) — window length and 'mean' or 'median'
    """
    n, avg_type = config
    # mean of last n values.  Use equality, not identity: `avg_type is 'mean'`
    # only worked via CPython string interning and is a SyntaxWarning on
    # modern Pythons.
    if avg_type == 'mean':
        return mean(history[-n:])
    # median of last n values
    return median(history[-n:])
# Demo: a toy series, printed once, then one mean forecast per trailing
# window length (1 .. full series).  (Fused extraction junk removed.)
data = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
print(data)
for window in range(1, len(data) + 1):
    print(average_forecast(data, (window, 'mean')))
# Copyright 2021 MosaicML. All Rights Reserved.
from composer.algorithms.cutout.cutout import CutOut as CutOut
from composer.algorithms.cutout.cutout import CutOutHparams as CutOutHparams
from composer.algorithms.cutout.cutout import cutout as cutout
# Method-card metadata consumed by Composer's docs/registry tooling.
_name = 'CutOut'
_class_name = 'CutOut'
_functional = 'cutout'
_tldr = 'Randomly erases rectangular blocks from the image.'
_attribution = '(DeVries et al, 2017)'
_link = 'https://arxiv.org/abs/1708.04552'
_method_card = ''
| 477 | 182 |
#encoding:utf-8
import six
from genius.tools import StringHelper
class Word(object):
    """A segmented word with its frequency, POS tag, source and text offset."""
    # Shared helper used to compute the marker form of the word text.
    string_helper = StringHelper()
    def __init__(self, text, **kwargs):
        self.text = text  # the word itself
        self.freq = kwargs.get('freq', 0)  # word frequency
        self.tagging = kwargs.get('tagging', 'unknown')  # part-of-speech tag
        # Segment source: dic = dictionary, crf = CRF model,
        # break = break dictionary, pinyin = pinyin segmentation
        self.source = kwargs.get('source', 'unknown')
        self.offset = kwargs.get('offset', 0)  # position within the original text
    @property
    def marker(self):
        """
        see string_helper.mark_text method
        """
        return self.string_helper.mark_text(self.text)
    def __str__(self):
        # Python 2 expects bytes from __str__; Python 3 expects str.
        if six.PY2:
            return self.text.encode('utf8')
        return self.text
    def __len__(self):
        return len(self.text)
    def __hash__(self):
        # Hash on text only; consistent with __eq__ (equal objects share text).
        return hash(self.text)
    def __eq__(self, obj):
        # Equality requires every field to match, not just the text.
        if isinstance(obj, type(self)):
            return (
                obj.text == self.text and
                obj.freq == self.freq and
                obj.tagging == self.tagging and
                obj.source == self.source and
                obj.offset == self.offset
            )
        return False
| 1,191 | 397 |
from __future__ import unicode_literals
from django.db import models
class Industry(models.Model):
    """Industry a member can belong to (referenced by Member.industry)."""
    name = models.CharField(max_length=100)
    def __unicode__(self):
        return self.name
    class Meta:
        verbose_name_plural = 'Industries'
        ordering = ['name']
class Location(models.Model):
    """Geographic location of members (referenced by Member.location)."""
    name = models.CharField(max_length=100)
    def __unicode__(self):
        return self.name
    @property
    def member_count(self):
        # Number of Members whose location FK points here (reverse relation).
        return self.member_set.count()
    class Meta:
        ordering = ['name']
class Member(models.Model):
    """A registered member with contact details and next-of-kin (nok) info."""
    # Availability / gender choice tuples; the literal ints mirror the named
    # constants above them (kept as literals to preserve existing migrations).
    UNAVAILABLE = 0
    AVAILABLE = 1
    STATUS = ((0, 'Unavailable'), (1, 'Available'))
    MALE = 0
    FEMALE = 1
    GENDERS = ((0, 'Female'), (1, 'Male'))
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    email = models.EmailField()
    phone = models.CharField(max_length=20)
    location = models.ForeignKey(Location)
    birthday = models.DateField()
    availability = models.PositiveIntegerField(choices=STATUS)
    gender = models.PositiveIntegerField(choices=GENDERS, null=True)
    industry = models.ForeignKey(Industry, null=True)
    # Next-of-kin contact details (all optional).
    nok_name = models.CharField(max_length=50, blank=True)
    nok_phone = models.CharField(max_length=20, blank=True)
    nok_email = models.EmailField(blank=True, null=True)
    username = models.CharField(max_length=100, blank=True)
    def __unicode__(self):
        return self.email
| 1,462 | 492 |
#!/usr/bin/env python
"""
@file test.py
@author Pablo Alvarez Lopez
@date 2016-11-25
@version $Id$
python script used by sikulix for testing netedit
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2009-2017 DLR/TS, Germany
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
# import common functions for netedit tests
import os
import sys
testRoot = os.path.join(os.environ.get('SUMO_HOME', '.'), 'tests')
neteditTestRoot = os.path.join(
os.environ.get('TEXTTEST_HOME', testRoot), 'netedit')
sys.path.append(neteditTestRoot)
import neteditTestFunctions as netedit # noqa
# Open netedit
neteditProcess, match = netedit.setupAndStart(neteditTestRoot)
# go to additional mode
netedit.additionalMode()
# select chargingStation
netedit.changeAdditional("chargingStation")
# set name
netedit.modifyAdditionalDefaultValue(2, "chargingStation")
# set friendlyPos
netedit.modifyAdditionalDefaultBoolValue(3)
# set invalid charging power
netedit.modifyAdditionalDefaultValue(4, "-200")
# try to create chargingStation in mode "reference left" (should be rejected)
netedit.leftClick(match, 250, 250)
# set valid charging power
netedit.modifyAdditionalDefaultValue(4, "12000")
# create chargingStation in mode "reference left"
netedit.leftClick(match, 250, 250)
# change reference to right
netedit.modifyAdditionalDefaultValue(9, "reference right")
# set invalid efficiency
netedit.modifyAdditionalDefaultValue(5, "2")
# try to create chargingStation in mode "reference right" (should be rejected)
netedit.leftClick(match, 240, 250)
# set valid efficiency
netedit.modifyAdditionalDefaultValue(5, "0.3")
# create chargingStation in mode "reference right"
netedit.leftClick(match, 240, 250)
# change reference to center
netedit.modifyAdditionalDefaultValue(9, "reference center")
# toggle the change-in-transit option
netedit.modifyAdditionalDefaultBoolValue(6)
# create chargingStation in mode "reference center"
netedit.leftClick(match, 425, 250)
# Change length
netedit.modifyAdditionalDefaultValue(11, "30")
# change reference to "reference left"
netedit.modifyAdditionalDefaultValue(9, "reference left")
# set invalid charge delay
netedit.modifyAdditionalDefaultValue(7, "-5")
# try to create a chargingStation in mode "reference left" forcing position
netedit.leftClick(match, 500, 250)
# valid charge delay
netedit.modifyAdditionalDefaultValue(7, "7")
# create a chargingStation in mode "reference left" forcing position
netedit.leftClick(match, 500, 250)
# change reference to "reference right"
netedit.modifyAdditionalDefaultValue(9, "reference right")
# create a chargingStation in mode "reference right"
netedit.leftClick(match, 110, 250)
# disable friendlyPos
netedit.modifyAdditionalDefaultBoolValue(3)
# change reference to "reference left"
netedit.modifyAdditionalDefaultValue(9, "reference left")
# create a chargingStation in mode "reference left" without friendlyPos
netedit.leftClick(match, 120, 215)
# change reference to "reference right"
netedit.modifyAdditionalDefaultValue(9, "reference right")
# create a chargingStation in mode "reference right" without friendlyPos
netedit.leftClick(match, 500, 215)
# Check undo redo
netedit.undo(match, 8)
netedit.redo(match, 8)
# save additionals
netedit.saveAdditionals()
# Fix stopping places position
netedit.fixStoppingPlace("fixPositions")
# save network
netedit.saveNetwork()
# quit netedit
netedit.quit(neteditProcess)
| 3,610 | 1,187 |
''' Extract a subset of lines from a file. '''
import random
def extract_n(datalist, number, save_header=True):
    """Randomly pick *number* lines from *datalist*.

    With save_header=True the first line is always kept (and excluded from
    sampling), so the result has number + 1 entries.  Sampled lines appear
    in the order random.sample returned them.
    """
    first_data_index = 1 if save_header else 0
    chosen = [datalist[0]] if save_header else []
    for index in random.sample(range(first_data_index, len(datalist)), number):
        chosen.append(datalist[index])
    return chosen
| 360 | 124 |
import torch
import torch.utils.data
import numpy as np
import csv
import random
import scvi
import scanpy as sc
import json
from collections import Counter
import json
import os
def convert_num(labels):
    """Map string labels to stable integer ids, persisted in label2id.json.

    Ids recorded in an existing label2id.json are reused; labels not seen
    before are appended with fresh ids and the mapping file is rewritten.
    (The original never updated an existing file, so any new label raised
    KeyError on the lookup below.)

    Returns a numpy int array aligned with *labels*.
    """
    if os.path.exists('label2id.json'):
        with open('label2id.json', 'r') as j:
            label2id = json.load(j)
        had_file = True
    else:
        label2id = {}
        had_file = False
    # Assign fresh ids to labels missing from the mapping, in first-seen order
    # (Counter preserves insertion order, matching the original id assignment).
    new_labels = [label for label in Counter(labels) if label not in label2id]
    for label in new_labels:
        label2id[label] = len(label2id)
    # Persist when the mapping changed or didn't exist yet.
    if new_labels or not had_file:
        with open('label2id.json', 'w') as j:
            json.dump(label2id, j)
    return np.array([label2id[label] for label in labels])
class RepDataset(torch.utils.data.Dataset):
    """Dataset of float feature vectors loaded from a CSV file.

    Each CSV row is all-but-last columns of floats (the representation) plus
    a final string label column; labels are converted to ids via convert_num.
    """
    def __init__(self, rep_path):
        matrix = []
        labels = []
        # assumes the CSV has no header row — every line is parsed as data
        with open(rep_path, 'r') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            line_count = 0
            for row in csv_reader:
                rep = row[:-1]
                rep = [float(item) for item in rep]
                matrix.append(rep)
                labels.append(row[-1])
                line_count += 1
        self.mat = np.array(matrix)
        self.labels = convert_num(labels)
        print(f'rep num: {len(self.labels)}')
    def get_weight(self):
        """Return normalized inverse-frequency class weights.

        NOTE(review): the loop below indexes label_num by 0..len-1, so it
        assumes label ids are contiguous from 0 and all present — a missing
        id would raise KeyError.
        """
        label_num = {}
        for label in self.labels:
            if label in label_num:
                label_num[label] += 1
            else:
                label_num[label] = 1
        weight = []
        for i in range(len(label_num)):
            weight.append(1.0/label_num[i])
        weight = np.array(weight)
        weight = weight/ np.sum(weight)
        return weight
    def get_rep_dim(self):
        # Feature dimensionality (number of CSV columns minus the label).
        return self.mat.shape[1]
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        # Returns (float tensor of features, 1-element long tensor label).
        view = torch.Tensor(self.mat[idx])
        label = torch.LongTensor([int(self.labels[idx])])
        return (view, label)
| 2,005 | 637 |
# -*- coding: utf-8 -*
import wx
import matplotlib
matplotlib.use("WxAgg")
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
class MatplotlibPanel(wx.Panel):
    """wx panel embedding a matplotlib figure with its navigation toolbar."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
        # One figure with a single axes; exposed for callers to plot into.
        self.figure = Figure()
        self.axes = self.figure.add_subplot(111)
        self.canvas = FigureCanvas(self, -1, self.figure)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
        self.toolbar = self.add_toolbar()
        self.SetSizer(self.sizer)
        self.Fit()
    def add_toolbar(self):
        """Create the matplotlib navigation toolbar and add it below the canvas."""
        toolbar = NavigationToolbar2Wx(self.canvas)
        toolbar.Realize()
        self.sizer.Add(toolbar, 0, wx.ALIGN_CENTER | wx.EXPAND)
        # update() syncs the toolbar with the current axes navigation stack.
        toolbar.update()
        return toolbar
# Minimal demo: show the panel in a bare frame when run as a script.
if __name__ == '__main__':
    app = wx.App()
    frame = wx.Frame(None, title='demo app')
    p = MatplotlibPanel(frame)
    frame.Show(True)
    app.MainLoop()
| 1,091 | 373 |
import argparse
import os
import re
import warnings
from carto.auth import APIKeyAuthClient
from carto.datasets import DatasetManager
from carto.sql import SQLClient
warnings.filterwarnings('ignore')
# set input arguments
# Command-line interface; credentials default to the CARTO_* env variables.
parser = argparse.ArgumentParser(
    description='Exports the CREATE TABLE scripts of all the account datasets')
parser.add_argument('--organization', type=str, dest='organization',
                    default=os.environ.get('CARTO_ORG', ''),
                    help='Set the name of the organization' +
                    ' account (defaults to env variable CARTO_ORG)')
parser.add_argument('--base_url', type=str, dest='CARTO_BASE_URL',
                    default=os.environ.get('CARTO_API_URL', ''),
                    help='Set the base URL. For example:' +
                    ' https://username.carto.com/ ' +
                    '(defaults to env variable CARTO_API_URL)')
parser.add_argument('--api_key', dest='CARTO_API_KEY',
                    default=os.environ.get('CARTO_API_KEY', ''),
                    help='Api key of the account' +
                    ' (defaults to env variable CARTO_API_KEY)')
args = parser.parse_args()
# Authenticate to CARTO account
if args.CARTO_BASE_URL and args.CARTO_API_KEY and args.organization:
    auth_client = APIKeyAuthClient(
        args.CARTO_BASE_URL, args.CARTO_API_KEY, args.organization)
    dataset_manager = DatasetManager(auth_client)
else:
    # Fix: `logger` was never defined here, so the error path crashed with a
    # NameError instead of a helpful message.  Report on stderr and exit 1.
    import sys
    print('You need to provide valid credentials, run with -h parameter for details',
          file=sys.stderr)
    sys.exit(1)
# SQL wrapper
# NOTE(review): this second client is built without the organization, unlike
# auth_client above — presumably intentional for the SQL API; confirm.
sql = SQLClient(APIKeyAuthClient(args.CARTO_BASE_URL, args.CARTO_API_KEY))
# get username from base_url
# NOTE(review): `username` stays undefined when the URL does not match, and
# is never used afterwards in this chunk.
substring = re.search('https://(.+?).carto.com', args.CARTO_BASE_URL)
if substring:
    username = substring.group(1)
# check all table name of account
# Lists every ordinary table (relkind 'r') owned by the current user.
# NOTE(review): `all_tables` is never populated or used.
all_tables = []
tables = sql.send(
    "select pg_class.relname from pg_class, pg_roles, pg_namespace" +
    " where pg_roles.oid = pg_class.relowner and " +
    "pg_roles.rolname = current_user " +
    "and pg_namespace.oid = pg_class.relnamespace and pg_class.relkind = 'r'")
# Template query: rebuilds a CREATE TABLE statement for {table_name} from the
# catalog, skipping the CARTO-managed columns cartodb_id/the_geom_webmercator.
q = "select \
'CREATE TABLE ' || relname || E'\n(\n' || \
array_to_string( \
array_agg( \
' ' || column_name || ' ' || type || ' '|| not_null \
) \
, E',\n' \
) || E'\n);\n' as create_table \
from \
( \
select \
distinct on (column_name) c.relname, a.attname AS column_name, \
pg_catalog.format_type(a.atttypid, a.atttypmod) as type, \
case \
when a.attnotnull \
then 'NOT NULL' \
else 'NULL' \
END as not_null \
FROM pg_class c, \
pg_attribute a, \
pg_type t \
WHERE c.relname = '{table_name}' \
AND a.attnum > 0 \
AND a.attrelid = c.oid \
AND a.atttypid = t.oid \
and a.attname not in ('cartodb_id', 'the_geom_webmercator') \
ORDER BY column_name, a.attnum \
) as tabledefinition \
group by relname"
# Write one CREATE TABLE script per dataset into create_table.sql.
with open('create_table.sql', 'w') as f:
    for k, v in tables.items():
        if k != 'rows':
            continue
        for itr in v:
            # Fix: bind dataset_name before the try so the error message
            # cannot itself raise NameError when itr lacks 'relname'.
            dataset_name = itr.get('relname', '<unknown>')
            try:
                print("Found dataset: " + dataset_name)
                result = sql.send(q.format(table_name=dataset_name))
                create_table = result['rows'][0]['create_table']
                f.write(create_table + "\n")
            except Exception:
                # Narrowed from a bare `except:` (which also swallowed
                # KeyboardInterrupt/SystemExit); best-effort export continues.
                print("Error while exporting: " + dataset_name)
                continue
# (The redundant f.close() after the `with` block was removed.)
print('\nScript exported')
# ----------------------------------------------------------------------
import socket
import sys
import threading
import time
import pyaudio
# Command-line arguments: peer hostname, local receive port, peer send port.
recipient = sys.argv[1]
inPort = int(sys.argv[2])
outPort = int(sys.argv[3])
def poll(sock):
    """Receive UDP datagrams on `sock` forever, printing each as text."""
    while True:
        payload, _sender = sock.recvfrom(1024)
        print(payload.decode())
# One socket to receive on (bound locally) and one to send from.
inSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
outSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
host = socket.gethostname()
inSock.bind((host, inPort))
# Receive loop runs in a daemon thread so it dies with the main thread.
polling = threading.Thread(target = poll, args = (inSock,))
polling.daemon = True
polling.start()
# Main loop: read stdin lines and send them to the peer; "/q" quits.
while True:
    msg = input()
    if msg == "/q":
        sys.exit()
    else:
        outSock.sendto(str.encode(msg), (recipient, outPort))
# ----------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 13 09:14:27 2019
@author: hcji
"""
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # 这一行注释掉就是使用cpu,不注释就是使用gpu
import numpy as np
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, Add, concatenate, Conv1D, MaxPooling1D, Flatten
from keras.engine.topology import Layer
from keras import optimizers
from scipy.stats import pearsonr
'''
data_l = np.load('Data/data_l.npy')
data_r = np.load('Data/data_r.npy')
label = np.load('Data/data_label.npy')
test_l = np.load('Data/test_v1.npy')
test_r = np.load('Data/test_v2.npy')
test_label = np.load('Data/test_l.npy')
'''
class ZeroPadding(Layer):
    """Layer that replaces its input with an all-zeros tensor of the same
    shape — used to "mute" one view's branch when building the CorrNet.
    """
    def __init__(self, **kwargs):
        super(ZeroPadding, self).__init__(**kwargs)
    def call(self, x, mask=None):
        return K.zeros_like(x)
    # NOTE(review): `get_output_shape_for` is the Keras 1 API name; Keras 2
    # uses `compute_output_shape`.  Shape is unchanged either way, so this is
    # likely harmless — confirm against the installed Keras version.
    def get_output_shape_for(self, input_shape):
        return input_shape
class CorrnetCost(Layer):
    """Layer whose output IS the (scaled) correlation between two codes.

    The value returned by `call` is fed to the pass-through `corr_loss`
    below, so compiling it with that loss adds `lamda * sum(corr(h1, h2))`
    to the training objective (a negative `lamda` rewards correlation).
    """
    def __init__(self,lamda, **kwargs):
        super(CorrnetCost, self).__init__(**kwargs)
        self.lamda = lamda
    def cor(self,y1, y2, lamda):
        """Sum over dimensions of the per-dimension Pearson correlation
        between batches y1 and y2, multiplied by `lamda`."""
        y1_mean = K.mean(y1, axis=0)
        y1_centered = y1 - y1_mean
        y2_mean = K.mean(y2, axis=0)
        y2_centered = y2 - y2_mean
        corr_nr = K.sum(y1_centered * y2_centered, axis=0)
        # 1e-8 guards against division by zero for constant dimensions.
        corr_dr1 = K.sqrt(K.sum(y1_centered * y1_centered, axis=0) + 1e-8)
        corr_dr2 = K.sqrt(K.sum(y2_centered * y2_centered, axis=0) + 1e-8)
        corr_dr = corr_dr1 * corr_dr2
        corr = corr_nr / corr_dr
        return K.sum(corr) * lamda
    def call(self ,x ,mask=None):
        h1=x[0]
        h2=x[1]
        corr = self.cor(h1,h2,self.lamda)
        #self.add_loss(corr,x)
        #we output junk but be sure to use it for the loss to be added
        return corr
    def get_output_shape_for(self, input_shape):
        #print input_shape[0][0]
        return (input_shape[0][0],input_shape[0][1])
def corr_loss(y_true, y_pred):
    """Pass-through loss: the model output already IS the loss value.

    `y_true` (dummy targets fed at fit time) is deliberately ignored.
    """
    return y_pred
class CorrNet:
    """Common-representation autoencoder over two views (CorrNet).

    Two dense encoder branches (one per view) are summed into a shared code
    `h`; decoders reconstruct both views from `h`.  Training uses three input
    configurations (left only, right only, both views) with MSE
    reconstruction losses, plus a correlation term (weight `Lambda`) that
    pulls the two single-view codes h1 and h2 together.
    """
    def __init__(self, data_l, data_r, Lambda=0.02, nb_epoch=10):
        self.data_l = data_l
        self.data_r = data_r
        self.Lambda = Lambda
        self.nb_epoch = nb_epoch
        dimx = self.data_l.shape[1]
        dimy = self.data_r.shape[1]
        inpx = Input(shape=(dimx,))
        inpy = Input(shape=(dimy,))
        # Encoder branches: 256 -> 128 units per view.
        hx = Dense(256, activation='relu')(inpx)
        hx = Dense(128, activation='relu')(hx)
        hy = Dense(256, activation='relu')(inpy)
        hy = Dense(128, activation='relu')(hy)
        # Shared representation is the sum of the two branch codes.
        h = Add()([hx,hy])
        # Decoders back to each view's original dimensionality.
        recx = Dense(128, activation='relu')(h)
        recx = Dense(256, activation='relu')(recx)
        recx = Dense(dimx, activation='relu')(recx)
        recy = Dense(128, activation='relu')(h)
        recy = Dense(256, activation='relu')(recy)
        recy = Dense(dimy, activation='relu')(recy)
        branchModel = Model([inpx,inpy], [recx,recy,h])
        # Three passes: left-only, right-only and both views (the missing
        # view is zeroed out via ZeroPadding).
        [recx1,recy1,h1] = branchModel([inpx, ZeroPadding()(inpy)])
        [recx2,recy2,h2] = branchModel([ZeroPadding()(inpx), inpy])
        [recx3,recy3,h] = branchModel([inpx, inpy])
        # Negative Lambda: minimizing -corr maximizes correlation.
        corr = CorrnetCost(-Lambda)([h1,h2])
        opt = optimizers.Adam(lr=0.01)
        model = Model([inpx,inpy],[recx1,recx2,recx3,recy1,recy2,recy3,corr])
        model.compile(loss=["mse","mse","mse","mse","mse","mse",corr_loss], optimizer=opt)
        self.model = model
        self.branchModel = branchModel
    def train(self):
        """Fit on the stored views; the corr output gets dummy targets."""
        data_l = self.data_l
        data_r = self.data_r
        nb_epoch = self.nb_epoch
        self.model.fit([data_l, data_r],
                       [data_l,data_l,data_l,data_r,data_r,data_r,np.ones(data_l.shape)], epochs=nb_epoch)
    def left_to_right(self, new_data_l):
        """Reconstruct the right view from the left view only."""
        branchModel = self.branchModel
        _,new_data_r,_ = branchModel.predict([new_data_l, np.zeros(new_data_l.shape)])
        return new_data_r
    def right_to_left(self, new_data_r):
        """Reconstruct the left view from the right view only."""
        branchModel = self.branchModel
        new_data_l,_,_ = branchModel.predict([np.zeros(new_data_r.shape), new_data_r])
        return new_data_l
    def left_to_latent(self, new_data_l):
        """Shared code computed from the left view only."""
        branchModel = self.branchModel
        _,_,h = branchModel.predict([new_data_l, np.zeros(new_data_l.shape)])
        return h
    def right_to_latent(self, new_data_r):
        """Shared code computed from the right view only."""
        branchModel = self.branchModel
        _,_,h = branchModel.predict([np.zeros(new_data_r.shape), new_data_r])
        return h
    def both_to_latent(self, new_data_l, new_data_r):
        """Shared code computed from both views."""
        branchModel = self.branchModel
        _,_,h = branchModel.predict([new_data_l, new_data_r])
        return h
### Some modifications based on the original model
### Not evaluate performance
class CorrTarget:
    """Supervised variant of CorrNet: instead of reconstructing the views,
    the shared code feeds a scalar regression head `t`, trained on three
    input configurations (left only, right only, both) with MSE plus the
    same correlation regularizer on the single-view codes.
    """
    def __init__(self, data_l, data_r, target, Lambda=0.02, nb_epoch=10):
        self.data_l = data_l
        self.data_r = data_r
        self.target = target
        self.nb_epoch = nb_epoch
        self.Lambda = Lambda
        dimx = self.data_l.shape[1]
        dimy = self.data_r.shape[1]
        inpx = Input(shape=(dimx,))
        inpy = Input(shape=(dimy,))
        # Encoder branches: 256 -> 128 units per view.
        hx = Dense(256, activation='relu')(inpx)
        hx = Dense(128, activation='relu')(hx)
        hy = Dense(256, activation='relu')(inpy)
        hy = Dense(128, activation='relu')(hy)
        # Shared representation is the sum of the two branch codes.
        h = Add()([hx,hy])
        # Regression head predicting the scalar target.
        t = Dense(64, activation='relu')(h)
        t = Dense(32, activation='relu')(t)
        t = Dense(1, activation='relu')(t)
        branchModel = Model([inpx,inpy], [t,h])
        # Left-only / right-only / both-views passes.
        [t1,h1] = branchModel([inpx, ZeroPadding()(inpy)])
        [t2,h2] = branchModel([ZeroPadding()(inpx), inpy])
        [t3,h] = branchModel([inpx, inpy])
        corr = CorrnetCost(-Lambda)([h1,h2])
        opt = optimizers.Adam(lr=0.01)
        model = Model([inpx,inpy],[t1, t2, t3, corr])
        model.compile(loss=["mse","mse","mse",corr_loss], optimizer=opt)
        self.model = model
        self.branchModel = branchModel
    def train(self):
        """Fit on both views; the corr output gets dummy targets."""
        data_l = self.data_l
        data_r = self.data_r
        target = self.target
        nb_epoch = self.nb_epoch
        self.model.fit([data_l, data_r],
                       [target, target, target, np.ones(data_l.shape)], epochs=nb_epoch)
    def predict_by_left(self, new_data_l):
        """Predict the target from the left view only."""
        branchModel = self.branchModel
        t,_ = branchModel.predict([new_data_l, np.zeros(new_data_l.shape)])
        return t
    def predict_by_right(self, new_data_r):
        """Predict the target from the right view only."""
        branchModel = self.branchModel
        t,_ = branchModel.predict([np.zeros(new_data_r.shape), new_data_r])
        return t
    def predict_by_both(self, new_data_l, new_data_r):
        """Predict the target from both views."""
        branchModel = self.branchModel
        t,_ = branchModel.predict([new_data_l, new_data_r])
        return t
    def left_to_latent(self, new_data_l):
        """Shared code computed from the left view only."""
        branchModel = self.branchModel
        _,h = branchModel.predict([new_data_l, np.zeros(new_data_l.shape)])
        return h
    def right_to_latent(self, new_data_r):
        """Shared code computed from the right view only."""
        branchModel = self.branchModel
        _,h = branchModel.predict([np.zeros(new_data_r.shape), new_data_r])
        return h
    def both_to_latent(self, new_data_l, new_data_r):
        """Shared code computed from both views."""
        branchModel = self.branchModel
        _,h = branchModel.predict([new_data_l, new_data_r])
        return h
class ConvCorrTarget:
    """Conv1D variant of CorrTarget: convolutional encoders per view, a
    shared (summed) code, and a scalar regression head, trained with the
    same correlation regularizer on the single-view codes.
    """
    def __init__(self, data_l, data_r, target, Lambda=0.02, nb_epoch=10):
        self.data_l = data_l
        self.data_r = data_r
        self.target = target
        self.nb_epoch = nb_epoch
        self.Lambda = Lambda
        # Inputs are (length, channels) sequences.
        dimx = self.data_l.shape[1:3]
        dimy = self.data_r.shape[1:3]
        inpx = Input(shape=dimx)
        inpy = Input(shape=dimy)
        # Two conv/pool stages per view, then flatten.
        hx = Conv1D(64, 3, activation='relu', kernel_initializer='normal')(inpx)
        hx = MaxPooling1D(2)(hx)
        hx = Conv1D(32, 3, activation='relu', kernel_initializer='normal')(hx)
        hx = MaxPooling1D(2)(hx)
        hy = Conv1D(64, 3, activation='relu', kernel_initializer='normal')(inpy)
        hy = MaxPooling1D(2)(hy)
        hy = Conv1D(32, 3, activation='relu', kernel_initializer='normal')(hy)
        hy = MaxPooling1D(2)(hy)
        hx = Flatten()(hx)
        hy = Flatten()(hy)
        # Shared representation is the sum of the two branch codes.
        h = Add()([hx, hy])
        # Regression head predicting the scalar target.
        t = Dense(16, activation='relu')(h)
        t = Dense(1, activation='relu')(t)
        branchModel = Model([inpx, inpy], [t, h])
        # Left-only / right-only / both-views passes.
        [t1, h1] = branchModel([inpx, ZeroPadding()(inpy)])
        [t2, h2] = branchModel([ZeroPadding()(inpx), inpy])
        [t3, h] = branchModel([inpx, inpy])
        corr = CorrnetCost(-Lambda)([h1, h2])
        opt = optimizers.Adam(lr=0.01)
        model = Model([inpx, inpy], [t1, t2, t3, corr])
        model.compile(loss=["mse", "mse", "mse", corr_loss], optimizer=opt)
        self.model = model
        self.branchModel = branchModel
    def train(self):
        """Fit on both views; the corr output gets dummy targets."""
        data_l = self.data_l
        data_r = self.data_r
        target = self.target
        nb_epoch = self.nb_epoch
        self.model.fit([data_l, data_r],
                       [target, target, target, np.ones(data_l.shape)], epochs=nb_epoch)
    def predict_by_left(self, new_data_l):
        """Predict the target from the left view only."""
        t, _ = self.branchModel.predict([new_data_l, np.zeros(new_data_l.shape)])
        return t
    def predict_by_right(self, new_data_r):
        """Predict the target from the right view only."""
        t, _ = self.branchModel.predict([np.zeros(new_data_r.shape), new_data_r])
        return t
    def predict_by_both(self, new_data_l, new_data_r):
        """Predict the target from both views."""
        t, _ = self.branchModel.predict([new_data_l, new_data_r])
        return t
    def left_to_latent(self, new_data_l):
        """Shared code computed from the left view only."""
        _, h = self.branchModel.predict([new_data_l, np.zeros(new_data_l.shape)])
        return h
    def right_to_latent(self, new_data_r):
        """Shared code computed from the right view only."""
        _, h = self.branchModel.predict([np.zeros(new_data_r.shape), new_data_r])
        return h
    def both_to_latent(self, new_data_l, new_data_r):
        """Shared code computed from both views.

        Fix: the original return line was corrupted by extraction residue
        ('return h | 10,450 | 3,970 |', which parses as a garbage tuple);
        restored to 'return h'.
        """
        _, h = self.branchModel.predict([new_data_l, new_data_r])
        return h
has_high_income = True
has_good_credit = True
# Loan decision: both conditions must hold to qualify.
if not (has_high_income and has_good_credit):
    print("Ineligible for loan")
else:
    print("Eligible for loan")
# ----------------------------------------------------------------------
from djcelery.models import WorkerState
from django.views.generic import ListView
import datetime
class MonitorListView(ListView):
    """List Celery workers whose last heartbeat is within the past 5 minutes."""
    model = WorkerState
    # todo paginate the list so we don't dump everything

    def get_queryset(self):
        # Fix: the original class-level `queryset = ...utcnow() - 5min`
        # evaluated utcnow() once at import time, so the cutoff was frozen
        # and the view returned stale results.  Recompute it per request.
        cutoff = datetime.datetime.utcnow() - datetime.timedelta(minutes=5)
        return WorkerState.objects.filter(last_heartbeat__gte=cutoff)

    def dispatch(self, request, *args, **kwargs):
        return super(MonitorListView, self).dispatch(request, *args, **kwargs)
# ----------------------------------------------------------------------
"""
Init module for main serializers
"""
from kaznet.apps.main.serializers.bounty import BountySerializer # noqa
from kaznet.apps.main.serializers.client import ClientSerializer # noqa
from kaznet.apps.main.serializers.contenttype import KaznetContentTypeSerializer # noqa
from kaznet.apps.main.serializers.locations import KaznetLocationSerializer # noqa
from kaznet.apps.main.serializers.occurences import KaznetTaskOccurrenceSerializer # noqa
from kaznet.apps.main.serializers.submissions import (KaznetSubmissionSerializer, # noqa
SubmissionExportSerializer) # noqa
from kaznet.apps.main.serializers.task_location import (TaskLocationCreateSerializer, # noqa
TaskLocationSerializer)
from kaznet.apps.main.serializers.locationttypes import KaznetLocationTypeSerializer # noqa
from kaznet.apps.main.serializers.tasks import KaznetTaskSerializer # noqa
# ----------------------------------------------------------------------
import torch
import torchvision.models as models
from torch.profiler import profile, record_function, ProfilerActivity
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda, Compose
import matplotlib.pyplot as plt
from signal_perceptron import *
from sp_paper_models import *
import time
from train import *
#Loading datasets:
#######################################################################################################################
# MNIST and FashionMNIST, downloaded into ./data on first run.
training_data_mnist = datasets.MNIST(
    root="data",
    train=True,
    download=True,
    transform=ToTensor(),
)
# Download test data from open datasets.
test_data_mnist = datasets.MNIST(
    root="data",
    train=False,
    download=True,
    transform=ToTensor(),
)
training_data_f_mnist = datasets.FashionMNIST(
    root="data",
    train=True,
    download=True,
    transform=ToTensor(),
)
# Download test data from open datasets.
test_data_f_mnist = datasets.FashionMNIST(
    root="data",
    train=False,
    download=True,
    transform=ToTensor(),
)
batch_size=64
# Create data loaders (default sequential sampling, no shuffling).
train_mnist_dataloader = DataLoader(training_data_mnist, batch_size=batch_size)
test_mnist_dataloader = DataLoader(test_data_mnist, batch_size=batch_size)
train_f_mnist_dataloader = DataLoader(training_data_f_mnist, batch_size=batch_size)
test_f_mnist_dataloader = DataLoader(test_data_f_mnist, batch_size=batch_size)
def _fit_and_report(title, train_loader, test_loader, model, loss_fn, optimizer, device, epochs):
    """Train `model` for `epochs` epochs and print a summary.

    Prints the title, per-epoch headers, the mean backprop time, the
    final-epoch metrics and the best epoch as
    [epoch, accuracy, test_loss, train_loss] (lowest test loss wins).
    """
    print(title)
    optimal_epoch = []
    time_backprop = []
    accuracy = loss1 = loss2 = None
    for t in range(epochs):
        print(f"Epoch {t+1}\n-------------------------------")
        loss1, tb = train_mnist(train_loader, model, loss_fn, optimizer, device)
        accuracy, loss2 = test_mnist(test_loader, model, loss_fn, device)
        # Keep the epoch with the lowest test loss seen so far.
        if not optimal_epoch or optimal_epoch[2] > loss2:
            optimal_epoch = [t, accuracy, loss2, loss1]
        time_backprop.append(tb)
    print("Backprop time:")
    print(np.mean(np.concatenate(time_backprop)))
    print("Final epoch:")
    print(epochs, accuracy, loss2, loss1)
    print("Optimal epoch:")
    print(optimal_epoch)
def full_analysis_train(train_mnist_dataloader,test_mnist_dataloader,train_f_mnist_dataloader,test_f_mnist_dataloader):
    """Benchmark and train four models (FSP128/FSP512/MLP1/MLP2) on MNIST
    and FashionMNIST.

    Reports learnable-parameter counts, forward-pass timings (wall clock,
    the Timer helper and torch.profiler), then runs all training campaigns.
    The freshly-initialized weights are saved and restored so both datasets
    start from identical initializations.
    """
    # Loading Models
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print("Using {} device".format(device))
    fsp128 = FSP_mnist(128).to(device)
    fsp = FSP_mnist().to(device)
    mlp1 = MLP1_mnist().to(device)
    mlp2 = MLP2_mnist().to(device)
    # Save the initial parameter values so the FashionMNIST runs restart
    # from exactly the same weights as the MNIST runs.
    PATH = "data/models/idm_FSP128_mnist.pt"
    torch.save(fsp128.state_dict(), PATH)
    PATH1 = "data/models/idm_FSP512_mnist.pt"
    torch.save(fsp.state_dict(), PATH1)
    PATH2 = "data/models/idm_MLP1_mnist.pt"
    torch.save(mlp1.state_dict(), PATH2)
    PATH3 = "data/models/idm_MLP2_mnist.pt"
    torch.save(mlp2.state_dict(), PATH3)
    # MODEL PROPERTIES: learnable parameter counts.
    fsp128_params = sum(np.prod(p.size()) for p in fsp128.parameters() if p.requires_grad)
    fsp_params = sum(np.prod(p.size()) for p in fsp.parameters() if p.requires_grad)
    mlp1_params = sum(np.prod(p.size()) for p in mlp1.parameters() if p.requires_grad)
    mlp2_params = sum(np.prod(p.size()) for p in mlp2.parameters() if p.requires_grad)
    print("Learnable Parameters for MNIST models:")
    print("FSP128 \t FSP512 \t MLP 1 hidden \t MLP 2 hidden")
    print(fsp128_params,"\t",fsp_params,"\t",mlp1_params,"\t",mlp2_params)
    # Forward-pass timing on a single sample.
    train_features, train_labels = next(iter(train_mnist_dataloader))
    inputs = train_features[0]
    inputs = inputs.to(device)
    print(train_features.size())
    print(inputs.size())
    # Warmup (first call pays one-off allocation/initialization costs).
    t2 = time.time()
    pred2 = mlp1(inputs)
    elapsed2 = time.time() - t2
    timer2 = Timer(mlp1, inputs)
    # Timed runs.
    t22 = time.time()
    pred22 = mlp1(inputs)
    elapsed22 = time.time() - t22
    timer22 = Timer(mlp1, inputs)
    t3 = time.time()
    pred3 = mlp2(inputs)
    elapsed3 = time.time() - t3
    timer3 = Timer(mlp2, inputs)
    t11 = time.time()
    pred1 = fsp128(inputs)
    elapsed11 = time.time() - t11
    timer11 = Timer(fsp128, inputs)
    t1 = time.time()
    pred1 = fsp(inputs)
    elapsed1 = time.time() - t1
    timer1 = Timer(fsp, inputs)
    print("Forward time for MNIST models:")
    print("FSP128 \t FSP512 \t MLP 1 hidden \t MLP 2 hidden")
    print(elapsed11,"\t",elapsed1,"\t",elapsed22,"\t",elapsed3)
    print("Forward time for MNIST models FS Timer class:")
    print("FSP128 \t FSP512 \t MLP 1 hidden \t MLP 2 hidden")
    print(timer11.mean(),"\t",timer1.mean(),"\t",timer22.mean(),"\t",timer3.mean())
    # Profiler (PyTorch only); the first mlp1 profile is a warmup and its
    # table is deliberately not printed.
    with profile(activities=[
            ProfilerActivity.CPU, ProfilerActivity.CUDA], record_shapes=True) as prof:
        with record_function("model_inference mlp1"):
            mlp1(inputs)
    with profile(activities=[
            ProfilerActivity.CPU, ProfilerActivity.CUDA], record_shapes=True) as prof:
        with record_function("model_inference mlp1"):
            mlp1(inputs)
    print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))
    with profile(activities=[
            ProfilerActivity.CPU, ProfilerActivity.CUDA], record_shapes=True) as prof:
        with record_function("model_inference mlp2"):
            mlp2(inputs)
    print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))
    with profile(activities=[
            ProfilerActivity.CPU, ProfilerActivity.CUDA], record_shapes=True) as prof:
        with record_function("model_inference fsp128"):
            fsp128(inputs)
    print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))
    with profile(activities=[
            ProfilerActivity.CPU, ProfilerActivity.CUDA], record_shapes=True) as prof:
        with record_function("model_inference fsp512"):
            fsp(inputs)
    print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))
    # MODELS TRAINING
    # Training hyperparameters; one optimizer per model.
    epochs = 9
    lr = .001
    loss_fn = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(fsp128.parameters(), lr=lr)
    optimizer1 = torch.optim.Adam(fsp.parameters(), lr=lr)
    optimizer2 = torch.optim.Adam(mlp1.parameters(), lr=lr)
    optimizer3 = torch.optim.Adam(mlp2.parameters(), lr=lr)
    print("Optimizer: Adam ,lr:" ,lr)
    print("Training models with MNIST DATASET :")
    # The eight near-identical training loops of the original are factored
    # into _fit_and_report (this also fixes the FSP512/MNIST loop, which
    # recorded its optimal epoch without the training loss).
    _fit_and_report("Fourier Signal Perceptron 128", train_mnist_dataloader,
                    test_mnist_dataloader, fsp128, loss_fn, optimizer, device, epochs)
    _fit_and_report("Fourier Signal Perceptron 512", train_mnist_dataloader,
                    test_mnist_dataloader, fsp, loss_fn, optimizer1, device, epochs)
    _fit_and_report("MLP 1 hidden layer Signal Perceptron", train_mnist_dataloader,
                    test_mnist_dataloader, mlp1, loss_fn, optimizer2, device, epochs)
    _fit_and_report("MLP 2 hidden layer Signal Perceptron", train_mnist_dataloader,
                    test_mnist_dataloader, mlp2, loss_fn, optimizer3, device, epochs)
    print("Training models with FashionMNIST DATASET :")
    # Restore the initial parameters for a fair comparison.
    # NOTE(review): the Adam optimizers keep their internal state across the
    # two datasets (as in the original) — confirm this is intended.
    fsp128.load_state_dict(torch.load(PATH))
    fsp.load_state_dict(torch.load(PATH1))
    mlp1.load_state_dict(torch.load(PATH2))
    mlp2.load_state_dict(torch.load(PATH3))
    _fit_and_report("Fourier Signal Perceptron 128", train_f_mnist_dataloader,
                    test_f_mnist_dataloader, fsp128, loss_fn, optimizer, device, epochs)
    _fit_and_report("Fourier Signal Perceptron 512", train_f_mnist_dataloader,
                    test_f_mnist_dataloader, fsp, loss_fn, optimizer1, device, epochs)
    _fit_and_report("MLP 1 hidden layer", train_f_mnist_dataloader,
                    test_f_mnist_dataloader, mlp1, loss_fn, optimizer2, device, epochs)
    _fit_and_report("MLP 2 hidden layer", train_f_mnist_dataloader,
                    test_f_mnist_dataloader, mlp2, loss_fn, optimizer3, device, epochs)
import sys
from contextlib import redirect_stdout
# Run the full analysis N times, logging each run's stdout into its own file.
print("This experiment is gona be run ",sys.argv[-1], " times:")
n = int(sys.argv[-1])
for i in range(n):
    out = "data/experiments/exp2/" + "run" + str(i + 1) + "/" + "mnist_dataset_log" + ".txt"
    # Fix: the original swapped sys.stdout by hand with no try/finally, so a
    # failing run left stdout redirected and the log file open.  The context
    # managers restore stdout and close the file even on error.
    with open(out, 'w') as f, redirect_stdout(f):
        full_analysis_train(train_mnist_dataloader, test_mnist_dataloader,
                            train_f_mnist_dataloader, test_f_mnist_dataloader)
# ----------------------------------------------------------------------
from openpyxl.styles import PatternFill
# Color dict for background fill
# Background-fill styles, keyed by human-readable color name.
COLORS = {
    name: PatternFill(fgColor=code, fill_type='solid')
    for name, code in (
        ('red', 'FF0000'),
        ('green', '00b050'),
        ('orange', 'FFC000'),
        ('yellow', 'FFFF00'),
        ('gray', 'C0C0C0'),
    )
}
# ----------------------------------------------------------------------
from __future__ import annotations
import asyncio
from datetime import datetime
from typing import (
TYPE_CHECKING,
Union,
Optional
)
import discord
from .Base import DatabaseChecker
from .Punishments import Punisher
if TYPE_CHECKING:
from .Punishments import Punishment
from discord.ext import commands
class UnbanFailure(Exception):
    """Raised when trying to unban a discord.User without passing the guild."""
class BanManager(DatabaseChecker, Punisher):
    """Tracks temporary bans in the database and lifts them when they expire.

    A background task (started once the database connects) polls every 300
    seconds for bans whose expiry timestamp has passed and unbans those
    members, firing the "on_unban" event.
    """
    def __init__(self, bot: commands.Bot):
        # One 'bans' table: guild id, member id, reason and the (naive UTC)
        # epoch timestamp at which the ban should be lifted.
        super().__init__([{'guild': "snowflake", 'member': "snowflake", 'reason': "string", 'timestamp': "snowflake"}],
                         ['bans'])
        self.bot = bot
        self.add_event(self.on_database_connect)
    async def on_database_connect(self):
        # Start the expiry-polling loop as soon as storage is available.
        self.bot.loop.create_task(self.__check_bans())
    async def get_banned_members(self):
        """
        Return all ban rows whose expiry timestamp has already passed, i.e.
        members that are supposed to be unbanned but are still banned.
        """
        # Timestamps are naive utcnow() values, matching how ban() wrote them.
        return [x for x in await self.database.select(self.tables['bans'], [], fetchall=True)
                if x["timestamp"] <= datetime.utcnow().timestamp()]
    async def __check_bans(self) -> None:
        """Background loop: every 300s, unban members whose bans expired."""
        await self.bot.wait_until_ready()
        while not self.bot.is_closed():
            for banned_member in await self.get_banned_members():
                guild = self.bot.get_guild(banned_member['guild'])
                if guild is None:
                    # Bot no longer sees this guild (or cache miss); skip.
                    continue
                user = await self.bot.fetch_user(banned_member['member'])
                if await self.unban(user, guild):
                    await self.call_event("on_unban", user, banned_member['reason'])
            await asyncio.sleep(300)
    async def punish(self, ctx: commands.Context, member: discord.Member, punishment: Punishment) -> None:
        """Punisher hook: ban `member` per `punishment` (runs as its own task,
        because ban() sleeps for the whole ban duration)."""
        try:
            self.bot.loop.create_task(
                self.ban(member, punishment.punishment_reason, punishment.punishment_time.total_seconds())
            )
        except discord.errors.Forbidden as e:
            raise e
        else:
            await self.call_event("on_punishment", ctx, member, punishment)
    @staticmethod
    async def get_ban(member: Union[discord.Member, discord.User], guild: discord.Guild) -> Optional[discord.User]:
        """Return the banned user matching `member` in `guild`, or None."""
        # NOTE(review): awaits guild.bans() as a list (discord.py 1.x API);
        # newer discord.py versions return an async iterator here — confirm.
        banned = await guild.bans()
        for x in banned:
            if x.user.id == member.id:
                return x.user
    async def unban(self, member: Union[discord.Member, discord.User], guild: discord.Guild = None) -> bool:
        """Delete the stored ban row and, if the member is actually banned,
        unban them.  Returns True only when a guild-level unban happened.

        Raises UnbanFailure for a discord.User when no guild is given
        (a bare User carries no .guild attribute).
        """
        self._check_database()
        if isinstance(member, discord.User) and not guild:
            raise UnbanFailure("Cannot unban a discord.User without a guild.")
        guild = guild if guild is not None else member.guild
        await self.database.delete(self.tables['bans'], {'guild': guild.id, 'member': member.id})
        if user := await self.get_ban(member, guild):
            await guild.unban(user)
            return True
    async def ban(self,
                  member: discord.Member,
                  reason: str = "No reason provided.",
                  time_of_ban: Union[int, float] = 0) -> None:
        """Ban `member`; when `time_of_ban` (seconds) is positive, record the
        expiry in the database, sleep for the duration, then unban.

        NOTE(review): this coroutine blocks for the full ban length, so it
        must be scheduled as an independent task (see punish()).
        """
        self._check_database()
        await member.ban(reason=reason)
        if time_of_ban <= 0:
            # Permanent ban: nothing to schedule.
            return
        await self.database.insert(self.tables['bans'], {'guild': member.guild.id,
                                                         'member': member.id,
                                                         'reason': reason,
                                                         'timestamp': datetime.utcnow().timestamp() + time_of_ban})
        await asyncio.sleep(time_of_ban)
        if await self.unban(member):
            await self.call_event("on_unban", member, reason)
# ----------------------------------------------------------------------
# languages = [
# ("EL", "EL"),
# ("EN", "EN"),
# ]
# gender = [
# ("", "---"),
# ("M", "Άρρεν / Male"),
# ("F", "Θήλυ / Female"),
# ]
#
# age = [
# ("", "---"),
# ("u17", "-17"),
# ("18-25", "18-25"),
# ("26-35", "26-35"),
# ("36-45", "36-45"),
# ("46-54", "46-54"),
# ("55-64", "55-64"),
# ("65+", "65+"),
# ]
#
# education = [
# ("", "---"),
# ("gymnasium", "Γυμνάσιο / Gymnasium"),
# ("lyceum", "Λύκειο - Τεχνική Σχολή / Lyceum"),
# ("BSc", "Πτυχίο / Bachelor"),
# ("MSc", "Μεταπτυχιακό / Master's"),
# ("PhD", "Διδακτορικό / Doctorate"),
# ]
#
# district = [
# ("", "---"),
# ("lemesos", "Λεμεσός / Limassol"),
# ("nicosia", "Λευκωσία / Nicosia"),
# ("larnaca", "Λάρνακα / Larnaca"),
# ("paphos", "Πάφος / Paphos"),
# ("ammochostos", "Αμμόχωστος / Ammochostos"),
# ("kyrenia", "Κερύνεια / Kyrenia"),
# ]
#
# residence = [
# ("", "---"),
# ("city", "Πόλη / City"),
# ("suburb", "Προάστιο / Suburb"),
# ("village", "Χωριό / Village"),
# ]
#
# answer_category = [
# ("boolean", "True / False"),
# ("likert6", "6 point Likert"),
# ("likert7", "7 point Likert"),
# ]
# ----------------------------------------------------------------------
import mogp_emulator
import numpy as np
# simple Dimension Reduction examples
# simulator function -- returns a single "important" dimension from
# at least 4 inputs
def f(x):
    """Toy simulator: a single linear combination of inputs 0, 1 and 3."""
    combination = x[0] - x[1] + 2.0 * x[3]
    return combination / 3.0
# Experimental design -- create a design with 5 input parameters
# all uniformly distributed over [0,1].
ed = mogp_emulator.LatinHypercubeDesign(5)
# sample space
inputs = ed.sample(100)
# run simulation
targets = np.array([f(p) for p in inputs])
###################################################################################
# First example -- dimension reduction given a specified number of dimensions
# (note that in real life, we do not know that the underlying simulation only
# has a single dimension)
print("Example 1: Basic Dimension Reduction")
# create DR object with a single reduced dimension (K = 1)
dr = mogp_emulator.gKDR(inputs, targets, K=1)
# use it to create GP
gp = mogp_emulator.fit_GP_MAP(dr(inputs), targets)
# create 5 target points to predict
predict_points = ed.sample(5)
predict_actual = np.array([f(p) for p in predict_points])
means = gp(dr(predict_points))
for pp, m, a in zip(predict_points, means, predict_actual):
    print("Target point: {} Predicted mean: {} Actual mean: {}".format(pp, m, a))
###################################################################################
# Second Example: Estimate dimensions from data
print("Example 2: Estimate the number of dimensions from the data")
# Use the tune_parameters method to use cross validation to create DR object
# Note this is more realistic than the above as it does not know the
# number of dimensions in advance
dr_tuned, loss = mogp_emulator.gKDR.tune_parameters(inputs, targets,
                                                    mogp_emulator.fit_GP_MAP,
                                                    cXs=[3.], cYs=[3.])
# Get number of inferred dimensions (usually gives 2)
print("Number of inferred dimensions is {}".format(dr_tuned.K))
# use object to create GP
gp_tuned = mogp_emulator.fit_GP_MAP(dr_tuned(inputs), targets)
# create 5 target points to predict (comment previously claimed 10)
predict_points = ed.sample(5)
predict_actual = np.array([f(p) for p in predict_points])
means = gp_tuned(dr_tuned(predict_points))
for pp, m, a in zip(predict_points, means, predict_actual):
    print("Target point: {} Predicted mean: {} Actual mean: {}".format(pp, m, a))
| 2,393 | 738 |
class ReusableMongodbConnectionError(ValueError):
    """Error raised by the reusable_mongodb_connection helper."""

    _DEFAULT_MESSAGE = "An error occurred in reusable_mongodb_connection"

    def __init__(self, description: str = _DEFAULT_MESSAGE, *args, **kwargs):
        # Forward everything to ValueError so str(exc) is the description.
        super().__init__(description, *args, **kwargs)
| 255 | 68 |
from flask import Flask, render_template, request, redirect, url_for, session
from flask_mysqldb import MySQL,MySQLdb
from os import path
from notifypy import Notify
app = Flask(__name__)
# MySQL connection settings for flask_mysqldb.
# NOTE(review): hard-coded credentials with an empty root password — move
# these to environment variables / a config file before deploying.
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = ''
app.config['MYSQL_DB'] = 'proyecto_p'
app.config['MYSQL_CURSORCLASS'] = 'DictCursor'  # rows come back as dicts
mysql = MySQL(app)
@app.route('/')
def home():
    """Serve the public landing page."""
    template_name = "contenido.html"
    return render_template(template_name)
@app.route('/layout', methods = ["GET", "POST"])
def layout():
    """Drop any active session and show the public content page."""
    session.clear()
    template_name = "contenido.html"
    return render_template(template_name)
@app.route('/login', methods= ["GET", "POST"])
def login():
    """Authenticate a user by email/password and route them by role.

    Role 1 -> teacher home, role 2 -> student home; any other role (or a
    failed login) re-renders the login form.
    """
    notificacion = Notify()
    if request.method != 'POST':
        return render_template("login.html")
    email = request.form['email']
    password = request.form['password']
    cur = mysql.connection.cursor()
    cur.execute("SELECT * FROM users WHERE email=%s", (email,))
    user = cur.fetchone()
    cur.close()
    if user is None:
        notificacion.title = "Error de Acceso"
        notificacion.message = "El usuario no se encuentra registrado"
        notificacion.send()
        return render_template("login.html")
    # NOTE(review): passwords are stored and compared in plain text; switch
    # to salted hashes (e.g. werkzeug.security) — requires a data migration.
    if password == user["password"]:
        session['name'] = user['name']
        session['email'] = user['email']
        session['tipo'] = user['id_tip_usu']
        if session['tipo'] == 1:
            return render_template("docente/home.html")
        elif session['tipo'] == 2:
            return render_template("estudiante/homeTwo.html")
        # FIX: an unrecognized role previously fell through and returned
        # None, which Flask turns into a 500 error.
        return render_template("login.html")
    notificacion.title = "Error de Acceso"
    notificacion.message = "Correo y/o contraseña incorrectos"
    notificacion.send()
    return render_template("login.html")
@app.route('/registro', methods = ["GET", "POST"])
def registro():
    """Show the sign-up form (GET) or create a new user (POST)."""
    cur = mysql.connection.cursor()
    cur.execute("SELECT * FROM tip_usu")
    tipo = cur.fetchall()
    cur.close()  # FIX: this cursor was never closed
    notificacion = Notify()
    if request.method == 'GET':
        return render_template("registro.html", tipo = tipo)
    name = request.form['name']
    email = request.form['email']
    password = request.form['password']
    tip = request.form['tipo']
    # NOTE(review): the password is stored in plain text — hash it before
    # inserting (and update login() to match).
    cur = mysql.connection.cursor()
    cur.execute("INSERT INTO users (name, email, password, id_tip_usu) VALUES (%s,%s,%s,%s)", (name, email, password,tip,))
    mysql.connection.commit()
    cur.close()  # FIX: this cursor was never closed
    notificacion.title = "Registro Exitoso"
    notificacion.message="Ya se encuentra registrado, por favor inicie sesión para ingresar a la plataforma"
    notificacion.send()
    return redirect(url_for('login'))
if __name__ == '__main__':
    # NOTE(review): hard-coded secret key and debug=True are acceptable for
    # local development only.
    app.secret_key = "sllave"
    app.run(debug=True)
from model import issuedb as idb
import os
from model import util, work_path
__GENERATE__ = False  # flip to True when regenerating the table/file mapping
if __GENERATE__:
    SRC_DIR = 'tsv/'
    TEST_DIR = 'tsv_test/'
    TSV_FILE = work_path.in_project('./model/conf/tab_url.tsv')
# NOTE(review): TSV_FILE appears to be bound only when __GENERATE__ is True —
# confirm this module still imports cleanly with the flag off.
__data_tsv = util.read_tsv(TSV_FILE)
def generate_lookup_table():
    """Print a tab-separated mapping between DB tables and TSV files.

    Tables with no matching file print with a NULL file; files matching no
    table print with a NULL table. Intended as a helper for building
    tab_url.tsv by hand.
    """
    db = idb.ISSuedb()
    rows = db.db_retrieve("select name from sqlite_master where type='table' order by name;")
    table_dict = {row[0].replace("$", "_"): row[0] for row in rows}
    candidates = [os.path.join(SRC_DIR, name) for name in os.listdir(SRC_DIR)]
    candidates += [os.path.join(TEST_DIR, name) for name in os.listdir(TEST_DIR)]
    matched = {path: False for path in candidates}
    reload = util.Reload(TSV_FILE)
    for key, table_name in table_dict.items():
        hit = next((path for path in candidates if key in path), None)
        if hit is None:
            print("{}\tNULL".format(table_name))
        else:
            print("{}\t{}".format(table_name, hit))
            matched[hit] = True
    for path, seen in matched.items():
        if not seen:
            print("NULL\t{}".format(path))
    db.db_close()
def table2file(table_name):
    """Return the TSV file mapped to *table_name*, or None if unmapped."""
    for row in __data_tsv:
        if row[0] == table_name:
            return row[1]
def file2table(file_name):
    """Return the table mapped to *file_name* (compared by basename), or None."""
    target = os.path.basename(file_name)
    for row in __data_tsv:
        if os.path.basename(row[1]) == target:
            return row[0]
if __name__ == '__main__':
    # generate_lookup_table()
    # Smoke-check both lookup directions.
    print(table2file("duckduckgo$Android"))
    print(file2table("farmerbb_Notepad_master.tsv"))
| 1,751 | 645 |
from flask import redirect, render_template, request
from . import main
@main.route('/', methods=['GET'])
def index():
    """Render the site's landing page."""
    return render_template("index.html")
@main.route('/shutdown')
def server_shutdown():
    """Stop the werkzeug development server (test/dev helper endpoint).

    Raises:
        RuntimeError: when not running under werkzeug's dev server (the
        environ hook is absent).
    """
    shutdown = request.environ.get('werkzeug.server.shutdown')
    if shutdown is None:
        # FIX: outside the dev server the hook is missing; calling None
        # previously raised an opaque TypeError.
        raise RuntimeError('Not running with the Werkzeug development server')
    shutdown()
    return 'Shutting down...'
| 318 | 99 |
class Error(Exception):
    """Base class for all HTTP-verb errors in this package."""
    pass
class GetError(Error):
    """Raised when a GET request fails."""
    pass
class HeadError(Error):
    """Raised when a HEAD request fails."""
    pass
class PostError(Error):
    """Raised when a POST request fails."""
    pass
class PatchError(Error):
    """Raised when a PATCH request fails."""
    pass
class PutError(Error):
    """Raised when a PUT request fails."""
    pass
class RunError(Error):
    """Raised when a run/execute operation fails."""
    pass
| 241 | 83 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 11 17:27:59 2020
@author: ladvien
"""
import sys
import os
import cv2
import numpy as np
from random import randint
import matplotlib.pyplot as plt
image_tools_path = "/home/ladvien/deep_arcane/"
sys.path.append(image_tools_path)
from image_utils import ImageUtils
iu = ImageUtils()
def noisy(noise_typ, image):
    """Return a noisy copy of *image* (an H x W x C ndarray).

    noise_typ:
        "gauss"   - additive Gaussian noise
        "s&p"     - salt & pepper (a few pixels forced to 1 / 0)
        "poisson" - Poisson-distributed counts scaled by the value range
        "speckle" - multiplicative Gaussian noise
    Raises ValueError for any other type (previously returned None silently).
    """
    if noise_typ == "gauss":
        row, col, ch = image.shape
        mean = 0
        var = 0.1
        sigma = var ** 0.5
        gauss = np.random.normal(mean, sigma, (row, col, ch))
        return image + gauss
    elif noise_typ == "s&p":
        s_vs_p = 0.5
        amount = 0.004
        out = np.copy(image)
        # Salt mode: set a few random coordinates to 1.
        num_salt = np.ceil(amount * image.size * s_vs_p)
        coords = [np.random.randint(0, i - 1, int(num_salt))
                  for i in image.shape]
        # FIX: a list of index arrays must be passed as a tuple; the old
        # out[coords] form is rejected by modern NumPy.
        out[tuple(coords)] = 1
        # Pepper mode: zero out a few random coordinates.
        num_pepper = np.ceil(amount * image.size * (1. - s_vs_p))
        coords = [np.random.randint(0, i - 1, int(num_pepper))
                  for i in image.shape]
        out[tuple(coords)] = 0
        return out
    elif noise_typ == "poisson":
        vals = len(np.unique(image))
        vals = 2 ** np.ceil(np.log2(vals))
        return np.random.poisson(image * vals) / float(vals)
    elif noise_typ == "speckle":
        row, col, ch = image.shape
        gauss = np.random.randn(row, col, ch)
        return image + image * gauss
    raise ValueError("Unknown noise type: {!r}".format(noise_typ))
#############
# Parameters
#############
input_path = "/home/ladvien/denoising_vae/data/extracted/"
output_path = "/home/ladvien/denoising_vae/data/train/"
threshold = 240          # NOTE(review): unused below
color_range = 30         # max per-channel value for speck colors (keeps them dark)
shape_range = 40         # NOTE(review): only seeds a radius that is later overwritten
size_range = 1           # speck radius range
num_pepper = 20          # speck clusters per image
specks_per_pepper = 10   # specks per cluster
group_range = 50         # max offset of a speck from its cluster center
image_shape = (64, 64)
show = True
#############
# Extract
#############
clear_img_path = f"{output_path}clear/"
noise_img_path = f"{output_path}noise/"
if not os.path.exists(clear_img_path):
    os.makedirs(clear_img_path)
if not os.path.exists(noise_img_path):
    os.makedirs(noise_img_path)
file_paths = iu.get_image_files_recursively(input_path)
counter = 0
# For each source image: write a clean resized copy and a speckled copy.
for file_path in file_paths:
    file_name = file_path.split("/")[-1]
    outpout_file_path = output_path + file_name  # NOTE(review): unused (and misspelled)
    clear_image = cv2.imread(file_path)
    clear_image = cv2.resize(clear_image, image_shape)
    _, image = cv2.threshold(clear_image, 127, 255, cv2.THRESH_BINARY)  # NOTE(review): result unused
    clear_image = cv2.cvtColor(clear_image, cv2.COLOR_BGR2RGB)
    noise_img = clear_image.copy()
    # Draw num_pepper clusters of small dark specks over the clean image.
    for i in range(0, num_pepper):
        # Radius of circle
        radius = randint(0, shape_range)
        b = randint(0, color_range)
        g = randint(0, color_range)
        r = randint(0, color_range)
        # BGR
        color = (b, g, r)
        # Center coordinates
        y = randint(0, image_shape[1])
        x = randint(0, image_shape[1])  # NOTE(review): uses index [1] for both axes; fine while the image is square
        for j in range(0, specks_per_pepper):
            group_x_offset = randint(group_range*-1, group_range)
            group_y_offset = randint(group_range*-1, group_range)
            # Size
            radius = randint(0, size_range)
            noise_img = cv2.circle(noise_img, (x + group_x_offset, y + group_y_offset), radius, color, -1)
    if show and counter < 16:
        plt.imshow(noise_img, cmap="gray")
        plt.show()
    try:
        file_name = f"{counter}.png"
        print(f"Writing file {file_name}")
        cv2.imwrite(noise_img_path + file_name, noise_img)
        cv2.imwrite(clear_img_path + file_name, clear_image)
    except:  # NOTE(review): bare except hides real errors; nothing is actually removed here
        print(f"Removed: {file_path}")
    counter+=1
| 3,969 | 1,465 |
"""This script generates README.md based on s3_list.txt. If an S3 object's
parent folder(s) doesn't exist, this script will be smart enough to create
the parent folder(s) as separate items in output MD file.
"""
TXT_FILENAME = "processed/s3_list.txt"
MD_FILENAME = "processed/README.md"
def get_md_name(s3_name):
    """Format an S3 key as a Markdown bullet indented by path depth.

    Folder keys (trailing '/') render as plain bullets; file keys render
    as links into the public bucket.
    """
    parts = s3_name.rstrip('/').split('/')
    prefix = len(parts) * 2 * ' ' + '* '
    leaf = parts[-1]
    if not s3_name.endswith('/'):
        bucket_url = "https://cimr-d.s3.amazonaws.com"
        return f"{prefix}[{leaf}]({bucket_url}/{s3_name})"
    return prefix + leaf
def create_folders(curr_folders, prev_folders):
    """Write bullet lines for every folder in curr_folders not already
    emitted for prev_folders (writes via the module-level file_out handle)."""
    common = 0
    limit = min(len(curr_folders), len(prev_folders))
    # Length of the shared path prefix — those folders already exist.
    while common < limit and curr_folders[common] == prev_folders[common]:
        common += 1
    for depth in range(common, len(curr_folders)):
        folder_key = '/'.join(curr_folders[:depth + 1]) + '/'
        file_out.write(get_md_name(folder_key) + '\n')
# Convert the `aws s3 ls --recursive`-style listing into a nested MD list.
with open(TXT_FILENAME) as file_in, open(MD_FILENAME, 'w') as file_out:
    file_out.write("List of processed files (with links to AWS S3 bucket):\n")
    file_out.write("----\n")
    prev_folders = []
    for line_in in file_in:
        tokens = line_in.split()
        # Fields 0-3 are date, time, size value and size unit; the S3 key
        # may itself contain spaces, so re-join the remainder.
        s3_name = " ".join(tokens[4:])
        md_name = get_md_name(s3_name)
        if s3_name.endswith('/'):
            curr_folders = s3_name.split('/')[0:-2]
            create_folders(curr_folders, prev_folders)
            # Do not show size and date fields for a directory
            file_out.write(md_name + '\n')
        else:
            curr_folders = s3_name.split('/')[0:-1]
            create_folders(curr_folders, prev_folders)
            # For a regular file, includes size and date fields too
            s3_date = tokens[0] + " " + tokens[1]
            s3_size = tokens[2] + " " + tokens[3]
            date_str = f" (updated on *{s3_date}*)"
            size_str = ": " + s3_size
            file_out.write(md_name + size_str + date_str + '\n')
        prev_folders = s3_name.split('/')[0:-1]
| 2,462 | 861 |
from django_filters import rest_framework as filters
from accounts.models import (User)
from cantiin.models import (Product, Order, Comment)
from my_functions.filters_mixins import (DateTimeFilter, IdFilter)
from accounts.models import (User)
from django.db.models import Q
class UserFilter(IdFilter):
    """Filter set for User endpoints; `search` matches username substrings."""
    search = filters.CharFilter(method='my_custom_filter', label="Search")

    class Meta:
        model = User
        fields = ["id", "username", "search"]

    def my_custom_filter(self, queryset, name, value):
        # FIX: filter the queryset that was passed in so this composes with
        # the other declared filters, instead of restarting from User.objects
        # and discarding any filtering already applied.
        return queryset.filter(Q(username__icontains=value))
# https://stackoverflow.com/a/57270647/14819065
class ProductFilter(IdFilter, DateTimeFilter):
    """Filter set for Product endpoints: name search, price range, stock flag."""
    search = filters.CharFilter(method='my_custom_filter', label="Search")
    min_price = filters.NumberFilter(field_name="price",
                                     lookup_expr='gte')
    max_price = filters.NumberFilter(field_name="price",
                                     lookup_expr='lte')
    in_stock = filters.BooleanFilter(field_name="in_stock")

    class Meta:
        model = Product
        fields = "__all__"

    def my_custom_filter(self, queryset, name, value):
        # FIX: operate on the incoming queryset so search composes with the
        # price/stock filters instead of resetting to Product.objects.
        return queryset.filter(Q(name__icontains=value))
class OrderFilter(IdFilter, DateTimeFilter):
    """Filter set for Order endpoints with an inclusive amount range."""
    min_amount = filters.NumberFilter(field_name="amount",
                                      lookup_expr='gte')
    max_amount = filters.NumberFilter(field_name="amount",
                                      lookup_expr='lte')
    class Meta:
        model = Order
        fields = "__all__"
class CommentFilter(IdFilter, DateTimeFilter):
    """Filter set for Comment endpoints; `search` matches content substrings."""
    search = filters.CharFilter(method='my_custom_filter', label="Search")

    class Meta:
        model = Comment
        fields = "__all__"

    def my_custom_filter(self, queryset, name, value):
        # FIX: filter the passed queryset so search composes with the other
        # filters instead of restarting from Comment.objects.
        return queryset.filter(Q(content__icontains=value))
| 1,644 | 557 |
import numpy as np
class PointCloudMerger:
    # FIXME: naive merge — crops both clouds at the match boundary and only
    # aligns the y axis; no rotation is applied (see the removed angle code).
    def merge_clouds(self, cloud_left, cloud_right, matches):
        """Stitch two point clouds along matched columns.

        Args:
            cloud_left/cloud_right: dicts with 'points' and 'colors' arrays
                shaped (rows, cols, >=2); axis 1 is the column axis.
            matches: pairs whose [0][0] / [1][0] entries are the matched
                column indices in the left / right cloud.

        Returns:
            dict with the concatenated 'points' and 'colors' plus the final
            'shift' (y of the last merged column).

        NOTE: the y-shift is applied to a view of cloud_right['points'], so
        the input right cloud is mutated in place (original behavior).
        """
        left_crop_min = np.min([m[0][0] for m in matches])
        right_crop_max = np.max([m[1][0] for m in matches])
        points_left = cloud_left['points'][:, :left_crop_min]
        colors_left = cloud_left['colors'][:, :left_crop_min]
        points_right = cloud_right['points'][:, right_crop_max:]
        colors_right = cloud_right['colors'][:, right_crop_max:]
        # Align the right cloud's y values with the last kept left column.
        shift = points_left[0][left_crop_min - 1][1] - points_right[0][0][1]
        # PERF: vectorized equivalent of the previous per-element double loop.
        points_right[:, :, 1] += shift
        points = np.concatenate([points_left, points_right], axis=1)
        colors = np.concatenate([colors_left, colors_right], axis=1)
        return {
            'points': points,
            'colors': colors,
            'shift': points[0][points.shape[1] - 1][1]
        }
| 1,547 | 566 |
#Submitted by thr3sh0ld
#Logic: Swap half the length of string with the other half
class Solution:
    def reverseString(self, s: List[str]) -> None:
        """Reverse *s* in place using symmetric two-pointer swaps."""
        lo, hi = 0, len(s) - 1
        while lo < hi:
            s[lo], s[hi] = s[hi], s[lo]
            lo += 1
            hi -= 1
class TLSParser:
    """Streaming detector for TLS support (AoC 2016 day 7 part 1).

    Feed characters with addChar(); '[' / ']' toggle hypernet mode and
    restart the 4-character window. endSequence() reports whether the
    address had an ABBA outside brackets and none inside, then resets.
    """
    def __init__(self):
        self.input = ["", "", "", ""]
        self.next = 0
        self.ready = False
        self.hypernet = False
        self.TLS = False
        self.TLSinHypernet = False
    def reset(self):
        """Clear all state for the next address."""
        self.ready = False
        self.next = 0
        self.hypernet = False
        self.TLS = False
        self.TLSinHypernet = False
    def addChar(self, c):
        """Consume one character of the address stream."""
        if c == "[" or c == "]":
            # Bracket: flip supernet/hypernet mode and restart the window.
            self.ready = False
            self.next = 0
            self.hypernet = (c == "[")
            return
        self.input[self.next] = c
        if self.next == 3:
            self.ready = True
            self.next = 0
        else:
            self.next += 1
        if not self.ready:
            return
        if self.hypernet:
            self.TLSinHypernet = self.TLSinHypernet or self.abba()
        else:
            self.TLS = self.TLS or self.abba()
    def at(self, i):
        """Window character at logical offset i (circular buffer of 4)."""
        return self.input[i % 4]
    def abba(self):
        """True when the current 4-char window is an ABBA pattern."""
        a, b, c2, d = (self.at(self.next + k) for k in range(4))
        return self.ready and a == d and b == c2 and a != b
    def endSequence(self):
        """Report TLS support for the finished address, then reset."""
        supported = self.TLS and not self.TLSinHypernet
        self.reset()
        return supported
class SSLParser:
    """Streaming detector for SSL support (AoC 2016 day 7 part 2):
    an ABA outside brackets with a matching BAB inside brackets."""
    def __init__(self):
        self.input = ["", "", ""]
        self.next = 0
        self.ready = False
        self.hypernet = False
        self.supernetC = []
        self.hypernetC = []
    def reset(self):
        """Clear all state for the next address."""
        self.next = 0
        self.ready = False
        self.hypernet = False
        self.supernetC = []
        self.hypernetC = []
    def addChar(self, c):
        """Consume one character of the address stream."""
        if c == "[" or c == "]":
            # Bracket: flip supernet/hypernet mode and restart the window.
            self.ready = False
            self.next = 0
            self.hypernet = (c == "[")
            return
        self.input[self.next] = c
        if self.next == 2:
            self.ready = True
            self.next = 0
        else:
            self.next += 1
        if self.ready and self.aba():
            bucket = self.hypernetC if self.hypernet else self.supernetC
            bucket.append(self.inOrder())
    def at(self, i):
        """Window character at logical offset i (circular buffer of 3)."""
        return self.input[i % 3]
    def aba(self):
        """True when the current 3-char window is an ABA pattern."""
        return self.at(self.next) == self.at(self.next + 2) and self.at(self.next) != self.at(self.next + 1)
    def inOrder(self):
        """Return the current window as a list in stream order."""
        return [self.at(k) for k in range(self.next, self.next + 3)]
    def endSequence(self):
        """Report SSL support for the finished address, then reset."""
        supported = any(self.swapSequence(seq) in self.hypernetC
                        for seq in self.supernetC)
        self.reset()
        return supported
    def swapSequence(self, seq):
        """Map an ABA triple to its corresponding BAB triple."""
        a, b = seq[0], seq[1]
        return [b, a, b]
# Count addresses supporting SSL. The TLS parser is fed as well but its
# result is never read here (and its flags are never reset per line).
TLS = TLSParser()
SSL = SSLParser()
output = 0
with open("input.txt", "r") as f:
    for line in f:
        for c in line:
            TLS.addChar(c)
            SSL.addChar(c)
        if SSL.endSequence():
            output += 1
# FIX: `print output` is Python 2 syntax and a SyntaxError on Python 3;
# the parenthesized form works on both.
print(output)
| 2,659 | 1,295 |
def approx_derivative(f, x: float, delta: float = 1e-6) -> float:
    """Estimate f'(x) with a central difference of half-width *delta*.

    FIX: the old annotation `x: int` was wrong — any real x works.
    """
    return (f(x + delta) - f(x - delta))/(2 * delta)
def approx_derivative_2(f, delta = 1e-6):
    """Return a function that approximates f' via central differences."""
    def derivative(x):
        return (f(x + delta) - f(x - delta))/(2 * delta)
    return derivative
def approx_integral(f, lo, hi, num_regions):
    """Trapezoid-rule estimate of the integral of f over [lo, hi]
    using num_regions equal-width panels."""
    h = (hi - lo)/num_regions
    interior = 0
    x = lo + h
    # Sum the interior sample points; the endpoints get half weight.
    for _ in range(num_regions - 1):
        interior += f(x)
        x += h
    return h * ((f(hi) + f(lo))/2 + interior)
import unittest
from p004 import solution, is_palindrome
class MyTestCase(unittest.TestCase):
    """Unit tests for the Project Euler problem 4 helpers in p004."""
    def test_is_palindrome_zero(self):
        """A single character is a palindrome."""
        self.assertEqual(is_palindrome("o"), True)
    def test_is_palindrome_positive_even(self):
        """Even-length palindrome is accepted."""
        self.assertEqual(is_palindrome("osso"), True)
    def test_is_palindrome_positive_odd(self):
        """Odd-length palindrome is accepted."""
        self.assertEqual(is_palindrome("laval"), True)
    def test_is_palindrome_negative_even(self):
        """Even-length non-palindrome is rejected."""
        self.assertEqual(is_palindrome("oslo"), False)
    def test_is_palindrome_negative_odd(self):
        """Odd-length non-palindrome is rejected."""
        self.assertEqual(is_palindrome("lavas"), False)
    def test_solution_zero(self):
        """Empty factor range yields 0."""
        self.assertEqual(solution(0, 0), 0)
    def test_solution_example(self):
        """Largest palindrome product of two 2-digit numbers is 9009."""
        self.assertEqual(solution(10, 99), 9009)
    def test_solution_answer(self):
        """Largest palindrome product of two 3-digit numbers is 906609."""
        self.assertEqual(solution(100, 999), 906609)
if __name__ == '__main__':
    # Allow running this test module directly with `python`.
    unittest.main()
#!/usr/bin/env python
# -*- encoding:utf-8 -*-
__author__ = 'LexusLee'
"""
该模块是日志模块,负责记录整个应用的日志信息,通过继承threading.Thread类实现异步写日志,外部对象只需将日志信息交给该模块,然后直接返回,
无需等待将日志信息写到文件中,而是由该模块负责将日志信息写到文件中去,从而提高了写日志的速度
"""
import logging.config
import os
import yaml
from foundation import const
BASE_DIR = os.path.dirname(__file__)
# Load the per-environment logging configuration (const.env is set elsewhere).
# NOTE(review): yaml.load() without an explicit Loader is deprecated and
# unsafe on untrusted files; the three branches are also copy-pasted and
# could be collapsed by interpolating const.env into the path.
if const.env == "development":
    log_config_path = os.path.abspath(os.path.join(BASE_DIR, os.pardir, 'configureFiles/development/appLogConf.yaml'))
    logging.config.dictConfig(yaml.load(open(log_config_path, 'r')))  # load the logging config file
elif const.env == "production":
    log_config_path = os.path.abspath(os.path.join(BASE_DIR, os.pardir, 'configureFiles/production/appLogConf.yaml'))
    logging.config.dictConfig(yaml.load(open(log_config_path, 'r')))  # load the logging config file
elif const.env == "test":
    log_config_path = os.path.abspath(os.path.join(BASE_DIR, os.pardir, 'configureFiles/test/appLogConf.yaml'))
    logging.config.dictConfig(yaml.load(open(log_config_path, 'r')))  # load the logging config file
else:
    # Python 2 print statement; message: "environment error, must be one of
    # development, production, test"
    print '环境错误,只能是development,production,test'
logger = logging.getLogger("intlongLogger")
| 1,097 | 522 |
from pathlib import Path
from typing import Generator, Union
import pandas as pd
from .fit import read_fit
from .gpx import read_gpx
from .tcx import read_tcx
def read_file(fpath: Union[str, Path], *args, **kwargs) -> pd.DataFrame:
    """Dispatch to the reader matching *fpath*'s extension (.tcx/.gpx/.fit).

    Unlike the other read_* functions of sweatpy this needs a real path,
    not a file-like object, because the suffix drives the dispatch.

    Args:
        fpath: str or Path object representing the path to a file.

    Returns:
        Returns an activity as a pandas data frame.

    Raises:
        ValueError: when the extension is not recognized.
    """
    suffix = Path(fpath).suffix.lower()
    if suffix == ".tcx":
        return read_tcx(fpath, *args, **kwargs)
    if suffix == ".gpx":
        return read_gpx(fpath, *args, **kwargs)
    if suffix == ".fit":
        return read_fit(fpath, *args, **kwargs)
    raise ValueError(
        f"Argument fpath ({fpath}) has an unsupported file extensions (suffix): {suffix}"
    )
def read_dir(path: Union[str, Path]) -> Generator[pd.DataFrame, None, None]:
    """Yield each activity file in *path* (non-recursive) as a data frame.

    Sub-directories are skipped.

    Args:
        path: str or Path object of a directory with activity files.

    Yields:
        Activities as pandas data frames.
    """
    directory = Path(path)
    assert directory.is_dir()
    for entry in directory.iterdir():
        if not entry.is_dir():
            yield read_file(entry)
| 1,509 | 442 |
import argparse
import hail as hl
from gnomad.utils.vep import (
process_consequences,
filter_vep_to_canonical_transcripts,
get_most_severe_consequence_for_summary,
CSQ_CODING_HIGH_IMPACT,
CSQ_CODING_MEDIUM_IMPACT,
CSQ_CODING_LOW_IMPACT,
CSQ_NON_CODING,
)
from hail.genetics import reference_genome
from fm_insights.utils import register_log, annotate_bed
# Consequence-severity buckets (hail set expressions) consumed by
# annotate_consequence_category below.
coding_high = hl.set(CSQ_CODING_HIGH_IMPACT)
coding_medium = hl.set(CSQ_CODING_MEDIUM_IMPACT)
coding_low = hl.set(CSQ_CODING_LOW_IMPACT)
non_coding = hl.set(CSQ_NON_CODING)
# Regulatory-region BED annotations, keyed by reference genome build.
bed_files = {
    "GRCH37": [
        "gs://finemapping-insights/annotations/baselineLD_v2.2/Promoter_UCSC.bed",
        "gs://finemapping-insights/annotations/Ulirsch_v1.0/DHSmerged_Ulirsch.bed",
        "gs://finemapping-insights/annotations/Ulirsch_v1.0/Roadmap_H3K27ac_Ulirsch.bed",
        "gs://finemapping-insights/annotations/Ulirsch_v1.0/CA_H3K27ac_Ulirsch.bed",
    ],
    "GRCh38": [
        "gs://meta-finemapping-simulation/annotations_hg38/Promoter_UCSC.bed",
        "gs://meta-finemapping-simulation/annotations_hg38/DHSmerged_Ulirsch.bed",
        "gs://meta-finemapping-simulation/annotations_hg38/Roadmap_H3K27ac_Ulirsch.bed",
        "gs://meta-finemapping-simulation/annotations_hg38/CA_H3K27ac_Ulirsch.bed",
    ],
}
# NOTE(review): the GRCh37 key above is spelled "GRCH37" while main() looks
# up args.reference_genome ("GRCh37") — confirm the intended casing.
# Latest gnomAD release per build, plus the population labels in each release.
gnomad_latest_versions = {"GRCh37": "2.1.1", "GRCh38": "3.1.2"}
gnomad_v2_pops = ["afr", "amr", "asj", "eas", "fin", "nfe", "nfe_est", "nfe_nwe", "nfe_onf", "nfe_seu"]
gnomad_v3_pops = ["afr", "ami", "amr", "asj", "eas", "mid", "fin", "nfe", "oth", "sas"]
def annotate_consequence_category(csq_expr, annot_location="consequence_category"):
    """Build a {annot_location: <case expr>} annotation dict that buckets a
    VEP consequence term into coding_high / coding_medium / coding_low /
    non_coding (missing when it matches none of the buckets)."""
    category_expr = (
        hl.case()
        .when(coding_high.contains(csq_expr), "coding_high")
        .when(coding_medium.contains(csq_expr), "coding_medium")
        .when(coding_low.contains(csq_expr), "coding_low")
        .when(non_coding.contains(csq_expr), "non_coding")
        .or_missing()
    )
    return {annot_location: category_expr}
def main(args):
    """Annotate the latest gnomAD genomes release with per-population
    frequencies, the most severe canonical VEP consequence, regulatory BED
    overlaps, and a final collapsed `consequence` label, then checkpoint
    the resulting hail Table to GCS."""
    reference_genome = args.reference_genome
    if reference_genome == "GRCh37":
        from gnomad.resources.grch37.gnomad import public_release
        ht = public_release("genomes").versions[gnomad_latest_versions[reference_genome]].ht()
        freq_index_dict = ht.freq_index_dict.collect()[0]
        # v2 frequency arrays are indexed by "gnomad_<pop>" labels.
        freq_expr = {pop: ht.freq[freq_index_dict[f"gnomad_{pop}"]] for pop in gnomad_v2_pops}
        freq_expr.update({"all": ht.freq[freq_index_dict[f"gnomad"]]})
    elif reference_genome == "GRCh38":
        from gnomad.resources.grch38.gnomad import public_release
        ht = public_release("genomes").versions[gnomad_latest_versions[reference_genome]].ht()
        freq_index_dict = ht.freq_index_dict.collect()[0]
        # v3 frequency arrays are indexed by "<pop>-adj" labels.
        freq_expr = {pop: ht.freq[freq_index_dict[f"{pop}-adj"]] for pop in gnomad_v3_pops}
        freq_expr.update({"all": ht.freq[freq_index_dict[f"adj"]]})
    else:
        raise ValueError("Invalid --reference-genome")
    ht = ht.annotate(freq=hl.struct(**freq_expr))
    # Restrict VEP to canonical transcripts and pick the worst consequence.
    ht = filter_vep_to_canonical_transcripts(ht)
    ht = process_consequences(ht)
    ht = get_most_severe_consequence_for_summary(ht)
    # extract most severe
    ht = ht.select(
        freq=ht.freq,
        most_severe=hl.if_else(hl.is_defined(ht.most_severe_csq), ht.most_severe_csq, "intergenic_variant"),
        gene_most_severe=ht.vep.worst_csq_for_variant_canonical.gene_symbol,
        lof=ht.vep.worst_csq_for_variant_canonical.lof,
        hgnc_id=ht.vep.worst_csq_for_variant_canonical.hgnc_id,
        hgvsp=ht.vep.worst_csq_for_variant_canonical.hgvsp,
        transcript_id=ht.vep.worst_csq_for_variant_canonical.transcript_id,
        polyphen_prediction=ht.vep.worst_csq_for_variant_canonical.polyphen_prediction,
        polyphen_score=ht.vep.worst_csq_for_variant_canonical.polyphen_score,
        sift_prediction=ht.vep.worst_csq_for_variant_canonical.sift_prediction,
        sift_score=ht.vep.worst_csq_for_variant_canonical.sift_score,
        protein_coding=ht.protein_coding,
    )
    ht = ht.select_globals()
    ht = ht.annotate(**annotate_consequence_category(ht.most_severe))
    # Overlap with promoter/DHS/H3K27ac BED tracks for the chosen build.
    ht = annotate_bed(ht, bed_files=bed_files[reference_genome], reference_genome=reference_genome)
    # Collapse everything into one ordered consequence label; earlier .when()
    # clauses take precedence.
    ht = ht.annotate(
        consequence=(
            hl.case(missing_false=True)
            .when(hl.is_defined(ht.lof) & (ht.lof != "LC"), "pLoF")
            .when(
                (ht.lof == "LC")
                | (ht.consequence_category == "coding_high")
                | (ht.consequence_category == "coding_medium"),
                "Missense",
            )
            .when(ht.consequence_category == "coding_low", "Synonymous")
            .when(ht.most_severe == "3_prime_UTR_variant", "UTR3")
            .when(ht.most_severe == "5_prime_UTR_variant", "UTR5")
            .when(ht.Promoter_UCSC == 1, "Promoter")
            .when(
                (ht.DHSmerged_Ulirsch == 1) & ((ht.Roadmap_H3K27ac_Ulirsch == 1) | (ht.CA_H3K27ac_Ulirsch == 1)), "CRE"
            )
            .default("Non-genic")
        )
    )
    ht.describe()
    ht = ht.checkpoint(
        f"gs://meta-finemapping-simulation/gnomad/gnomad.genomes.r{gnomad_latest_versions[args.reference_genome]}.sites.most_severe.ht",
        overwrite=args.overwrite,
    )
if __name__ == "__main__":
    # CLI: --reference-genome {GRCh37,GRCh38} [--overwrite]
    parser = argparse.ArgumentParser()
    parser.add_argument("--reference-genome", type=str, required=True)
    parser.add_argument("--overwrite", action="store_true")
    args = parser.parse_args()
    register_log()
    main(args)
| 5,542 | 2,266 |
from django.apps import AppConfig
class AistSurveyConfig(AppConfig):
    """Django application configuration for the AIST_survey app."""
    name = 'AIST_survey'
| 96 | 33 |
"""
Helper functions used within the test suite
"""
import glob
from contextlib import suppress
from os import environ, remove, rmdir
from os.path import join, exists
from pymongo import MongoClient
def remove_file(files):
    """Remove each path in *files* that exists.

    (The old docstring said "a single file", but this has always taken an
    iterable.) FileNotFoundError is suppressed so a deletion racing between
    the existence check and remove() cannot raise.
    """
    for f in files:
        if exists(f):
            with suppress(FileNotFoundError):
                remove(f)
def remove_crossmap_cache(dir, name, use_subdir=True):
    """Remove any crossmap cache for project *name*: drop its mongo database
    and delete its on-disk files (and their directory, when empty)."""
    host = environ.get("MONGODB_HOST", "0.0.0.0")
    port = environ.get("MONGODB_PORT", 8097)
    client = MongoClient(host=host, port=int(port),
                         username="crossmap", password="crossmap")
    client.drop_database(name)
    data_dir = join(dir, name) if use_subdir else dir
    cache_files = glob.glob(join(data_dir, name) + "*")
    remove_file(cache_files)
    if exists(data_dir):
        with suppress(OSError):
            rmdir(data_dir)
def remove_cachefile(dir, filename):
    """Remove one named crossmap cache file from *dir*."""
    remove_file([join(dir, filename)])
| 1,196 | 381 |
import _thread
import time
import requests
# Browser-like headers so the IP-echo endpoint treats us as a normal client.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_3 like Mac OS X) AppleWebKit/603.3.8 (KHTML, like Gecko) Mobile/14G60 MicroMessenger/6.5.19 NetType/4G Language/zh_TW",
}
# mainUrl = 'http://ipinfo.ipidea.io'
mainUrl = 'https://api.myip.la/en?json'
# NOTE(review): hard-coded proxy credentials — keep out of source control.
username = 'zbipuser-zone-custom-region-ae'
password = 'zbip123'
def Merge(dict1, dict2):
    """Return a new dict combining both inputs; dict2 wins on key clashes."""
    combined = {**dict1, **dict2}
    return combined
def testUrl(username, password):
    """Fetch mainUrl through the authenticated proxy and report the exit IP.

    Returns {'ip': <ip>, 'port': '2333'} on success, or {} when the request
    fails; prints progress along the way (original behavior).
    """
    entry = 'http://{}:{}@proxy.ipidea.io:2334'.format(username, password)
    proxies = {
        'http': entry,
        'https': entry,
    }
    try:
        res = requests.get(mainUrl, headers=headers, proxies=proxies, timeout=10)
        print(res.status_code, res.text)
        ipstr = str(res.json()["ip"])
    except Exception as e:
        # Message: "request failed"
        print("访问失败", e)
        ipstr = ''
    if not ipstr:
        return {}
    ip = {'ip': ipstr}
    print(type(ip), ip)
    port = {'port': '2333'}
    print(type(port), port)
    return Merge(ip, port)
# One-shot check of the proxy exit IP.
ip = testUrl(username, password)
print(type(ip), ip)
port = 2333  # the default port is 2333
##
##for port in range(0, 2):
##    _thread.start_new_thread(testUrl, ())
##    time.sleep(0.1)
##
##time.sleep(10)
| 1,464 | 588 |
# coding: utf-8
"""Tests for IPython.core.application"""
import os
import tempfile
from IPython.core.application import Application
def test_unicode_cwd():
    """Check that IPython starts with non-ascii characters in the path."""
    wd = tempfile.mkdtemp(suffix=u"€")
    old_wd = os.getcwdu()  # NOTE(review): os.getcwdu() is Python 2 only
    os.chdir(wd)
    #raise Exception(repr(os.getcwd()))
    try:
        app = Application()
        # The lines below are copied from Application.initialize()
        app.create_default_config()
        app.log_default_config()
        app.set_default_config_log_level()
        # Find resources needed for filesystem access, using information from
        # the above two
        app.find_ipython_dir()
        app.find_resources()
        app.find_config_file_name()
        app.find_config_file_paths()
        # File-based config
        app.pre_load_file_config()
        app.load_file_config(suppress_errors=False)
    finally:
        os.chdir(old_wd)
def test_unicode_ipdir():
    """Check that IPython starts with non-ascii characters in the IP dir."""
    ipdir = tempfile.mkdtemp(suffix=u"€")
    # Create the config file, so it tries to load it.
    with open(os.path.join(ipdir, 'ipython_config.py'), "w") as f:
        pass
    old_ipdir1 = os.environ.pop("IPYTHONDIR", None)
    old_ipdir2 = os.environ.pop("IPYTHON_DIR", None)
    os.environ["IPYTHONDIR"] = ipdir.encode("utf-8")
    try:
        app = Application()
        # The lines below are copied from Application.initialize()
        app.create_default_config()
        app.log_default_config()
        app.set_default_config_log_level()
        # Find resources needed for filesystem access, using information from
        # the above two
        app.find_ipython_dir()
        app.find_resources()
        app.find_config_file_name()
        app.find_config_file_paths()
        # File-based config
        app.pre_load_file_config()
        app.load_file_config(suppress_errors=False)
    finally:
        # `is not None` so that a legitimately-empty saved value is restored.
        if old_ipdir1 is not None:
            os.environ["IPYTHONDIR"] = old_ipdir1
        if old_ipdir2 is not None:
            # FIX: this previously restored the saved IPYTHON_DIR value into
            # the IPYTHONDIR key, clobbering the first restore and never
            # restoring IPYTHON_DIR itself.
            os.environ["IPYTHON_DIR"] = old_ipdir2
#!/usr/bin/env python3
"""Rechunk a Zarr with chunks of size 1 in time, full size in lat/lon.
If s3fs is installed, "s3://..." arguments can be used and credentials
will be read from standard environment variables or files (see s3fs docs).
The output dataset will have the same data as the input dataset, rechunked
so that the chunks are flat time slices. That is, the chunks will have
size 1 in the time dimension and cover the full extent of the dataset in
the lat and lon dimensions.
"""
import xarray as xr
import argparse
def main():
    """CLI entry point: parse the input/output Zarr paths and rechunk."""
    parser = argparse.ArgumentParser()
    parser.add_argument('input_zarr')
    parser.add_argument('output_zarr')
    opts = parser.parse_args()
    rechunk(opts.input_zarr, opts.output_zarr)
def rechunk(input_path, output_path):
    """Rewrite the Zarr at *input_path* to *output_path* with flat
    time-slice chunks: size 1 in time, full extent in lat and lon."""
    dataset = xr.open_dataset(input_path, engine="zarr")
    # Drop the stored chunk encoding so to_zarr uses the new chunking.
    for name in dataset:
        del dataset[name].encoding['chunks']
    target = {'time': 1, 'lat': len(dataset.lat), 'lon': len(dataset.lon)}
    print('Writing output Zarr...')
    dataset.chunk(target).to_zarr(output_path)
if __name__ == '__main__':
    main()
| 1,131 | 376 |
from discord.ext import commands
from alttprbot.exceptions import SahasrahBotException
from alttprbot.alttprgen.smvaria import generate_preset
class SuperMetroidVaria(commands.Cog):
    """Discord cog exposing the $smvaria seed-generation commands."""
    def __init__(self, bot):
        self.bot = bot
    @commands.group()
    async def smvaria(self, ctx):
        """Command group root; errors when invoked without a subcommand."""
        if ctx.invoked_subcommand is None:
            raise SahasrahBotException(
                'Try providing a valid subcommand. Use "$help smvaria" for assistance.')
    @smvaria.command()
    async def race(self, ctx, skills="regular", settings="default"):
        """Generate a race-flagged VARIA seed and post its embed."""
        seed = await generate_preset(
            settings=settings,
            skills=skills,
            race=True
        )
        await ctx.send(embed=seed.embed())
    @smvaria.command()
    async def norace(self, ctx, skills="regular", settings="default"):
        """Generate a non-race VARIA seed and post its embed."""
        seed = await generate_preset(
            settings=settings,
            skills=skills,
            race=False
        )
        await ctx.send(embed=seed.embed())
def setup(bot):
    # discord.py extension hook: register this cog when the extension loads.
    bot.add_cog(SuperMetroidVaria(bot))
| 1,056 | 315 |
#import numpy as np
class MyGrph:
    """Directed graph stored as forward and reverse adjacency lists.

    Vertices are 0-based internally; the input format and ``dfs`` start
    vertex are 1-based.
    """

    def __init__(self, V_quant, E_quant):
        self.V = V_quant
        self.E = E_quant
        # The original used [[]] * V, which creates V references to ONE list;
        # it only worked because edges were added by rebinding elements.
        # Independent lists are safe and allow plain append.
        self.edges_list = [[] for _ in range(self.V)]
        self.reverse_edges_list = [[] for _ in range(self.V)]

    def build_graph(self, f):
        """Read self.E whitespace-separated 1-based edges from file-like f."""
        for _ in range(self.E):
            vertex1, vertex2 = map(int, f.readline().strip().split())
            self.edges_list[vertex1 - 1].append(vertex2 - 1)
            self.reverse_edges_list[vertex2 - 1].append(vertex1 - 1)

    def dfs(self, vertex, visited_list):
        """Iterative DFS from 1-based ``vertex``; mark and return visited_list."""
        start = vertex - 1
        visited_list[start] = True
        stack_l = [start]
        while stack_l:
            vrtx = stack_l.pop()
            visited_list[vrtx] = True
            for nbr in self.edges_list[vrtx]:
                if not visited_list[nbr]:
                    stack_l.append(nbr)
        return visited_list

    def check_aciclity(self):
        """Return -1 if the graph contains a cycle, else 1."""
        visited_list = [False] * self.V
        for v_comp in range(self.V):
            if visited_list[v_comp]:
                continue
            # check_list[v] is True while v is on the current DFS path.
            check_list = [False] * self.V
            stack_l = [v_comp]
            while stack_l:
                vrtx = stack_l[-1]
                if not visited_list[vrtx]:
                    visited_list[vrtx] = True
                    check_list[vrtx] = True
                else:
                    # Second visit: fully expanded, take it off the path.
                    check_list[vrtx] = False
                    stack_l.pop()
                    continue
                for nbr in self.edges_list[vrtx]:
                    if not visited_list[nbr]:
                        stack_l.append(nbr)
                    if check_list[nbr]:
                        # Back edge to a vertex still on the path -> cycle.
                        return -1
        return 1

    def topologically_sort(self):
        """Return all vertices in reversed DFS finish order.

        Not an actual topological sort: graphs in this task may be cyclic.
        """
        sorted_grph = []
        emitted = set()  # O(1) membership; was "vrtx in sorted_grph", O(n) per pop
        visited_list = [False] * self.V
        for v_comp in range(self.V):
            if visited_list[v_comp]:
                continue
            stack_l = [v_comp]
            while stack_l:
                vrtx = stack_l[-1]
                if not visited_list[vrtx]:
                    visited_list[vrtx] = True
                else:
                    stack_l.pop()
                    if vrtx not in emitted:
                        emitted.add(vrtx)
                        sorted_grph.append(vrtx)
                    continue
                for nbr in self.edges_list[vrtx]:
                    if not visited_list[nbr]:
                        stack_l.append(nbr)
        return sorted_grph[::-1]

    def hdag(self):
        """Return [1] + order if consecutive vertices of the pseudo-topological
        order are directly connected, else [-1].

        NOTE(review): sorted_grph already holds 0-based vertices yet is indexed
        here with an extra "- 1" offset (kept from the original) -- confirm the
        intended vertex numbering before relying on this method.
        """
        sorted_grph = self.topologically_sort()
        for i in range(self.V - 1):
            if (sorted_grph[i + 1] - 1) not in self.edges_list[sorted_grph[i] - 1]:
                return [-1]
        return [1] + sorted_grph

    def scc(self):
        """Count strongly connected components (Kosaraju: DFS on the reverse
        graph in decreasing finish order of the forward DFS)."""
        scc_cnt = 0
        sorted_grph = self.topologically_sort()
        visited_list = [False] * self.V
        for v_comp in sorted_grph:
            if visited_list[v_comp]:
                continue
            scc_cnt += 1
            stack_l = [v_comp]
            while stack_l:
                vrtx = stack_l.pop()
                visited_list[vrtx] = True
                for nbr in self.reverse_edges_list[vrtx]:
                    if not visited_list[nbr]:
                        stack_l.append(nbr)
        return scc_cnt

    def conn_comp(self):
        """Count components reachable by forward-DFS sweeps.

        Equals weakly connected components only if the edge set is symmetric.
        """
        counter = 0
        visited_list = [False] * self.V
        while not all(visited_list):
            for i in range(len(visited_list)):
                if not visited_list[i]:
                    visited_list = self.dfs(i + 1, visited_list)
                    counter += 1
        return counter
# Script entry point: read "<V> <E>" followed by E edge lines from the
# Rosalind input file, count strongly connected components, print the count
# and save it to scc_answer.txt.
if __name__ == "__main__":
    with open("rosalind_scc.txt", "r") as f:
        vertex_quant, edges_quant = map(int, f.readline().strip().split())
        grph = MyGrph(vertex_quant, edges_quant)
        grph.build_graph(f)
        rslt = grph.scc()
        print(rslt)
    with open("scc_answer.txt", "w") as f:
        f.write(str(rslt))
| 4,724 | 1,515 |
#!/usr/bin/env python3
# Programm : test_tastatur.py
# Version : 1.01
# SW-Stand : 17.02.2022
# Autor : Kanopus1958
# Beschreibung : Tastur Beispiel in Python
from rwm_steuerung import color as c
from rwm_mod01 import show_header
import platform
import sys
# Target platforms passed to show_header.
G_OS = ('Raspbian', 'Debian')
# Two halves of the header banner line.
G_HEADER_1 = '# Test Tastatur (Python-Beispi'
G_HEADER_2 = 'el) #'
if platform.system() == 'Linux':
    # Raw-terminal modules are POSIX-only.
    import tty
    import termios
def inkey():
    """Read single keystrokes in raw mode and echo them until 'q' is typed."""
    fd = sys.stdin.fileno()
    while True:
        saved_attrs = termios.tcgetattr(fd)
        tty.setraw(fd)
        ch = sys.stdin.read(1)  # read exactly one character
        termios.tcsetattr(fd, termios.TCSADRAIN, saved_attrs)
        if ch == 'q':
            break
        # Do not echo ESC (0x1b) or '[' so arrow-key escape sequences stay quiet.
        if ch not in ('\x1b', '['):
            sys.stdout.write(ch)
            sys.stdout.flush()
        # print(character)
def _main():
    """Show the program header, then run the keyboard loop until 'q'."""
    show_header(G_HEADER_1, G_HEADER_2, __file__, G_OS)
    print("\nTasteninput gestartet (Beenden mit 'q')\n")
    inkey()
    print()
    print("\nTasteninput gestoppt\n")
# Script entry point.
if __name__ == "__main__":
    _main()
| 1,193 | 436 |
/home/sheldon/anaconda3/lib/python3.6/sre_constants.py | 54 | 26 |
from nfsn import NearlyFreeSpeechService
from webfaction import WebFactionService
# Registry of supported hosting-service classes, keyed by class name.
services = [WebFactionService, NearlyFreeSpeechService]
# dict((...) for ...) replaced by the equivalent, idiomatic comprehension.
services_by_name = {s.__name__: s for s in services}
| 197 | 62 |
# coding: utf-8
import math
from const import CUR_FOLDER, FILE_FOLDER, FILE_TRAIN, FEATURE_NUM, K, FOLD, VALUES
def get_data(test_no):
    """Load all FOLD files; fold ``test_no`` (0-based) becomes the test split.

    Each returned row is the feature values plus a trailing 0/1 label
    (1 when the last CSV field is 'positive').
    """
    train_data, test_data = [], []
    for fold in range(1, FOLD + 1):
        path = CUR_FOLDER + FILE_FOLDER + FILE_TRAIN + '%i.txt' % fold
        with open(path) as handle:
            for line in handle:
                fields = line.strip().split(',')
                label = 1 if fields[-1] == 'positive' else 0
                row = fields[:FEATURE_NUM] + [label]
                if fold - 1 == test_no:
                    test_data.append(row)
                else:
                    train_data.append(row)
    return train_data, test_data
def crossvalidation():
print 'begin crossvalidation'
k_measure = []
for k in K:
print 'current k: ', k
precision = 0.0
for i in range(FOLD):
train_data, test_data = get_data(test_no=i)
trained_labels = knn(train_data, test_data, k)
test_labels = [d[-1] for d in test_data]
_precision = measure(test_labels, trained_labels)
precision += _precision
avg_p = precision/FOLD
k_measure.append((k, avg_p))
print 'avg precision: ', avg_p
k_measure.sort(key=lambda x: x[1], reverse=True)
print 'best k trained: ', k_measure[0][0]
print 'best avg precision: ', k_measure[0][1]
return k_measure[0][0]
def measure(test_labels, trained_labels):
    """Accuracy: fraction of positions where prediction equals truth."""
    mismatches = hamming_distance(trained_labels, test_labels)
    return 1 - float(mismatches) / len(test_labels)
def knn(train_data, test_data, k):
    """Predict 0/1 labels by majority vote of the k Hamming-nearest training
    rows; ties in the vote go to label 1."""
    labels = []
    for test_row in test_data:
        scored = [
            (hamming_distance(train_row[:FEATURE_NUM], test_row[:FEATURE_NUM]),
             train_row[-1])
            for train_row in train_data
        ]
        # Sort on distance only (stable), so equal-distance rows keep their
        # original order exactly as before.
        scored.sort(key=lambda pair: pair[0])
        votes = [lbl for _, lbl in scored[:k]]
        labels.append(1 if votes.count(1) >= votes.count(0) else 0)
    return labels
def XOR(x, y):
    """Logical XOR: True iff x and y differ.

    The original if/else chain was redundant -- ``x != y`` already
    evaluates to the desired bool.
    """
    return x != y
def hamming_distance(x, y):
    """Number of positions where x and y differ.

    Like zip(), comparison is truncated to the shorter sequence.  Counts
    mismatches directly instead of materialising a list of XOR results and
    calling .count(True) on it.
    """
    return sum(1 for a, b in zip(x, y) if a != b)
# Script entry point: run the cross-validation sweep over K.
if __name__ == '__main__':
    crossvalidation()
| 2,267 | 772 |
import socket
import struct
# send_name() prompts the user for a nickname and returns it for sending to the server
def send_name():
    """Prompt until a 1-20 character nickname is given; return it UTF-8 encoded."""
    while True:
        nickname = input('Please enter your nickname (maximum 20 characters) --> ')
        if 0 < len(nickname) < 21:
            return nickname.encode('utf-8')
def connect():
    """Connect to the chat server.

    Joins the UDP multicast group used for receiving broadcasts, opens a TCP
    connection for sending, and transmits the chosen nickname.  Exits the
    program if the TCP connection cannot be established.

    Returns:
        (tcps, udps): connected TCP socket and multicast UDP socket.
    """
    ip = input("Enter the server ip--> ")
    port = 1235
    multicast_group = '225.1.1.1'
    multicast_port = 5007
    # UDP socket subscribed to the multicast group on all interfaces.
    udps = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    udps.bind(('', multicast_port))
    group = socket.inet_aton(multicast_group)
    mreq = struct.pack('4sL', group, socket.INADDR_ANY)
    udps.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    # TCP socket used to register the nickname with the server.
    tcps = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        tcps.connect((ip, port))
        tcps.sendall(send_name())
    except OSError:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # programming errors; all socket failures derive from OSError.
        print("Connection cannot be established!")
        quit()
    return tcps, udps
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Built and managed with Open Source Love by BeeHyv Software Solutions Pvt Ltd. Hyderabad
# www.beehyv.com
import uuid
from django.http import HttpResponse, JsonResponse
from rest_framework import status
from communications.CommunicationHandler import CommunicationHandler
from otpvalidation.models import Otp
from survey.models import SurveyInstance
def send_otp(request):
    """Generate and send a 6-character OTP for the survey's health worker.

    Expects ``survey_id`` in the query string.  The OTP is sent via template
    id 5 through CommunicationHandler, persisted, and the worker's contact
    details are returned as JSON.
    """
    try:
        # id send otp based on survey id
        # First 6 characters of a hyphen-stripped UUID4 act as the OTP.
        otpValue = str(uuid.uuid4()).replace('-', '')[:6]
        entity = Otp()
        entity.otp = otpValue
        instance = SurveyInstance.objects.filter(pk=request.GET['survey_id']).first()
        worker = instance.health_worker_id
        # Prefer the WhatsApp number when one is on file.
        if worker.whatsapp_number and len(worker.whatsapp_number) > 0:
            entity.phone_number = worker.whatsapp_number
        else:
            entity.phone_number = worker.phone_number
        entity.email = worker.email
        commHandler = CommunicationHandler()
        # Message is sent before the OTP record is saved.
        commHandler.send_message(worker, 5, {'otp': entity.otp})
        entity.save()
    except Exception as e:
        # NOTE(review): echoing the raw exception to the client may leak
        # internals -- consider logging instead; confirm before changing.
        return HttpResponse(e, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    response = {}
    response['name'] = instance.health_worker_id.first_name
    response['consent'] = instance.health_worker_id.is_consented
    response['email'] = entity.email
    response['phone_number'] = entity.phone_number
    return JsonResponse(response)
def resend_otp(request):
    """Resend the latest OTP previously generated for the survey's worker.

    Expects ``survey_id`` in the query string.  Looks up the most recent Otp
    record for the worker's phone number and resends it; returns the worker's
    contact details as JSON.
    """
    try:
        instance = SurveyInstance.objects.filter(pk=request.GET['survey_id']).first()
        worker = instance.health_worker_id
        # Most recent OTP record for this phone number (highest id).
        entity = Otp.objects.filter(phone_number=worker.phone_number).order_by('-id').first()
        if entity:
            commHandler = CommunicationHandler()
            commHandler.send_message(worker, 5, {'otp': entity.otp})
    except Exception as e:
        # NOTE(review): echoing the raw exception to the client may leak
        # internals -- consider logging instead; confirm before changing.
        return HttpResponse(e, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    response = {}
    response['name'] = worker.first_name
    response['consent'] = instance.health_worker_id.is_consented
    # NOTE(review): if no Otp record exists, `entity` is None and the two
    # lines below raise AttributeError -- confirm callers guarantee one.
    response['email'] = entity.email
    response['phone_number'] = entity.phone_number
    return JsonResponse(response)
def verify_otp(request):
    """Validate a submitted OTP for the survey's health worker.

    On success the worker is marked as consented and the OTP record is
    deleted.  Returns JSON {"verified": bool}.
    """
    submitted = request.GET['otp']
    instance = SurveyInstance.objects.filter(pk=request.GET['survey_id']).first()
    worker = instance.health_worker_id
    # The OTP may have been delivered to either number; check WhatsApp first.
    record = Otp.objects.filter(phone_number=worker.whatsapp_number, otp=submitted).first()
    if not record:
        record = Otp.objects.filter(phone_number=worker.phone_number, otp=submitted).first()
    if record:
        worker.is_consented = True
        worker.save()
        record.delete()
        return JsonResponse({'verified': True})
    return JsonResponse({'verified': False})
import ynet
from util import *
from singa.layer import Conv2D, Activation, MaxPooling2D, AvgPooling2D, Flatten, Slice, LRN
from singa import initializer
from singa import layer
from singa import loss
from singa import tensor
import cPickle as pickle
import logging
import os
import numpy as np
from numpy.core.umath_tests import inner1d
import scipy.spatial
from tqdm import trange
import time
logger = logging.getLogger(__name__)
class L2Norm(ynet.L2Norm):
    """Numpy L2 normalisation: scales each row of x to (near) unit length."""

    def forward(self, is_train, x):
        row_norm = np.sqrt(np.sum(x**2, axis=1) + self.epsilon)
        self.y = x / row_norm[:, np.newaxis]
        if is_train:
            # Cache the norms for the backward pass.
            self.norm = row_norm
        return self.y

    def backward(self, is_train, dy):
        # dx = (dy - y * k) / norm, with k = sum(dy * y): removes the
        # component of dy along y, then rescales.
        radial = np.sum(dy * self.y, axis=1)
        dx = dy - self.y * radial[:, np.newaxis]
        dx /= self.norm[:, np.newaxis]
        return dx, []
class Softmax(layer.Layer):
    """Row-wise numpy softmax with max-subtraction for numerical stability."""

    def __init__(self, name, input_sample_shape):
        super(Softmax, self).__init__(name)
        self.a = None  # cached activations for the backward pass

    def forward(self, is_train, x):
        assert len(x.shape) == 2, 'softmax input should be 2d-array'
        shifted = x - np.max(x, axis=1)[:, np.newaxis]
        shifted = np.exp(shifted)
        shifted /= np.sum(shifted, axis=1)[:, np.newaxis]
        if is_train:
            self.a = shifted
        return shifted

    def backward(self, is_train, dy):
        # Jacobian-vector product: a * (dy - sum_j dy_j * a_j).
        dot = np.einsum('ij, ij->i', dy, self.a)
        return self.a * (dy - dot[:, np.newaxis]), []
class Aggregation(layer.Layer):
    """Weighted sum of per-location feature vectors: y_ij = sum_k x_ijk * w_ik."""

    def __init__(self, name, input_sample_shape):
        super(Aggregation, self).__init__(name)
        self.c, h, w = input_sample_shape[0]
        assert h * w == input_sample_shape[1][0], \
            '# locations not match: %d vs %d' % (h * w, input_sample_shape[1][0])
        self.x = None

    def forward(self, is_train, xs):
        # xs[0]: feature map flattened to (batch, channels, locations);
        # xs[1]: per-location weights (batch, locations).
        feats = xs[0].reshape((xs[0].shape[0], self.c, -1))
        weights = xs[1]
        if is_train:
            self.x = feats
            self.w = weights
        return np.einsum('ijk, ik -> ij', feats, weights)

    def backward(self, is_train, dy):
        dw = np.einsum('ij, ijk -> ik', dy, self.x)
        dx = np.einsum('ij, ik -> ijk', dy, self.w)
        return [dx, dw], []
class TagEmbedding(layer.Layer):
    """Linear (bias-free) embedding of a tag vector into the feature space.

    Weight shape: (input_dim, num_output); the bias code is intentionally
    commented out.
    """

    def __init__(self, name, num_output, input_sample_shape):
        super(TagEmbedding, self).__init__(name)
        self.W = tensor.Tensor((input_sample_shape[0], num_output))
        #initializer.gaussian(self.W, input_sample_shape[0], num_output)
        self.W.gaussian(0, 0.008)  # random init: mean 0, std 0.008

    def param_names(self):
        return ['%s_weight' % self.name]

    def param_values(self):
        return [self.W]

    def forward(self, is_train, x):
        # x is a numpy array (batch, input_dim); computation happens in numpy.
        if is_train:
            self.x = x
        W = tensor.to_numpy(self.W)
        # b = self.to_numpy(self.b)
        return np.dot(x, W) # + b[np.newaxis, :]

    def backward(self, is_train, dy):
        # Gradient w.r.t. W only; no gradient is propagated to the input
        # (the first return value is an empty list).
        dw = np.einsum('id, ij -> dj', self.x, dy)
        # db = np.sum(dt, axis=0)
        return [], [tensor.from_numpy(dw)]
class ProductAttention(layer.Layer):
    """Dot-product attention scores between each location's feature vector
    and the tag embedding: s_ik = sum_j x_ijk * t_ij."""

    def __init__(self, name, input_sample_shape):
        super(ProductAttention, self).__init__(name)
        self.c, self.h, self.w = input_sample_shape[0]
        assert self.c == input_sample_shape[1][0], \
            '# channels != tag embed dim: %d vs %d' % (self.c, input_sample_shape[1][0])
        self.x = None
        self.t = None

    def forward(self, is_train, xs):
        # xs[0]: feature map flattened to (batch, channels, locations);
        # xs[1]: tag embedding (batch, channels).
        feats = xs[0].reshape((xs[0].shape[0], self.c, -1))
        tag = xs[1]
        if is_train:
            self.x = feats
            self.t = xs[1]
        return np.einsum('ijk, ij->ik', feats, tag)

    def backward(self, is_train, dy):
        dt = np.einsum('ik, ijk -> ij', dy, self.x)
        dx = np.einsum('ij, ik -> ijk', self.t, dy)
        return [dx, dt], []
class TagAttention(layer.Layer):
    """Composite layer: embed the tag, score each spatial location against it,
    softmax the scores, and aggregate the feature map with those weights.

    Pipeline: TagEmbedding -> ProductAttention -> Softmax -> Aggregation.
    Output shape: (channels,) per sample.
    """

    def __init__(self, name, input_sample_shape):
        super(TagAttention, self).__init__(name)
        # input_sample_shape[0]: image feature map (c, h, w);
        # input_sample_shape[1]: tag vector shape.
        self.c, self.h, self.w = input_sample_shape[0]
        l = self.h * self.w  # number of spatial locations
        self.embed = TagEmbedding('%s_embed' % name, self.c, input_sample_shape[1])
        self.attention = ProductAttention('%s_attention' % name, [input_sample_shape[0], (self.c,)])
        self.softmax = Softmax('%s_softmax' % name, (l,))
        self.agg = Aggregation('%s_agg' % name, [input_sample_shape[0], (l,)])
        self.dev = None  # device of the incoming singa tensor, if any

    def get_output_sample_shape(self):
        return (self.c, )

    def param_names(self):
        # Only the embedding sublayer holds parameters.
        return self.embed.param_names()

    def param_values(self):
        return self.embed.param_values()

    def display(self, name, val):
        # Debug helper: mean absolute value of an array, printed when
        # ynet.debug is on.
        if ynet.debug:
            print('%30s = %2.8f' % (name, np.average(np.abs(val))))

    def forward(self, is_train, x, output_weight=False):
        """x = [image feature map, tag vector]; optionally also return the
        softmaxed attention weights."""
        # Convert a singa tensor to numpy, remembering its device so the
        # gradient can be moved back in backward().
        if type(x[0]) == tensor.Tensor:
            self.dev = x[0].device
            img = tensor.to_numpy(x[0])
        else:
            img = x[0]
        t = self.embed.forward(is_train, x[1])
        if ynet.debug:
            show_debuginfo(self.embed.name, t)
        w = self.attention.forward(is_train, [img, t])
        if ynet.debug:
            show_debuginfo(self.attention.name, w)
        w = self.softmax.forward(is_train, w)
        if ynet.debug:
            show_debuginfo(self.softmax.name, w)
        y = self.agg.forward(is_train, [img, w])
        if ynet.debug:
            show_debuginfo(self.agg.name, y)
        if output_weight:
            return y, w
        else:
            return y

    def backward(self, is_train, dy):
        """Backprop through agg -> softmax -> attention -> embed.

        The image receives two gradient contributions (aggregation path and
        attention-score path), which are summed and reshaped to (n, c, h, w).
        """
        [dx1, dw], _ = self.agg.backward(is_train, dy)
        if ynet.debug:
            show_debuginfo(self.agg.name, dx1)
        dw, _ = self.softmax.backward(is_train, dw)
        if ynet.debug:
            show_debuginfo(self.softmax.name, dw)
        [dx2, dt], _ = self.attention.backward(is_train, dw)
        if ynet.debug:
            show_debuginfo(self.attention.name, dx2)
        _, dW = self.embed.backward(is_train, dt)
        dx = np.reshape(dx1 + dx2, (dx1.shape[0], self.c, self.h, self.w))
        # Move the gradient back onto the original device if the input was a
        # singa tensor.
        if self.dev is not None:
            dx = tensor.from_numpy(dx)
            dx.to_device(self.dev)
        return dx, dW
class TagNIN(ynet.YNIN):
    """NIN-style two-branch (street/shop) network whose shop branch pools
    features with tag-driven attention instead of average pooling."""

    def create_net(self, name, img_size, batchsize=32):
        """Build the shared trunk plus street and shop branch layer lists.

        The shared trunk processes the concatenated street+shop batch; the
        Slice layer splits it at index batchsize * self.nuser into
        (street part, shop part).
        """
        assert self.ntag > 0, 'no tags for tag nin'
        shared = []
        self.add_conv(shared, 'conv1', [96, 96, 96], 11, 4, sample_shape=(3, img_size, img_size))
        shared.append(MaxPooling2D('p1', 3, 2, pad=1, input_sample_shape=shared[-1].get_output_sample_shape()))
        self.add_conv(shared, 'conv2', [256, 256, 256], 5, 1, 2)
        shared.append(MaxPooling2D('p2', 3, 2, pad=0, input_sample_shape=shared[-1].get_output_sample_shape()))
        self.add_conv(shared, 'conv3', [384, 384, 384], 3, 1, 1)
        shared.append(MaxPooling2D('p3', 3, 2, pad=0, input_sample_shape=shared[-1].get_output_sample_shape()))
        slice_layer = Slice('slice', 0, [batchsize*self.nuser], input_sample_shape=shared[-1].get_output_sample_shape())
        shared.append(slice_layer)
        # Street branch: conv stack -> average pool -> flatten -> L2 norm.
        user = []
        self.add_conv(user, 'street-conv4', [1024, 1024, 1000] , 3, 1, 1, sample_shape=slice_layer.get_output_sample_shape()[0])
        user.append(AvgPooling2D('street-p4', 6, 1, pad=0, input_sample_shape=user[-1].get_output_sample_shape()))
        user.append(Flatten('street-flat', input_sample_shape=user[-1].get_output_sample_shape()))
        user.append(ynet.L2Norm('street-l2', input_sample_shape=user[-1].get_output_sample_shape()))
        # Shop branch: conv stack -> tag attention -> L2 norm (numpy variant).
        shop = []
        self.add_conv(shop, 'shop-conv4', [1024, 1024, 1000], 3, 1, 1, sample_shape=slice_layer.get_output_sample_shape()[1])
        shop.append(TagAttention('shop-tag',
                    input_sample_shape=[shop[-1].get_output_sample_shape(), (self.ntag, )]))
        shop.append(L2Norm('shop-l2', input_sample_shape=shop[-1].get_output_sample_shape()))
        return shared, user, shop

    def forward(self, is_train, data):
        """One forward pass; returns (loss, data-load seconds, compute seconds)."""
        t1 = time.time()
        imgs, pids = data.next()
        t2 = time.time()
        imgs = self.put_input_to_gpu(imgs)
        # The shared trunk ends with Slice, yielding (street, shop) outputs.
        a, b = self.forward_layers(is_train and (not self.freeze_shared), imgs, self.shared)
        a = self.forward_layers(is_train and (not self.freeze_user), a, self.user)
        # Shop branch: conv stack, then the tag-attention layer (which needs
        # the shop half of the pids), then the final L2 norm.
        b = self.forward_layers(is_train and (not self.freeze_shop), b, self.shop[0:-2])
        b = self.shop[-2].forward(is_train, [b, data.tag2vec(pids[a.shape[0]:])])
        b = self.forward_layers(is_train and (not self.freeze_shop), b, self.shop[-1:])
        loss = self.loss.forward(is_train, a, b, pids)
        return loss, t2 - t1, time.time() - t2

    def extract_db_feature_on_batch(self, data):
        """Shop-branch features for one gallery batch: shared trunk minus the
        Slice layer, then shop convs and tag attention (no final L2 here)."""
        img, pid = data.next()
        img = self.put_input_to_gpu(img)
        fea = self.forward_layers(False, img, self.shared[0:-1] + self.shop[0:-2])
        fea = self.shop[-2].forward(False, [fea, data.tag2vec(pid)])
        return fea, pid
class TagVGG(TagNIN):
    """VGG-style variant of TagNIN: same two-branch layout and the same
    forward/extract logic (inherited), different convolutional trunk."""

    def create_net(self, name, img_size, batchsize=32):
        """Build shared trunk + street branch + shop branch layer lists.

        As in TagNIN, the Slice layer splits the concatenated batch at
        batchsize * self.nuser into (street part, shop part).
        """
        assert self.ntag > 0, 'no tags for tag nin'
        shared = []
        shared.append(Conv2D('conv1-3x3', 96, 7, 2, pad=1, input_sample_shape=(3, img_size, img_size)))
        shared.append(Activation('conv1-relu', input_sample_shape=shared[-1].get_output_sample_shape()))
        shared.append(LRN('conv1-norm', size=5, alpha=5e-4, beta=0.75, k=2, input_sample_shape=shared[-1].get_output_sample_shape()))
        shared.append(MaxPooling2D('pool1', 3, 3, pad=0, input_sample_shape=shared[-1].get_output_sample_shape()))
        shared.append(Conv2D('conv2', 256, 5, 1, cudnn_prefer='limited_workspace', workspace_byte_limit=1000, pad=1, input_sample_shape=shared[-1].get_output_sample_shape()))
        shared.append(Activation('conv2-relu', input_sample_shape=shared[-1].get_output_sample_shape()))
        shared.append(MaxPooling2D('pool2', 2, 2, pad=0, input_sample_shape=shared[-1].get_output_sample_shape()))
        shared.append(Conv2D('conv3', 512, 3, 1, cudnn_prefer='limited_workspace', workspace_byte_limit=1000, pad=1, input_sample_shape=shared[-1].get_output_sample_shape()))
        shared.append(Activation('conv3-relu', input_sample_shape=shared[-1].get_output_sample_shape()))
        shared.append(Conv2D('conv4', 512, 3, 1, cudnn_prefer='limited_workspace', workspace_byte_limit=1500, pad=1, input_sample_shape=shared[-1].get_output_sample_shape()))
        shared.append(Activation('conv4-relu', input_sample_shape=shared[-1].get_output_sample_shape()))
        slice_layer = Slice('slice', 0, [batchsize*self.nuser], input_sample_shape=shared[-1].get_output_sample_shape())
        shared.append(slice_layer)
        # Street branch: conv5/conv6 -> average pool -> flatten -> L2 norm.
        user = []
        user.append(Conv2D('street-conv5', 512, 3, 1, cudnn_prefer='limited_workspace', workspace_byte_limit=1500, pad=1, input_sample_shape=shared[-1].get_output_sample_shape()[1]))
        user.append(Activation('street-conv5-relu', input_sample_shape=user[-1].get_output_sample_shape()))
        user.append(Conv2D('street-conv6', 128, 3, 2, cudnn_prefer='limited_workspace', workspace_byte_limit=1500, pad=0, input_sample_shape=user[-1].get_output_sample_shape()))
        user.append(Activation('street-conv6-relu', input_sample_shape=user[-1].get_output_sample_shape()))
        user.append(AvgPooling2D('street-pool6', 8, 1, pad=0, input_sample_shape=user[-1].get_output_sample_shape()))
        user.append(Flatten('street-flat', input_sample_shape=user[-1].get_output_sample_shape()))
        user.append(ynet.L2Norm('street-l2', input_sample_shape=user[-1].get_output_sample_shape()))
        # Shop branch: conv5/conv6 -> tag attention -> L2 norm (numpy variant).
        shop = []
        shop.append(Conv2D('shop-conv5', 512, 3, 1, cudnn_prefer='limited_workspace', workspace_byte_limit=1500, pad=1, input_sample_shape=shared[-1].get_output_sample_shape()[1]))
        shop.append(Activation('shop-conv5-relu', input_sample_shape=shop[-1].get_output_sample_shape()))
        shop.append(Conv2D('shop-conv6', 128, 3, 2, cudnn_prefer='limited_workspace', workspace_byte_limit=1500, pad=0, input_sample_shape=shop[-1].get_output_sample_shape()))
        shop.append(Activation('shop-conv6-relu', input_sample_shape=shop[-1].get_output_sample_shape()))
        shop.append(TagAttention('shop-tag',
                    input_sample_shape=[shop[-1].get_output_sample_shape(), (self.ntag, )]))
        shop.append(L2Norm('shop-l2', input_sample_shape=shop[-1].get_output_sample_shape()))
        return shared, user, shop
| 12,386 | 4,622 |
from textwrap import dedent
import json
from gluon import current
from constants import ONE_HOUR
from versecontent import VERSECONTENT
class VERSE:
    """Get verse data of a verse.

    The verse data are the linguistic feature data for the words in a verse.
    ``get`` serves the cached dict/HTML variant; ``getJson`` serves a JSON
    variant with the verse text and a phonetic transcription.
    """

    def __init__(self):
        pass

    def get(self):
        """Get linguistic data of a verse.

        Reads version/book/chapter/verse/tr from the request; for ``.json``
        requests delegates to getJson.  Results are cached for ONE_HOUR.
        """
        extension = current.request.extension
        Check = current.Check
        Caching = current.Caching
        vr = Check.field("material", "", "version")
        bk = Check.field("material", "", "book")
        ch = Check.field("material", "", "chapter")
        vs = Check.field("material", "", "verse")
        tr = Check.field("material", "", "tr")
        if extension == "json":
            return self.getJson(vr, bk, ch, vs)
        if vs is None:
            return dict(good=False, msgs=[])
        return Caching.get(
            f"verse_{vr}_{bk}_{ch}_{vs}_{tr}_",
            lambda: self.get_c(vr, bk, ch, vs, tr),
            ONE_HOUR,
        )

    def get_c(self, vr, bk, ch, vs, tr):
        """Uncached worker for get(): build the VERSECONTENT material."""
        material = VERSECONTENT(
            vr,
            bk,
            ch,
            vs,
            xml=None,
            wordData=None,
            tp="txtd",
            tr=tr,
            mr=None,
        )
        good = True
        msgs = []
        if len(material.wordData) == 0:
            msgs = [("error", f"{bk} {ch}:{vs} does not exist")]
            good = False
        return dict(
            good=good,
            msgs=msgs,
            material=material,
        )

    def getJson(self, vr, bk, ch, vs):
        """Cached JSON variant: verse text plus phonetic transcription."""
        Caching = current.Caching
        return Caching.get(
            f"versej_{vr}_{bk}_{ch}_{vs}_",
            lambda: self.getJson_c(vr, bk, ch, vs),
            ONE_HOUR,
        )

    def getJson_c(self, vr, bk, ch, vs):
        """Uncached worker for getJson(): query the passage database.

        The request parameters are untrusted; previously they were
        interpolated verbatim into the SQL text, allowing SQL injection.
        Chapter and verse are now coerced with int() and single quotes in the
        book name are doubled (standard SQL escaping).  Non-numeric
        chapter/verse input now raises ValueError instead of producing a
        database syntax error.
        """
        PASSAGE_DBS = current.PASSAGE_DBS
        passageDb = PASSAGE_DBS[vr] if vr in PASSAGE_DBS else None
        msgs = []
        good = True
        data = dict()
        if passageDb is None:
            msgs.append(("Error", f"No such version: {vr}"))
            good = False
        if good:
            bk_sql = str(bk).replace("'", "''")  # escape embedded quotes
            ch_sql = int(ch)
            vs_sql = int(vs)
            verseInfo = passageDb.executesql(
                dedent(
                    f"""
                    select verse.id, verse.text from verse
                    inner join chapter on verse.chapter_id=chapter.id
                    inner join book on chapter.book_id=book.id
                    where
                    book.name = '{bk_sql}' and
                    chapter.chapter_num = {ch_sql} and
                    verse_num = {vs_sql}
                    ;
                    """
                )
            )
            if len(verseInfo) == 0:
                msgs.append(("Error", f"No such verse: {bk} {ch}:{vs}"))
                good = False
            else:
                data = verseInfo[0]
                vid = data[0]
                # vid comes from the database itself, so embedding it is safe.
                wordInfo = passageDb.executesql(
                    dedent(
                        f"""
                        select word.word_phono, word.word_phono_sep
                        from word
                        inner join word_verse on word_number = word_verse.anchor
                        inner join verse on verse.id = word_verse.verse_id
                        where verse.id = {vid}
                        order by word_number
                        ;
                        """
                    )
                )
                data = dict(
                    text=data[1], phonetic="".join(x[0] + x[1] for x in wordInfo)
                )
        return json.dumps(dict(good=good, msgs=msgs, data=data), ensure_ascii=False)
| 3,691 | 1,061 |
#!/usr/bin/env python
#
# Copyright (c) 2008-2010 Stefan Krah. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
#
# The cdec (for "check decimal") class checks cdecimal.so against decimal.py.
# A cdec object consists of a cdecimal.Decimal and a decimal.Decimal. Every
# operation is carried out on both types. If the results don't match, an
# exception is raised.
#
# Usage: python deccheck.py [--short|--medium|--long|--all]
#
import cdecimal, decimal
import sys, inspect
import array
import random
from copy import copy
from randdec import *
# Overall process exit status; flipped to nonzero when a mismatch is found.
EXIT_STATUS = 0

# Python 2.5 can require exorbitant amounts of memory for hashing integers.
PY25_HASH_HAVE_WARNED = 0

# Python 2.5 bug in _dlog10.
PY25_DLOG10_HAVE_WARNED = 0

# Running under Python 2.x: minor/micro select which workarounds apply.
py_minor = sys.version_info[1]
py_micro = sys.version_info[2]

if py_minor >= 7:
    # Float-related random test data only exists from 2.7 on.
    from randfloat import *

HAVE_FORMATHELPER = True
try:
    from formathelper import rand_format, rand_locale
except:
    HAVE_FORMATHELPER = False

if py_minor == 5 and py_micro <= 4:
    # decimal.py has additional bugs, not worth sorting them out individually.
    sys.stderr.write("""
#
# Error: deccheck2.py tests cdecimal against decimal.py. For several Python
# versions, deccheck2.py suppresses known bugs in decimal.py in the output.
# In versions 2.5.0 through 2.5.4, decimal.py has additional bugs which are
# not discarded, so the output is too verbose to be useful.
#
# cdecimal should work fine, nevertheless it is recommended to upgrade
# Python to version 2.5.5 or 2.6.6.
#
""")
    sys.exit(1)
# Translate symbols.
# cdecimal condition -> decimal.py condition.
deccond = {
    cdecimal.Clamped: decimal.Clamped,
    cdecimal.ConversionSyntax: decimal.ConversionSyntax,
    cdecimal.DivisionByZero: decimal.DivisionByZero,
    cdecimal.DivisionImpossible: decimal.InvalidOperation,
    cdecimal.DivisionUndefined: decimal.DivisionUndefined,
    cdecimal.Inexact: decimal.Inexact,
    cdecimal.InvalidContext: decimal.InvalidContext,
    cdecimal.InvalidOperation: decimal.InvalidOperation,
    cdecimal.Overflow: decimal.Overflow,
    cdecimal.Rounded: decimal.Rounded,
    cdecimal.Subnormal: decimal.Subnormal,
    cdecimal.Underflow: decimal.Underflow,
}
# decimal.py condition -> cdecimal condition.
# decimal.InvalidOperation used to appear twice here (first mapped to
# cdecimal.DivisionImpossible, then to cdecimal.InvalidOperation); the first
# entry was dead, since a later duplicate key silently wins in a dict
# literal.  The dead entry is removed: cdecimal.DivisionImpossible simply
# has no reverse mapping, as decimal.py folds it into InvalidOperation.
mpdcond = {
    decimal.Clamped: cdecimal.Clamped,
    decimal.ConversionSyntax: cdecimal.ConversionSyntax,
    decimal.DivisionByZero: cdecimal.DivisionByZero,
    decimal.DivisionUndefined: cdecimal.DivisionUndefined,
    decimal.Inexact: cdecimal.Inexact,
    decimal.InvalidContext: cdecimal.InvalidContext,
    decimal.InvalidOperation: cdecimal.InvalidOperation,
    decimal.Overflow: cdecimal.Overflow,
    decimal.Rounded: cdecimal.Rounded,
    decimal.Subnormal: cdecimal.Subnormal,
    decimal.Underflow: cdecimal.Underflow
}
# cdecimal rounding mode -> decimal.py rounding mode.
decround = {
    cdecimal.ROUND_UP: decimal.ROUND_UP,
    cdecimal.ROUND_DOWN: decimal.ROUND_DOWN,
    cdecimal.ROUND_CEILING: decimal.ROUND_CEILING,
    cdecimal.ROUND_FLOOR: decimal.ROUND_FLOOR,
    cdecimal.ROUND_HALF_UP: decimal.ROUND_HALF_UP,
    cdecimal.ROUND_HALF_DOWN: decimal.ROUND_HALF_DOWN,
    cdecimal.ROUND_HALF_EVEN: decimal.ROUND_HALF_EVEN,
    cdecimal.ROUND_05UP: decimal.ROUND_05UP
}
class Context(object):
    """Provides a convenient way of syncing the cdecimal and decimal contexts"""
    __slots__ = ['f', 'd']  # f: the cdecimal context, d: the decimal.py context

    def __init__(self, mpdctx=cdecimal.getcontext()):
        """Initialization is from the cdecimal context"""
        # NOTE(review): the default argument is evaluated once at class
        # definition time, so all default-constructed Contexts share the same
        # cdecimal context object -- apparently intentional here (one global
        # comparison context), but worth confirming.
        self.f = mpdctx
        self.d = decimal.getcontext()
        self.d.prec = self.f.prec
        self.d.Emin = self.f.Emin
        self.d.Emax = self.f.Emax
        self.d.rounding = decround[self.f.rounding]
        self.d.capitals = self.f.capitals
        self.settraps([sig for sig in self.f.traps if self.f.traps[sig]])
        self.setstatus([sig for sig in self.f.flags if self.f.flags[sig]])
        # _clamp is a private decimal.py attribute mirroring cdecimal's clamp.
        self.d._clamp = self.f._clamp

    def getprec(self):
        # The assert verifies the two contexts have not drifted apart.
        assert(self.f.prec == self.d.prec)
        return self.f.prec

    def setprec(self, val):
        self.f.prec = val
        self.d.prec = val

    def getemin(self):
        assert(self.f.Emin == self.d.Emin)
        return self.f.Emin

    def setemin(self, val):
        self.f.Emin = val
        self.d.Emin = val

    def getemax(self):
        assert(self.f.Emax == self.d.Emax)
        return self.f.Emax

    def setemax(self, val):
        self.f.Emax = val
        self.d.Emax = val

    def getround(self):
        return self.d.rounding

    def setround(self, val):
        # val is a cdecimal rounding constant; translated for decimal.py.
        self.f.rounding = val
        self.d.rounding = decround[val]

    def getcapitals(self):
        assert(self.f.capitals == self.d.capitals)
        return self.f.capitals

    def setcapitals(self, val):
        self.f.capitals = val
        self.d.capitals = val

    def getclamp(self):
        assert(self.f._clamp == self.d._clamp)
        return self.f._clamp

    def setclamp(self, val):
        self.f._clamp = val
        self.d._clamp = val

    # Synced properties: each setter writes both contexts.
    prec = property(getprec, setprec)
    Emin = property(getemin, setemin)
    Emax = property(getemax, setemax)
    rounding = property(getround, setround)
    clamp = property(getclamp, setclamp)
    capitals = property(getcapitals, setcapitals)

    def clear_traps(self):
        self.f.clear_traps()
        for trap in self.d.traps:
            self.d.traps[trap] = False

    def clear_status(self):
        self.f.clear_flags()
        self.d.clear_flags()

    def settraps(self, lst): # cdecimal signal list
        self.clear_traps()
        for signal in lst:
            self.f.traps[signal] = True
            self.d.traps[deccond[signal]] = True

    def setstatus(self, lst): # cdecimal signal list
        self.clear_status()
        for signal in lst:
            self.f.flags[signal] = True
            self.d.flags[deccond[signal]] = True

    def assert_eq_status(self):
        """assert equality of cdecimal and decimal status"""
        # Returns False on the first flag mismatch; FloatOperation has no
        # decimal.py counterpart and is skipped.
        for signal in self.f.flags:
            if signal == cdecimal.FloatOperation:
                continue
            if self.f.flags[signal] == (not self.d.flags[deccond[signal]]):
                return False
        return True
# We don't want exceptions so that we can compare the status flags.
# Shared comparison context with the widest exponent range and no traps.
context = Context()
context.Emin = cdecimal.MIN_EMIN
context.Emax = cdecimal.MAX_EMAX
context.clear_traps()

# When creating decimals, cdecimal is ultimately limited by the maximum
# context values. We emulate this restriction for decimal.py.
maxcontext = decimal.Context(
    prec=cdecimal.MAX_PREC,
    Emin=cdecimal.MIN_EMIN,
    Emax=cdecimal.MAX_EMAX,
    rounding=decimal.ROUND_HALF_UP,
    capitals=1
)
# _clamp is a private decimal.py attribute; 0 matches cdecimal's default.
maxcontext._clamp = 0
def decimal_new(value):
    """Create a decimal.Decimal from value under the maximal context.

    Mirrors cdecimal's construction limits: if conversion is inexact or
    rounded even in the maximal context, the result becomes NaN and
    InvalidOperation is flagged on the shared comparison context.
    """
    maxcontext.traps = copy(context.d.traps)
    maxcontext.clear_flags()
    dec = maxcontext.create_decimal(value)
    if maxcontext.flags[decimal.Inexact] or \
       maxcontext.flags[decimal.Rounded]:
        dec = decimal.Decimal("NaN")
        context.d.flags[decimal.InvalidOperation] = True
    return dec
# Report templates used by CdecException: sci/eng string forms, as_tuple
# forms, and the generic two-object form.
_exc_fmt = "\
cdecimal_sci: %s\n\
decimal_sci: %s\n\
cdecimal_eng: %s\n\
decimal_eng: %s\n"

_exc_fmt_tuple = "\
cdecimal_tuple: %s\n\
decimal_tuple: %s\n"

_exc_fmt_obj = "\
cdecimal: %s\n\
decimal: %s\n\n"
class CdecException(ArithmeticError):
    """Raised when cdecimal.so and decimal.py disagree on an operation.

    The message shows both results (scientific/engineering and, when they
    differ, tuple form for cdec pairs) followed by both context strings.
    """

    def __init__(self, result, funcname, operands, fctxstr, dctxstr):
        pieces = ["Error in %s(%s" % (funcname, operands[0])]
        for extra in operands[1:]:
            pieces.append(", %s" % extra)
        pieces.append("):\n\n")
        if isinstance(result, cdec):
            pieces.append(_exc_fmt % (str(result.mpd),
                                      str(result.dec),
                                      result.mpd.to_eng(),
                                      result.dec.to_eng_string()))
            mpd_tuple = result.mpd.as_tuple()
            dec_tuple = result.dec.as_tuple()
            if mpd_tuple != dec_tuple:
                pieces.append(_exc_fmt_tuple % (str(mpd_tuple),
                                                str(dec_tuple)))
        else:
            pieces.append(_exc_fmt_obj % (str(result[0]), str(result[1])))
        pieces.append("%s\n%s\n\n" % (fctxstr, dctxstr))
        self.errstring = "".join(pieces)

    def __str__(self):
        return self.errstring
class dHandlerCdec:
    """For cdec return values:
    Handle known disagreements between decimal.py and cdecimal.so.
    This is just a temporary measure against cluttered output.
    Detection is crude and possibly unreliable."""
    def __init__(self):
        # Counters: how often each known disagreement was tolerated.
        self.logb_round_if_gt_prec = 0
        self.ulpdiff = 0
        self.powmod_zeros = 0
        self.total_mag_nan = 0
        self.quantize_status = 0
        self.max_status = 0
        # Huge-exponent context used to compute "exact" reference results.
        self.maxctx = decimal.Context(Emax=10**18, Emin=-10**18)
    def default(self, result, operands):
        # Fallback: no known disagreement for this method.
        return False
    def harrison_ulp(self, dec):
        """Harrison ULP: ftp://ftp.inria.fr/INRIA/publication/publi-pdf/RR/RR-5504.pdf"""
        # Distance between the two representable neighbours of dec.
        a = dec.next_plus()
        b = dec.next_minus()
        return abs(a - b)
    def standard_ulp(self, dec, prec):
        # Unit in the last place of dec at working precision prec.
        return decimal._dec_from_triple(0, '1', dec._exp+len(dec._int)-prec)
    def rounding_direction(self, x, mode):
        """Determine the effective direction of the rounding when
        the exact result x is rounded according to mode.
        Return -1 for downwards, 0 for undirected, 1 for upwards,
        2 for ROUND_05UP."""
        d = decimal
        cmp = 1 if x.compare_total(d.Decimal("+0")) >= 0 else -1
        if mode in (d.ROUND_HALF_EVEN, d.ROUND_HALF_UP, d.ROUND_HALF_DOWN):
            return 0
        elif mode == d.ROUND_CEILING:
            return 1
        elif mode == d.ROUND_FLOOR:
            return -1
        elif mode == d.ROUND_UP:
            # away from zero: direction depends on the sign of x
            return cmp
        elif mode == d.ROUND_DOWN:
            # towards zero
            return -cmp
        elif mode == d.ROUND_05UP:
            return 2
        else:
            raise ValueError("Unexpected rounding mode: %s" % mode)
    def check_ulpdiff(self, exact, rounded):
        # Return True if 'rounded' is within the tolerated ulp error of
        # 'exact' for the effective rounding direction.
        # current precision
        p = context.d.prec
        # Convert infinities to the largest representable number + 1.
        x = exact
        if exact.is_infinite():
            x = decimal._dec_from_triple(exact._sign, '10', context.d.Emax)
        y = rounded
        if rounded.is_infinite():
            y = decimal._dec_from_triple(rounded._sign, '10', context.d.Emax)
        # err = (rounded - exact) / ulp(rounded)
        self.maxctx.prec = p * 2
        t = self.maxctx.subtract(y, x)
        if context.f._flags & cdecimal.DecClamped or \
           context.f._flags & cdecimal.DecUnderflow:
            # The standard ulp does not work in Underflow territory.
            ulp = self.harrison_ulp(y)
        else:
            ulp = self.standard_ulp(y, p)
        # Error in ulps.
        err = self.maxctx.divide(t, ulp)
        d = decimal
        dir = self.rounding_direction(x, context.d.rounding)
        if dir == 0:
            if d.Decimal("-0.6") < err < d.Decimal("0.6"):
                return True
        elif dir == 1: # directed, upwards
            if d.Decimal("-0.1") < err < d.Decimal("1.1"):
                return True
        elif dir == -1: # directed, downwards
            if d.Decimal("-1.1") < err < d.Decimal("0.1"):
                return True
        else: # ROUND_05UP
            if d.Decimal("-1.1") < err < d.Decimal("1.1"):
                return True
        print("ulp: %s error: %s exact: %s mpd_rounded: %s"
              % (ulp, err, exact, rounded))
        return False
    def un_resolve_ulp(self, result, funcname, operands):
        """Check if results of cdecimal's exp, ln and log10 functions are
        within the allowed ulp ranges. This function is only called if
        context.f._allcr is 0."""
        # "exact" result, double precision, half_even
        self.maxctx.prec = context.d.prec * 2
        op = operands[0].dec
        exact = getattr(op, funcname)(context=self.maxctx)
        # cdecimal's rounded result
        s = str(result.mpd)
        rounded = decimal.Decimal(s)
        self.ulpdiff += 1
        return self.check_ulpdiff(exact, rounded)
    def bin_resolve_ulp(self, result, funcname, operands):
        """Check if results of cdecimal's power function are within the
        allowed ulp ranges."""
        # "exact" result, double precision, half_even
        self.maxctx.prec = context.d.prec * 2
        op1 = operands[0].dec
        op2 = operands[1].dec
        exact = getattr(op1, funcname)(op2, context=self.maxctx)
        # cdecimal's rounded result
        s = str(result.mpd)
        rounded = decimal.Decimal(s)
        self.ulpdiff += 1
        return self.check_ulpdiff(exact, rounded)
    def resolve_underflow(self, result):
        """In extremely rare cases where the infinite precision result is just
        below etiny, cdecimal does not set Subnormal/Underflow. Example:

        setcontext(Context(prec=21, rounding=ROUND_UP, Emin=-55, Emax=85))
        Decimal("1.00000000000000000000000000000000000000000000000"
                "0000000100000000000000000000000000000000000000000"
                "0000000000000025").ln()
        """
        if str(result.mpd) != str(result.dec):
            return False # Results must be identical.
        if context.f.flags[cdecimal.Rounded] and \
           context.f.flags[cdecimal.Inexact] and \
           context.d.flags[decimal.Rounded] and \
           context.d.flags[decimal.Inexact]:
            return True # Subnormal/Underflow may be missing.
        return False
    def exp(self, result, operands):
        if result.mpd.is_nan() or result.dec.is_nan():
            return False
        if context.f._allcr:
            # correctly-rounded mode: only the underflow corner case applies
            return self.resolve_underflow(result)
        return self.un_resolve_ulp(result, "exp", operands)
    def log10(self, result, operands):
        if result.mpd.is_nan() or result.dec.is_nan():
            return False
        if context.f._allcr:
            return self.resolve_underflow(result)
        return self.un_resolve_ulp(result, "log10", operands)
    def ln(self, result, operands):
        if result.mpd.is_nan() or result.dec.is_nan():
            return False
        if context.f._allcr:
            return self.resolve_underflow(result)
        return self.un_resolve_ulp(result, "ln", operands)
    def __pow__(self, result, operands):
        if operands[2] is not None: # three argument __pow__
            # issue7049: third arg must fit into precision
            if (operands[0].mpd.is_zero() != operands[1].mpd.is_zero()):
                if (result.mpd == 0 or result.mpd == 1) and result.dec.is_nan():
                    if (not context.f.flags[cdecimal.InvalidOperation]) and \
                       context.d.flags[decimal.InvalidOperation]:
                        self.powmod_zeros += 1
                        return True
            # issue7049: ideal exponent
            if decimal.Decimal(str(result.mpd)) == result.dec:
                return True
        elif result.mpd.is_nan() or result.dec.is_nan():
            return False
        elif context.f.flags[cdecimal.Rounded] and \
             context.f.flags[cdecimal.Inexact] and \
             context.d.flags[decimal.Rounded] and \
             context.d.flags[decimal.Inexact]:
            # decimal.py: correctly-rounded pow()
            return self.bin_resolve_ulp(result, "__pow__", operands)
        else:
            return False
    power = __pow__
    def __radd__(self, result, operands):
        """decimal.py gives preference to the first nan"""
        if operands[0].mpd.is_nan() and operands[1].mpd.is_nan() and \
           result.mpd.is_nan() and result.dec.is_nan():
            return True
        return False
    __rmul__ = __radd__
    # Fixed in 2.7.2.
    def plus(self, result, operands):
        """special cases for zero/ROUND_FLOOR"""
        if context.f.rounding == cdecimal.ROUND_FLOOR:
            if operands[0].mpd.is_zero():
                return True
        return False
    minus = __neg__ = __pos__ = plus
    # NOTE(review): indentation was lost in extraction; rotate/shift are
    # assumed to be the only handlers under this version guard — confirm.
    if py_minor <= 6:
        def rotate(self, result, operands):
            """truncate excess digits before the operation"""
            if len(operands[0].dec._int) > context.f.prec:
                return True
            return False
        shift = rotate
    def compare_total_mag(self, result, operands):
        """fixed in Python2.6.?"""
        if operands[0].mpd.is_nan() and operands[1].mpd.is_nan() and \
           abs(result.mpd) == 1 and abs(result.dec) == 1:
            self.total_mag_nan += 1
            return True
        return False
    compare_total = compare_total_mag
    def logb(self, result, operands):
        """fixed in Python2.6.?"""
        if context.f.flags[cdecimal.Rounded] and \
           (not context.d.flags[decimal.Rounded]):
            self.logb_round_if_gt_prec += 1
            return True
        return False
    def max(self, result, operands):
        if py_minor <= 5 or py_micro <= 1:
            # broken in multiple ways, fixed in 2.6.2
            # NOTE(review): 'py_micro <= 1' also matches e.g. 2.7.0/2.7.1;
            # possibly intended as 'py_minor == 6 and py_micro <= 1'.
            return True
        # hack, since is_nan() appears to be broken on the result
        if (not result.mpd.is_nan()) and 'sNaN' in result.dec.to_eng_string():
            return True
        if context.f.flags[cdecimal.Subnormal] and \
           (not context.d.flags[decimal.Subnormal]):
            self.max_status += 1
            return True
        return False
    max_mag = max
    min = max
    min_mag = max
class dHandlerObj():
    """For non-decimal return values:
    Handle known disagreements between decimal.py and cdecimal.so."""
    def __init__(self):
        pass
    def default(self, result, operands):
        # Fallback: no known disagreement for this method.
        return False
    # Comparisons and string forms are expected to agree by default.
    __ge__ = __gt__ = __le__ = __lt__ = __str__ = __repr__ = default
    if py_minor >= 7:
        __ne__ = __eq__ = default
    if py_minor <= 6:
        def __eq__(self, result, operands):
            """cdecimal raises for all sNaN comparisons"""
            if operands[0].mpd.is_snan() or operands[1].mpd.is_snan():
                return True
            return False
        __ne__ = __eq__
    if py_minor <= 6:
        # Fixed in release26-maint, but a lot of distributed
        # versions do not have the fix yet.
        def is_normal(self, result, operands):
            # Issue7099
            if operands[0].mpd.is_normal():
                return True
            return False
    if py_minor <= 5:
        """decimal.py uses double quotes instead of single quotes."""
        def __repr__(self, result, operands):
            # Tolerate any repr difference on 2.5.
            return True
# Singleton handler instances used by the dispatch helpers below.
dhandler_cdec = dHandlerCdec()
def cdec_known_disagreement(result, funcname, operands):
    # Dispatch a cdec-valued result to its per-method handler, if any.
    return getattr(dhandler_cdec, funcname, dhandler_cdec.default)(result, operands)

dhandler_obj = dHandlerObj()
def obj_known_disagreement(result, funcname, operands):
    # Dispatch a plain-object result to its per-method handler, if any.
    return getattr(dhandler_obj, funcname, dhandler_obj.default)(result, operands)
def verify(result, funcname, operands):
    """Verifies that after operation 'funcname' with operand(s) 'operands'
    result[0] and result[1] as well as the context flags have the same
    values.

    result is a (cdecimal_result, decimal_result) pair of non-decimal
    objects.  Raises CdecException on any unexplained mismatch and marks
    the process exit status as failed.
    """
    global EXIT_STATUS
    if result[0] != result[1] or not context.assert_eq_status():
        if obj_known_disagreement(result, funcname, operands):
            return # skip known disagreements
        EXIT_STATUS = 1
        raise CdecException(result, funcname, operands,
                            str(context.f), str(context.d))
class cdec(object):
    """Joins cdecimal.so and decimal.py for redundant calculations
    with error checking.

    Every instance holds the same value twice: self.mpd
    (cdecimal.Decimal) and self.dec (decimal.Decimal).  Every operation
    is performed on both and the results, string forms and context
    status flags are cross-checked; a CdecException is raised on any
    unexplained mismatch.
    """
    __slots__ = ['mpd', 'dec']

    def __new__(cls, value=None):
        self = object.__new__(cls)
        self.mpd = None
        self.dec = None
        if value is not None:
            context.clear_status()
            if py_minor <= 6 and isinstance(value, float):
                # Decimal(float) only exists from 2.7 on; use from_float.
                self.mpd = cdecimal.Decimal.from_float(value)
                self.dec = decimal.Decimal.from_float(value)
            else:
                self.mpd = cdecimal.Decimal(value)
                self.dec = decimal_new(value)
            self.verify('__xnew__', (value,))
        return self

    def verify(self, funcname, operands):
        """Verifies that after operation 'funcname' with operand(s) 'operands'
        self.mpd and self.dec as well as the context flags have the same
        values."""
        global EXIT_STATUS
        mpdstr = str(self.mpd)
        decstr = str(self.dec)
        mpdstr_eng = self.mpd.to_eng_string()
        decstr_eng = self.dec.to_eng_string()
        mpd_tuple = self.mpd.as_tuple()
        dec_tuple = self.dec.as_tuple()
        if mpd_tuple != dec_tuple: # XXX
            # Infinity: cdecimal uses an empty coefficient tuple where
            # decimal.py uses (0,); that difference is benign.
            if mpd_tuple[2] == 'F' and dec_tuple[2] == 'F' and \
               mpd_tuple[1] == () and dec_tuple[1] == (0,):
                return
        if mpdstr != decstr or mpdstr_eng != decstr_eng or mpd_tuple != dec_tuple \
           or not context.assert_eq_status():
            if cdec_known_disagreement(self, funcname, operands):
                return # skip known disagreements
            EXIT_STATUS = 1
            raise CdecException(self, funcname, operands,
                                str(context.f), str(context.d))

    # ---- generic wrappers: run the operation on both implementations ----
    def unaryfunc(self, funcname):
        "unary function returning a cdec"
        context.clear_status()
        c = cdec()
        c.mpd = getattr(self.mpd, funcname)()
        c.dec = getattr(self.dec, funcname)()
        c.verify(funcname, (self,))
        return c

    def unaryfunc_ctx(self, funcname):
        "unary function returning a cdec, uses the context methods of decimal.py"
        context.clear_status()
        c = cdec()
        c.mpd = getattr(self.mpd, funcname)()
        c.dec = getattr(context.d, funcname)(self.dec)
        c.verify(funcname, (self,))
        return c

    def obj_unaryfunc(self, funcname):
        "unary function returning an object other than a cdec"
        context.clear_status()
        r_mpd = getattr(self.mpd, funcname)()
        r_dec = getattr(self.dec, funcname)()
        verify((r_mpd, r_dec), funcname, (self,))
        return r_mpd

    def binaryfunc(self, other, funcname):
        "binary function returning a cdec"
        context.clear_status()
        c = cdec()
        other_mpd = other_dec = other
        if isinstance(other, cdec):
            other_mpd = other.mpd
            other_dec = other.dec
        c.mpd = getattr(self.mpd, funcname)(other_mpd)
        c.dec = getattr(self.dec, funcname)(other_dec)
        c.verify(funcname, (self, other))
        return c

    def binaryfunc_ctx(self, other, funcname):
        "binary function returning a cdec, uses the context methods of decimal.py"
        context.clear_status()
        c = cdec()
        other_mpd = other_dec = other
        if isinstance(other, cdec):
            other_mpd = other.mpd
            other_dec = other.dec
        c.mpd = getattr(self.mpd, funcname)(other_mpd)
        c.dec = getattr(context.d, funcname)(self.dec, other_dec)
        c.verify(funcname, (self, other))
        return c

    def obj_binaryfunc(self, other, funcname):
        "binary function returning an object other than a cdec"
        context.clear_status()
        other_mpd = other_dec = other
        if isinstance(other, cdec):
            other_mpd = other.mpd
            other_dec = other.dec
        r_mpd = getattr(self.mpd, funcname)(other_mpd)
        r_dec = getattr(self.dec, funcname)(other_dec)
        verify((r_mpd, r_dec), funcname, (self, other))
        return r_mpd

    def ternaryfunc(self, other, third, funcname):
        "ternary function returning a cdec"
        context.clear_status()
        c = cdec()
        other_mpd = other_dec = other
        if isinstance(other, cdec):
            other_mpd = other.mpd
            other_dec = other.dec
        third_mpd = third_dec = third
        if isinstance(third, cdec):
            third_mpd = third.mpd
            third_dec = third.dec
        c.mpd = getattr(self.mpd, funcname)(other_mpd, third_mpd)
        c.dec = getattr(self.dec, funcname)(other_dec, third_dec)
        c.verify(funcname, (self, other, third))
        return c

    # ---- special methods ----
    def __abs__(self):
        return self.unaryfunc('__abs__')

    def __add__(self, other):
        return self.binaryfunc(other, '__add__')

    def __copy__(self):
        return self.unaryfunc('__copy__')

    def __deepcopy__(self, memo=None):
        context.clear_status()
        c = cdec()
        c.mpd = self.mpd.__deepcopy__(memo)
        c.dec = self.dec.__deepcopy__(memo)
        c.verify('__deepcopy__', (self,))
        return c

    def __div__(self, other):
        return self.binaryfunc(other, '__div__')

    def __divmod__(self, other):
        context.clear_status()
        q = cdec()
        r = cdec()
        other_mpd = other_dec = other
        if isinstance(other, cdec):
            other_mpd = other.mpd
            other_dec = other.dec
        q.mpd, r.mpd = self.mpd.__divmod__(other_mpd)
        q.dec, r.dec = self.dec.__divmod__(other_dec, context.d)
        q.verify('__divmod__', (self, other))
        r.verify('__divmod__', (self, other))
        return (q, r)

    def __eq__(self, other):
        return self.obj_binaryfunc(other, '__eq__')

    def __float__(self):
        if (self.mpd.is_nan() and self.dec.is_nan()):
            return float("NaN")
        try:
            return self.obj_unaryfunc('__float__')
        except ValueError:
            return None

    def __floordiv__(self, other):
        return self.binaryfunc(other, '__floordiv__')

    def __ge__(self, other):
        return self.obj_binaryfunc(other, '__ge__')

    def __gt__(self, other):
        return self.obj_binaryfunc(other, '__gt__')

    def __hash__(self):
        global PY25_HASH_HAVE_WARNED
        if self.mpd.is_snan() or (py_minor <= 6 and self.mpd.is_nan()):
            # Deliberately short-circuits for the test driver; the raise
            # below is intentionally unreachable.
            return None # for testing
            raise ValueError('Cannot hash a NaN value.')
        ret = None
        try: # Python 2.5 can use exorbitant amounts of memory
            ret = self.obj_unaryfunc('__hash__')
        except MemoryError:
            if not PY25_HASH_HAVE_WARNED:
                sys.stderr.write("Out of memory while hashing %s: upgrade to Python 2.6\n"
                                 % str(self.mpd))
                PY25_HASH_HAVE_WARNED = 1
        return ret

    def __int__(self):
        # ValueError or OverflowError
        # Sentinel pair for special values so the driver never raises here.
        if self.mpd.is_special():
            return (None, None)
        return self.obj_unaryfunc('__int__')

    def __le__(self, other):
        return self.obj_binaryfunc(other, '__le__')

    def __long__(self):
        # ValueError or OverflowError
        if self.mpd.is_special():
            return (None, None)
        return self.obj_unaryfunc('__long__')

    def __lt__(self, other):
        return self.obj_binaryfunc(other, '__lt__')

    def __mod__(self, other):
        return self.binaryfunc(other, '__mod__')

    def __mul__(self, other):
        return self.binaryfunc(other, '__mul__')

    def __ne__(self, other):
        return self.obj_binaryfunc(other, '__ne__')

    def __neg__(self):
        return self.unaryfunc('__neg__')

    def __nonzero__(self):
        return self.obj_unaryfunc('__nonzero__')

    def __pos__(self):
        return self.unaryfunc('__pos__')

    def __pow__(self, other, mod=None):
        return self.ternaryfunc(other, mod, '__pow__')

    def __radd__(self, other):
        return self.binaryfunc(other, '__radd__')

    def __rdiv__(self, other):
        return self.binaryfunc(other, '__rdiv__')

    def __rdivmod__(self, other):
        context.clear_status()
        q = cdec()
        r = cdec()
        other_mpd = other_dec = other
        if isinstance(other, cdec):
            other_mpd = other.mpd
            other_dec = other.dec
        q.mpd, r.mpd = self.mpd.__rdivmod__(other_mpd)
        q.dec, r.dec = self.dec.__rdivmod__(other_dec, context.d)
        q.verify('__rdivmod__', (self, other))
        r.verify('__rdivmod__', (self, other))
        return (q, r)

    # __reduce__
    def __repr__(self):
        self.obj_unaryfunc('__repr__')
        return "cdec('" + str(self.mpd) + "')"

    def __rfloordiv__(self, other):
        return self.binaryfunc(other, '__rfloordiv__')

    def __rmod__(self, other):
        return self.binaryfunc(other, '__rmod__')

    def __rmul__(self, other):
        return self.binaryfunc(other, '__rmul__')

    def __rsub__(self, other):
        return self.binaryfunc(other, '__rsub__')

    def __rtruediv__(self, other):
        return self.binaryfunc(other, '__rtruediv__')

    def __rpow__(self, other):
        return other.__pow__(self)

    def __str__(self):
        self.obj_unaryfunc('__str__')
        return str(self.mpd)

    def __sub__(self, other):
        return self.binaryfunc(other, '__sub__')

    def __truediv__(self, other):
        return self.binaryfunc(other, '__truediv__')

    def __trunc__(self):
        # ValueError or OverflowError
        if self.mpd.is_special():
            return (None, None)
        return self.obj_unaryfunc('__trunc__')

    # ---- named methods mirroring the Decimal API ----
    def _apply(self):
        return self.unaryfunc('_apply')

    def abs(self):
        return self.unaryfunc_ctx('abs')

    def add(self, other):
        return self.binaryfunc_ctx(other, 'add')

    def adjusted(self):
        return self.obj_unaryfunc('adjusted')

    def canonical(self):
        return self.unaryfunc('canonical')

    def compare(self, other):
        return self.binaryfunc(other, 'compare')

    def compare_signal(self, other):
        return self.binaryfunc(other, 'compare_signal')

    def compare_total(self, other):
        return self.binaryfunc(other, 'compare_total')

    def compare_total_mag(self, other):
        return self.binaryfunc(other, 'compare_total_mag')

    def copy_abs(self):
        return self.unaryfunc('copy_abs')

    def copy_negate(self):
        return self.unaryfunc('copy_negate')

    def copy_sign(self, other):
        return self.binaryfunc(other, 'copy_sign')

    def divide(self, other):
        return self.binaryfunc_ctx(other, 'divide')

    def divide_int(self, other):
        return self.binaryfunc_ctx(other, 'divide_int')

    def divmod(self, other):
        context.clear_status()
        q = cdec()
        r = cdec()
        other_mpd = other_dec = other
        if isinstance(other, cdec):
            other_mpd = other.mpd
            other_dec = other.dec
        q.mpd, r.mpd = self.mpd.divmod(other_mpd)
        q.dec, r.dec = context.d.divmod(self.dec, other_dec)
        q.verify('divmod', (self, other))
        r.verify('divmod', (self, other))
        return (q, r)

    def exp(self):
        return self.unaryfunc('exp')

    def fma(self, other, third):
        return self.ternaryfunc(other, third, 'fma')

    # imag
    # invroot
    def is_canonical(self):
        return self.obj_unaryfunc('is_canonical')

    def is_finite(self):
        return self.obj_unaryfunc('is_finite')

    def is_infinite(self):
        return self.obj_unaryfunc('is_infinite')

    def is_nan(self):
        return self.obj_unaryfunc('is_nan')

    def is_normal(self):
        return self.obj_unaryfunc('is_normal')

    def is_qnan(self):
        return self.obj_unaryfunc('is_qnan')

    def is_signed(self):
        return self.obj_unaryfunc('is_signed')

    def is_snan(self):
        return self.obj_unaryfunc('is_snan')

    def is_subnormal(self):
        return self.obj_unaryfunc('is_subnormal')

    def is_zero(self):
        return self.obj_unaryfunc('is_zero')

    def ln(self):
        return self.unaryfunc('ln')

    def log10(self):
        global PY25_DLOG10_HAVE_WARNED
        try:
            return self.unaryfunc('log10')
        except NameError:
            if not PY25_DLOG10_HAVE_WARNED:
                sys.stderr.write(
                    "\n\n*** warning: detected known bug in decimal.py: "
                    "replace div_nearest with _div_nearest in _dlog10().\n\n\n")
                PY25_DLOG10_HAVE_WARNED = 1
            return None

    def logb(self):
        return self.unaryfunc('logb')

    def logical_and(self, other):
        return self.binaryfunc(other, 'logical_and')

    def logical_invert(self):
        return self.unaryfunc('logical_invert')

    def logical_or(self, other):
        return self.binaryfunc(other, 'logical_or')

    def logical_xor(self, other):
        return self.binaryfunc(other, 'logical_xor')

    def max(self, other):
        return self.binaryfunc(other, 'max')

    def max_mag(self, other):
        return self.binaryfunc(other, 'max_mag')

    def min(self, other):
        # Bug fix: previously delegated to 'min_mag' (copy-paste error),
        # so Decimal.min was never actually exercised by the harness.
        return self.binaryfunc(other, 'min')

    def min_mag(self, other):
        return self.binaryfunc(other, 'min_mag')

    def minus(self):
        return self.unaryfunc_ctx('minus')

    def multiply(self, other):
        return self.binaryfunc_ctx(other, 'multiply')

    def next_minus(self):
        return self.unaryfunc('next_minus')

    def next_plus(self):
        return self.unaryfunc('next_plus')

    def next_toward(self, other):
        return self.binaryfunc(other, 'next_toward')

    def normalize(self):
        return self.unaryfunc('normalize')

    def number_class(self):
        return self.obj_unaryfunc('number_class')

    def plus(self):
        return self.unaryfunc_ctx('plus')

    def power(self, other, third=None):
        "ternary function returning a cdec, uses the context methods of decimal.py"
        context.clear_status()
        c = cdec()
        other_mpd = other_dec = other
        if isinstance(other, cdec):
            other_mpd = other.mpd
            other_dec = other.dec
        third_mpd = third_dec = third
        if isinstance(third, cdec):
            third_mpd = third.mpd
            third_dec = third.dec
        c.mpd = pow(self.mpd, other_mpd, third_mpd)
        c.dec = pow(self.dec, other_dec, third_dec)
        c.verify('power', (self, other, third))
        return c

    # powmod: same as __pow__ or power with three arguments
    def quantize(self, other):
        return self.binaryfunc(other, 'quantize')

    def radix(self):
        return self.unaryfunc('radix')

    # real
    # reduce: same as normalize
    def remainder(self, other):
        return self.binaryfunc_ctx(other, 'remainder')

    def remainder_near(self, other):
        return self.binaryfunc(other, 'remainder_near')

    def rotate(self, other):
        return self.binaryfunc(other, 'rotate')

    def same_quantum(self, other):
        return self.obj_binaryfunc(other, 'same_quantum')

    def scaleb(self, other):
        return self.binaryfunc(other, 'scaleb')

    def shift(self, other):
        return self.binaryfunc(other, 'shift')

    # sign
    def sqrt(self):
        return self.unaryfunc('sqrt')

    def subtract(self, other):
        return self.binaryfunc_ctx(other, 'subtract')

    def to_eng_string(self):
        return self.obj_unaryfunc('to_eng_string')

    def to_integral(self):
        return self.unaryfunc('to_integral')

    def to_integral_exact(self):
        return self.unaryfunc('to_integral_exact')

    def to_integral_value(self):
        return self.unaryfunc('to_integral_value')

    def to_sci_string(self):
        context.clear_status()
        r_mpd = self.mpd.to_sci_string()
        r_dec = context.d.to_sci_string(self.dec)
        verify((r_mpd, r_dec), 'to_sci_string', (self,))
        return r_mpd
def log(fmt, args=None):
    """Write fmt (optionally %-formatted with args) plus a newline to
    stdout and flush immediately so output interleaves with tracebacks."""
    if args:
        line = (fmt + '\n') % args
    else:
        line = str(fmt) + '\n'
    sys.stdout.write(line)
    sys.stdout.flush()
def test_method(method, testspecs, testfunc):
    """Run testfunc for 'method' over every combination of precision,
    exponent range, rounding mode, capitals and clamp described by the
    test specification dicts in testspecs."""
    log("testing %s ...", method)
    for spec in testspecs:
        if 'samples' in spec:
            # Replace the precision list by a random sample of precisions.
            # NOTE(review): assumes spec['samples'] is an int here; a None
            # value would make random.sample raise — verify against the
            # __main__ option handling.
            spec['prec'] = sorted(random.sample(range(1, 101),
                                                spec['samples']))
        for prec in spec['prec']:
            context.prec = prec
            for expts in spec['expts']:
                emin, emax = expts
                if emin == 'rand':
                    context.Emin = random.randrange(-1000, 0)
                    context.Emax = random.randrange(prec, 1000)
                else:
                    context.Emin, context.Emax = emin, emax
                # Skip combinations where the precision exceeds Emax.
                if prec > context.Emax: continue
                log(" prec: %d emin: %d emax: %d",
                    (context.prec, context.Emin, context.Emax))
                # Restricted exponent range for operations that would be
                # pathologically slow with huge exponents.
                restr_range = 9999 if context.Emax > 9999 else context.Emax+99
                for rounding in sorted(decround):
                    context.rounding = rounding
                    context.capitals = random.randrange(2)
                    if spec['clamp'] == 2:
                        # clamp == 2 means: choose 0 or 1 at random.
                        context.clamp = random.randrange(2)
                    else:
                        context.clamp = spec['clamp']
                    exprange = context.f.Emax
                    testfunc(method, prec, exprange, restr_range, spec['iter'])
def test_unary(method, prec, exprange, restr_range, iter):
    """Drive a unary cdec method over systematic and random operands;
    CdecException is logged rather than propagated so one mismatch does
    not abort the run."""
    # Integer conversions materialize huge Python ints; keep exponents small.
    # NOTE(review): 'to_integral_value' is listed twice — the second entry
    # was possibly meant to be 'to_integral_exact'; confirm before changing.
    if method in ['__int__', '__long__', '__trunc__', 'to_integral',
                  'to_integral_value', 'to_integral_value']:
        exprange = restr_range
    if py_minor == 5 and method == '__hash__':
        # Python 2.5 hashing can use exorbitant memory for huge exponents.
        exprange = restr_range
    for a in un_close_to_pow10(prec, exprange, iter):
        try:
            x = cdec(a)
            getattr(x, method)()
        except CdecException, err:
            log(err)
    for a in un_close_numbers(prec, exprange, -exprange, iter):
        try:
            x = cdec(a)
            getattr(x, method)()
        except CdecException, err:
            log(err)
    for a in un_incr_digits_tuple(prec, exprange, iter):
        try:
            x = cdec(a)
            getattr(x, method)()
        except CdecException, err:
            log(err)
    if py_minor >= 7:
        # float operands require Decimal(float), available from 2.7.
        for a in un_randfloat():
            try:
                x = cdec(a)
                getattr(x, method)()
            except CdecException, err:
                log(err)
    for i in range(1000):
        try:
            s = randdec(prec, exprange)
            x = cdec(s)
            getattr(x, method)()
        except CdecException, err:
            log(err)
        except OverflowError:
            pass
        try:
            s = randtuple(prec, exprange)
            x = cdec(s)
            getattr(x, method)()
        except CdecException, err:
            log(err)
        except OverflowError:
            pass
def test_un_logical(method, prec, exprange, restr_range, iter):
    """Drive a unary logical method (operands restricted to 0/1
    coefficients by the logical_* generators)."""
    for a in logical_un_incr_digits(prec, iter):
        try:
            x = cdec(a)
            getattr(x, method)()
        except CdecException, err:
            log(err)
    for i in range(1000):
        try:
            s = randdec(prec, restr_range)
            x = cdec(s)
            getattr(x, method)()
        except CdecException, err:
            log(err)
        except OverflowError:
            pass
def test_binary(method, prec, exprange, restr_range, iter):
    """Drive a binary cdec method over systematic and random operand
    pairs; mismatches are logged, not raised."""
    # Power with huge exponents is prohibitively slow; restrict the range.
    if method in ['__pow__', '__rpow__', 'power']:
        exprange = restr_range
    for a, b in bin_close_to_pow10(prec, exprange, iter):
        try:
            x = cdec(a)
            y = cdec(b)
            getattr(x, method)(y)
        except CdecException, err:
            log(err)
    for a, b in bin_close_numbers(prec, exprange, -exprange, iter):
        try:
            x = cdec(a)
            y = cdec(b)
            getattr(x, method)(y)
        except CdecException, err:
            log(err)
    for a, b in bin_incr_digits(prec, exprange, iter):
        try:
            x = cdec(a)
            y = cdec(b)
            getattr(x, method)(y)
        except CdecException, err:
            log(err)
    if py_minor >= 7:
        for a, b in bin_randfloat():
            try:
                x = cdec(a)
                y = cdec(b)
                getattr(x, method)(y)
            except CdecException, err:
                log(err)
    for i in range(1000):
        s1 = randdec(prec, exprange)
        s2 = randdec(prec, exprange)
        try:
            x = cdec(s1)
            y = cdec(s2)
            getattr(x, method)(y)
        except CdecException, err:
            log(err)
def test_bin_logical(method, prec, exprange, restr_range, iter):
    """Drive a binary logical method (logical_and/or/xor) over 0/1
    coefficient operands and random decimals."""
    for a, b in logical_bin_incr_digits(prec, iter):
        try:
            x = cdec(a)
            y = cdec(b)
            getattr(x, method)(y)
        except CdecException, err:
            log(err)
    for i in range(1000):
        s1 = randdec(prec, restr_range)
        s2 = randdec(prec, restr_range)
        try:
            x = cdec(s1)
            y = cdec(s2)
            getattr(x, method)(y)
        except CdecException, err:
            log(err)
def test_ternary(method, prec, exprange, restr_range, iter):
    """Drive a ternary cdec method (fma, three-argument pow) over
    systematic and random operand triples."""
    if method in ['__pow__', 'power']:
        # Three-argument pow with huge exponents is prohibitively slow.
        exprange = restr_range
    for a, b, c in tern_close_numbers(prec, exprange, -exprange, iter):
        try:
            x = cdec(a)
            y = cdec(b)
            z = cdec(c)
            getattr(x, method)(y, z)
        except CdecException, err:
            log(err)
    for a, b, c in tern_incr_digits(prec, exprange, iter):
        try:
            x = cdec(a)
            y = cdec(b)
            z = cdec(c)
            getattr(x, method)(y, z)
        except CdecException, err:
            log(err)
    if py_minor >= 7:
        for a, b, c in tern_randfloat():
            try:
                x = cdec(a)
                y = cdec(b)
                z = cdec(c)
                getattr(x, method)(y, z)
            except CdecException, err:
                log(err)
    for i in range(1000):
        s1 = randdec(prec, 2*exprange)
        s2 = randdec(prec, 2*exprange)
        s3 = randdec(prec, 2*exprange)
        try:
            x = cdec(s1)
            y = cdec(s2)
            z = cdec(s3)
            getattr(x, method)(y, z)
        except CdecException, err:
            log(err)
def test_format(method, prec, exprange, restr_range, iter):
    """Compare format() output of cdecimal and decimal for random format
    strings; differences and formatting exceptions are printed, never
    raised."""
    for a in un_incr_digits_tuple(prec, restr_range, iter):
        context.clear_status()
        try:
            # Random fill character plus a random format spec.
            fmt = rand_format(chr(random.randrange(32, 128)))
            x = format(context.f.create_decimal(a), fmt)
            y = format(context.d.create_decimal(a), fmt)
        except Exception, err:
            # Many random specs are simply invalid; report and move on.
            print err, fmt
            continue
        if x != y:
            print context.f
            print context.d
            print "\n%s %s" % (a, fmt)
            print "%s %s\n" % (x, y)
    for i in range(1000):
        context.clear_status()
        try:
            a = randdec(99, 9999)
            fmt = rand_format(chr(random.randrange(32, 128)))
            x = format(context.f.create_decimal(a), fmt)
            y = format(context.d.create_decimal(a), fmt)
        except Exception, err:
            print err, fmt
            continue
        if x != y:
            print context.f
            print context.d
            print "\n%s %s" % (a, fmt)
            print "%s %s\n" % (x, y)
def test_locale(method, prec, exprange, restr_range, iter):
    """Compare locale-aware ('n' type) format() output of cdecimal and
    decimal; mismatches are dumped as byte arrays so invisible
    locale-specific separator differences show up."""
    for a in un_incr_digits_tuple(prec, restr_range, iter):
        context.clear_status()
        try:
            # rand_locale() switches LC_NUMERIC and returns a format spec.
            fmt = rand_locale()
            x = format(context.f.create_decimal(a), fmt)
            y = format(context.d.create_decimal(a), fmt)
        except Exception, err:
            print err, fmt
            continue
        if x != y:
            print context.f
            print context.d
            print locale.getlocale(locale.LC_NUMERIC)
            print "%s %s" % (a, fmt)
            # Byte-level dump: separator bytes may not be printable.
            print array.array('b', x)
            print array.array('b', y)
    for i in range(1000):
        context.clear_status()
        try:
            a = randdec(99, 9999)
            fmt = rand_locale()
            x = format(context.f.create_decimal(a), fmt)
            y = format(context.d.create_decimal(a), fmt)
        except Exception, err:
            print err, fmt
            continue
        if x != y:
            print context.f
            print context.d
            print locale.getlocale(locale.LC_NUMERIC)
            print "%s %s" % (a, fmt)
            print array.array('b', x)
            print array.array('b', y)
def test_from_float(method, prec, exprange, restr_range, iter):
    """Construct cdec instances from random floats, exercising the
    from_float conversion path in both implementations."""
    # NOTE(review): the exprange parameter is unconditionally overridden
    # with the IEEE double decimal exponent range.
    exprange = 384
    for i in range(1000):
        context.clear_status()
        # Random float with up to 38 digits each side of the point.
        intpart = str(random.randrange(100000000000000000000000000000000000000))
        fracpart = str(random.randrange(100000000000000000000000000000000000000))
        exp = str(random.randrange(-384, 384))
        fstring = intpart + '.' + fracpart + 'e' + exp
        f = float(fstring)
        try:
            c = cdec(f)
        except CdecException, err:
            log(err)
def assert_eq_status(c, d):
    """assert equality of cdecimal and decimal status"""
    # Module-level variant of Context.assert_eq_status for ad-hoc contexts:
    # c is a cdecimal.Context, d a decimal.Context.  'deccond' maps each
    # cdecimal signal to its decimal.py counterpart.
    for signal in c.flags:
        # decimal.py has no FloatOperation signal.
        if signal == cdecimal.FloatOperation:
            continue
        # Truth-value XOR: flags differ exactly when one is set and the
        # other is not.
        if c.flags[signal] == (not d.flags[deccond[signal]]):
            return False
    return True
def test_quantize_api(method, prec, exprange, restr_range, iter):
    """Exercise the extended quantize() signature (explicit rounding and
    context arguments, positionally and by keyword) on fresh contexts,
    comparing results and status between cdecimal and decimal."""
    for a in un_incr_digits(prec, restr_range, 1):
        emax = random.randrange(exprange)
        emin = random.randrange(-exprange, 0)
        clamp = random.randrange(2)
        exp = randdec(2*prec, exprange)
        for rounding in sorted(decround):
            try:
                c = cdecimal.Context(prec=prec, Emax=emax, Emin=emin, clamp=clamp, traps=[])
                d = decimal.Context(prec=prec, Emax=emax, Emin=emin, _clamp=clamp, traps=[])
                x = cdecimal.Decimal(a)
                y = cdecimal.Decimal(exp)
                # Positional rounding/context arguments.
                cresult = x.quantize(y, rounding, c)
                u = decimal.Decimal(a)
                v = decimal.Decimal(exp)
                dresult = u.quantize(v, decround[rounding], d)
            except Exception, err:
                print(err)
                continue
            if str(cresult) != str(dresult) or \
               not assert_eq_status(c, d):
                print("%s\n%s\n" % (c, d))
                print("x: %s\ny: %s\nu: %s\nv: %s\n" % (x, y, u, v))
                print("a: %s exp: %s\n" % (a, exp))
                print("cresult: %s\ndresult: %s\n" % (cresult, dresult))
    for i in range(1000):
        a = randdec(prec, 9999)
        # prec is deliberately re-randomized after generating 'a'.
        prec = random.randrange(1, 50)
        emax = random.randrange(exprange)
        emin = random.randrange(-exprange, 0)
        clamp = random.randrange(2)
        exp = randdec(2*prec, exprange)
        for rounding in sorted(decround):
            try:
                c = cdecimal.Context(prec=prec, Emax=emax, Emin=emin, clamp=clamp, traps=[])
                d = decimal.Context(prec=prec, Emax=emax, Emin=emin, _clamp=clamp, traps=[])
                x = cdecimal.Decimal(a)
                y = cdecimal.Decimal(exp)
                # Same call, keyword-argument form.
                cresult = x.quantize(context=c, exp=y, rounding=rounding)
                u = decimal.Decimal(a)
                v = decimal.Decimal(exp)
                dresult = u.quantize(context=d, exp=v, rounding=decround[rounding])
            except Exception, err:
                print(err)
                continue
            if str(cresult) != str(dresult) or \
               not assert_eq_status(c, d):
                print("%s\n%s\n" % (c, d))
                print("x: %s\ny: %s\nu: %s\nv: %s\n" % (x, y, u, v))
                print("a: %s exp: %s\n" % (a, exp))
                print("cresult: %s\ndresult: %s\n" % (cresult, dresult))
if __name__ == '__main__':
    import time
    # Time-based seed, printed below so failing runs can be reproduced.
    randseed = int(time.time())
    random.seed(randseed)

    base_expts = [(cdecimal.MIN_EMIN, cdecimal.MAX_EMAX)]
    # 64-bit build: also test the 32-bit exponent limits.
    if cdecimal.MAX_EMAX == 999999999999999999:
        base_expts.append((-999999999, 999999999))

    # Test-spec dicts consumed by test_method(): 'clamp' == 2 means
    # "choose randomly", 'iter'/'samples' == None means "use defaults".
    base = {
        'name': 'base',
        'expts': base_expts,
        'prec': [],
        'clamp': 2,
        'iter': None,
        'samples': None,
    }
    small = {
        'name': 'small',
        'prec': [1, 2, 3, 4, 5],
        'expts': [(-1,1), (-2,2), (-3,3), (-4,4), (-5,5)],
        'clamp': 2,
        'iter': None
    }
    ieee = [
        {'name': 'decimal32', 'prec': [7], 'expts': [(-95, 96)], 'clamp': 1, 'iter': None},
        {'name': 'decimal64', 'prec': [16], 'expts': [(-383, 384)], 'clamp': 1, 'iter': None},
        {'name': 'decimal128', 'prec': [34], 'expts': [(-6143, 6144)], 'clamp': 1, 'iter': None}
    ]

    # NOTE(review): when only '--medium' is given, the trailing
    # 'else:  # --short' branch below still runs and overrides this
    # configuration — the first 'if' was possibly meant to be part of the
    # if/elif chain.  Confirm intent before changing.
    if '--medium' in sys.argv:
        base['expts'].append(('rand', 'rand'))
        base['samples'] = None
        testspecs = [small] + ieee + [base]
    if '--long' in sys.argv:
        base['expts'].append(('rand', 'rand'))
        base['samples'] = 5
        testspecs = [small] + ieee + [base]
    elif '--all' in sys.argv:
        base['expts'].append(('rand', 'rand'))
        base['samples'] = 100
        testspecs = [small] + ieee + [base]
    else: # --short
        rand_ieee = random.choice(ieee)
        base['iter'] = small['iter'] = rand_ieee['iter'] = 1
        base['samples'] = 1
        base['expts'] = [random.choice(base_expts)]
        prec = random.randrange(1, 6)
        small['prec'] = [prec]
        small['expts'] = [(-prec, prec)]
        testspecs = [small, rand_ieee, base]

    # Classify every cdec method that mirrors a Decimal method by arity,
    # based on its argument count (C-implemented members raise TypeError
    # in getargspec and are skipped).
    all_decimal_methods = set(dir(cdecimal.Decimal) + dir(decimal.Decimal))
    all_cdec_methods = [m for m in dir(cdec) if m in all_decimal_methods]
    untested_methods = [m for m in all_decimal_methods if not (m in all_cdec_methods)]
    unary_methods = []
    binary_methods = []
    ternary_methods = []
    for m in all_cdec_methods:
        try:
            l = len(inspect.getargspec(getattr(cdec, m))[0])
        except TypeError:
            continue
        if l == 1:
            unary_methods.append(m)
        elif l == 2:
            binary_methods.append(m)
        elif l == 3:
            ternary_methods.append(m)
        else:
            raise ValueError((m, l))
    # __deepcopy__ takes a memo argument but behaves as a unary operation.
    unary_methods.append('__deepcopy__')
    binary_methods.remove('__deepcopy__')
    binary_methods.remove('__new__')
    # power(third=None) is classified ternary above; also test it binary.
    binary_methods.append('power')
    # from_float gets its own dedicated test below.
    untested_methods.remove('from_float')
    if py_minor < 6:
        # No __trunc__ and no rich comparisons on Decimal before 2.6.
        unary_methods.remove('__trunc__')
        for elem in ['__ge__', '__gt__', '__le__', '__lt__']:
            binary_methods.remove(elem)
    untested_methods.sort()
    unary_methods.sort()
    binary_methods.sort()
    ternary_methods.sort()

    log("\nRandom seed: %d\n\n", randseed)
    log("Skipping tests: \n\n%s\n", untested_methods)

    for method in unary_methods:
        test_method(method, testspecs, test_unary)
    for method in binary_methods:
        test_method(method, testspecs, test_binary)
    for method in ternary_methods:
        test_method(method, testspecs, test_ternary)
    test_method('logical_invert', testspecs, test_un_logical)
    for method in ['logical_and', 'logical_or', 'logical_xor']:
        test_method(method, testspecs, test_bin_logical)
    test_method('quantize_api', testspecs, test_quantize_api)
    if HAVE_FORMATHELPER and py_minor >= 7:
        # Some tests will fail with 2.6, since alignment has been changed
        # in decimal.py 2.7.
        test_method('format', testspecs, test_format)
        test_method('locale', testspecs, test_locale)
    test_method('from_float', testspecs, test_from_float)

    # Non-zero when any unexplained disagreement occurred.
    sys.exit(EXIT_STATUS)
| 53,063 | 17,701 |
import cv2
import numpy as np
from joblib import load as model_load
from api.services.file_select import FileTask
def preprocess_images(image_files, img_size):
    """Load, grayscale, resize and smooth a batch of image files.

    :param image_files: iterable of image file paths readable by ``cv2.imread``.
    :param img_size: target square side length, in pixels.
    :return: float32 array of shape ``(n_images, img_size * img_size)`` --
        one flattened feature vector per image, ready for the UMAP models.
    :raises ValueError: if a file cannot be read as an image.
    """
    images = []
    for file in image_files:
        img = cv2.imread(file)
        if img is None:
            # cv2.imread silently returns None for missing/corrupt files;
            # fail loudly here instead of crashing inside cvtColor with an
            # opaque cv2.error.
            raise ValueError(f"could not read image file: {file}")
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(img, (img_size, img_size))
        img = img.astype('float32')
        # Edge-preserving denoise before flattening.
        img = cv2.bilateralFilter(img, 9, 50, 50)
        # #image = cv2.normalize(image, np.zeros((img_size, img_size)), 0, 1, cv2.NORM_MINMAX)
        images.append(img)
    images = np.asarray(images)
    # Flatten each image into a single row.
    return images.reshape((images.shape[0], -1))
def identify(images, module):
    """Embed *images* with the pre-trained identification UMAP model for *module*."""
    umap_model = model_load(f"/app/models/{module}/umap_identify.joblib")
    return umap_model.transform(images)
def classify(images, module):
    """Embed *images* with the pre-trained classification UMAP model for *module*."""
    umap_model = model_load(f"/app/models/{module}/umap_classify.joblib")
    return umap_model.transform(images)
class UMAP:
    """Dispatches preprocessed image batches to the task-specific UMAP model."""

    # Side length (pixels) every input image is resized to.
    IMG_SIZE = 256

    def transform(self, files, module, task):
        """Preprocess *files* and embed them according to *task*."""
        images = preprocess_images(files, self.IMG_SIZE)
        if task == FileTask.IDENTIFICATION.value:
            return identify(images, module)
        if task == FileTask.CLASSIFICATION.value:
            return classify(images, module)
        raise NotImplementedError(f"UMAP has not implemented task: {task}")
| 1,362 | 466 |
# -*- coding: utf-8 -*-
# @Time : 2020/9/9-22:50
# @Author : TuringEmmy
# @Email : yonglonggeng@163.com
# @WeChat : csy_lgy
# @File : support_vector_machine.py
# @Project : Sep-Dragon
# *************************************************
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
class SVM:
    """Two-class SVM trained with sequential minimal optimization (SMO).

    Labels are expected to be +1 / -1. Supports a linear kernel and a
    degree-2 polynomial kernel (select with ``kernel='linear'|'poly'``).
    """

    def __init__(self, max_iter=100, kernel='linear'):
        self.max_iter = max_iter
        self._kernel = kernel

    def init_args(self, features, labels):
        """Initialise the optimisation state from the training data."""
        self.m, self.n = features.shape
        self.X = features
        self.Y = labels
        self.b = 0.0
        # Lagrange multipliers; E caches the prediction errors E_i = g(x_i) - y_i.
        self.alpha = np.ones(self.m)
        self.E = [self._E(i) for i in range(self.m)]
        # Soft-margin penalty parameter.
        self.C = 1.0

    def _KKT(self, i):
        """Return True when sample i satisfies the KKT conditions."""
        y_g = self._g(i) * self.Y[i]
        if self.alpha[i] == 0:
            return y_g >= 1
        elif 0 < self.alpha[i] < self.C:
            return y_g == 1
        else:
            return y_g <= 1

    def _g(self, i):
        """Decision value g(x_i) = sum_j alpha_j * y_j * K(x_i, x_j) + b."""
        r = self.b
        for j in range(self.m):
            r += self.alpha[j] * self.Y[j] * self.kernel(self.X[i], self.X[j])
        return r

    def kernel(self, x1, x2):
        """Kernel value for two samples; unknown kernel names yield 0."""
        if self._kernel == 'linear':
            return sum(x1[k] * x2[k] for k in range(self.n))
        elif self._kernel == 'poly':
            return (sum(x1[k] * x2[k] for k in range(self.n)) + 1) ** 2
        return 0

    def _E(self, i):
        """Prediction error E_i = g(x_i) - y_i."""
        return self._g(i) - self.Y[i]

    def _init_alpha(self):
        """Select a working pair (i, j), or return None when training converged.

        Bug fix: the original fell off the end of the loop (implicitly
        returning None) when every sample satisfied KKT, which made
        ``fit`` crash with TypeError while unpacking the pair.
        """
        # Scan samples with 0 < alpha < C first, then the remainder.
        index_list = [i for i in range(self.m) if 0 < self.alpha[i] < self.C]
        non_satisfy_list = [i for i in range(self.m) if i not in index_list]
        index_list.extend(non_satisfy_list)
        for i in index_list:
            if self._KKT(i):
                continue
            E1 = self.E[i]
            # Pick j maximising |E1 - E2|: take the smallest E2 when E1 >= 0,
            # the largest E2 otherwise.
            if E1 >= 0:
                j = min(range(self.m), key=lambda x: self.E[x])
            else:
                j = max(range(self.m), key=lambda x: self.E[x])
            return i, j
        return None  # every sample satisfies KKT: converged

    def _compare(self, _alpha, L, H):
        """Clip the unclipped alpha update into the box [L, H]."""
        if _alpha > H:
            return H
        elif _alpha < L:
            return L
        else:
            return _alpha

    def fit(self, features, labels):
        """Run SMO for at most ``max_iter`` update steps.

        :param features: (m, n) array of training samples.
        :param labels: length-m array of +1/-1 labels.
        :return: the string ``'train done!'`` (kept for backward compatibility).
        """
        self.init_args(features, labels)
        for t in range(self.max_iter):
            pair = self._init_alpha()
            if pair is None:
                # KKT holds everywhere: stop early instead of crashing on
                # unpacking None (the original behaviour).
                break
            i1, i2 = pair
            # Box boundaries L <= alpha2_new <= H.
            if self.Y[i1] == self.Y[i2]:
                L = max(0, self.alpha[i1] + self.alpha[i2] - self.C)
                H = min(self.C, self.alpha[i1] + self.alpha[i2])
            else:
                L = max(0, self.alpha[i2] - self.alpha[i1])
                H = min(self.C, self.C + self.alpha[i2] - self.alpha[i1])
            E1 = self.E[i1]
            E2 = self.E[i2]
            # eta = K11 + K22 - 2*K12; skip degenerate pairs.
            eta = self.kernel(self.X[i1], self.X[i1]) + self.kernel(
                self.X[i2],
                self.X[i2]) - 2 * self.kernel(self.X[i1], self.X[i2])
            if eta <= 0:
                continue
            # Unclipped second-multiplier update (uses E1 - E2).
            alpha2_new_unc = self.alpha[i2] + self.Y[i2] * (E1 - E2) / eta
            alpha2_new = self._compare(alpha2_new_unc, L, H)
            alpha1_new = self.alpha[i1] + self.Y[i1] * self.Y[i2] * (
                self.alpha[i2] - alpha2_new)
            # Threshold updates derived from each multiplier.
            b1_new = -E1 - self.Y[i1] * self.kernel(self.X[i1], self.X[i1]) * (
                alpha1_new - self.alpha[i1]) - self.Y[i2] * self.kernel(
                self.X[i2],
                self.X[i1]) * (alpha2_new - self.alpha[i2]) + self.b
            b2_new = -E2 - self.Y[i1] * self.kernel(self.X[i1], self.X[i2]) * (
                alpha1_new - self.alpha[i1]) - self.Y[i2] * self.kernel(
                self.X[i2],
                self.X[i2]) * (alpha2_new - self.alpha[i2]) + self.b
            if 0 < alpha1_new < self.C:
                b_new = b1_new
            elif 0 < alpha2_new < self.C:
                b_new = b2_new
            else:
                # Neither multiplier is interior: take the midpoint.
                b_new = (b1_new + b2_new) / 2
            # Commit the pair update and refresh the cached errors.
            self.alpha[i1] = alpha1_new
            self.alpha[i2] = alpha2_new
            self.b = b_new
            self.E[i1] = self._E(i1)
            self.E[i2] = self._E(i2)
        return 'train done!'

    def predict(self, data):
        """Return the predicted label (+1 or -1) for one sample."""
        r = self.b
        for i in range(self.m):
            r += self.alpha[i] * self.Y[i] * self.kernel(data, self.X[i])
        return 1 if r > 0 else -1

    def score(self, X_test, y_test):
        """Return classification accuracy on (X_test, y_test)."""
        right_count = 0
        for i in range(len(X_test)):
            result = self.predict(X_test[i])
            if result == y_test[i]:
                right_count += 1
        return right_count / len(X_test)

    def _weight(self):
        """Weight vector w = sum_i alpha_i * y_i * x_i (linear kernel only)."""
        yx = self.Y.reshape(-1, 1) * self.X
        self.w = np.dot(yx.T, self.alpha)
        return self.w
# data
def create_data():
    """Build a tiny 2-feature binary dataset from iris.

    Every 10th row of the first 100 iris samples is kept; features are
    sepal length and sepal width, and class 0 is relabelled to -1.
    """
    iris = load_iris()
    frame = pd.DataFrame(iris.data, columns=iris.feature_names)
    frame['label'] = iris.target
    frame.columns = [
        'sepal length', 'sepal width', 'petal length', 'petal width', 'label'
    ]
    subset = np.array(frame.iloc[:100:10, [0, 1, -1]])
    # Map label 0 -> -1 so the classes become {+1, -1}.
    subset[:, -1] = np.where(subset[:, -1] == 0, -1, subset[:, -1])
    return subset[:, :2], subset[:, -1]
# Demo: train on the tiny iris-derived dataset and report hold-out accuracy.
if __name__ == '__main__':
    features, labels = create_data()
    X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.25)
    model = SVM(max_iter=200)
    print(model.fit(X_train, y_train))
    print(model.score(X_test, y_test))
| 6,099 | 2,492 |
# Copyright 2019 Intel Corporation.
import numpy as np
import plaidml2 as plaidml
import plaidml2.settings as plaidml_settings
from plaidml2.ffi import ForeignObject, decode_str, ffi, ffi_call, lib
def __init():
    """One-time initializer for the PlaidML execution library."""
    ffi_call(lib.plaidml_exec_init)
# Guarantee the exec library is initialized exactly once per process.
ffi.init_once(__init, 'plaidml_exec_init')
def list_devices():
    """Return the names of all PlaidML devices visible to this process."""
    count = ffi_call(lib.plaidml_device_list_count)
    raw = ffi.new('plaidml_string*[]', count)
    ffi_call(lib.plaidml_device_list, count, raw)
    return [decode_str(entry) for entry in raw]
def list_targets():
    """Return the names of all compilation targets PlaidML supports."""
    count = ffi_call(lib.plaidml_target_list_count)
    raw = ffi.new('plaidml_string*[]', count)
    ffi_call(lib.plaidml_target_list, count, raw)
    return [decode_str(entry) for entry in raw]
class Executable(ForeignObject):
    """A compiled PlaidML program plus per-tensor device I/O buffers.

    Calling the instance copies host arrays into the input buffers, runs
    the program on the device, and returns the output buffers.
    """
    __ffi_del__ = lib.plaidml_executable_free
    def __init__(self, program, inputs, device=None, target=None):
        # Fall back to configured device/target when not supplied explicitly.
        if device is None:
            device = plaidml_settings.get('PLAIDML_DEVICE')
        if target is None:
            target = plaidml_settings.get('PLAIDML_TARGET')
        def make_buffer(tensor):
            # convert LogicalShape into TensorShape
            return plaidml.Buffer(device, tensor.shape.into_TensorShape())
        # Pair every logical tensor with a freshly allocated device buffer.
        self._input_bindings = [(x, make_buffer(x)) for x in inputs]
        self._output_bindings = [(x, make_buffer(x)) for x in program.outputs]
        self._inputs = [x[1] for x in self._input_bindings]
        self._outputs = [x[1] for x in self._output_bindings]
        def wrap(x, y):
            # Build the C-side (tensor, buffer) binding struct for the FFI call.
            return ffi.new('plaidml_binding*', [x.as_ptr(), y.as_ptr()])
        inputs = [wrap(x, y) for x, y in self._input_bindings]
        outputs = [wrap(x, y) for x, y in self._output_bindings]
        ffi_obj = ffi_call(
            lib.plaidml_compile,
            program.as_ptr(),
            device.encode(),
            target.encode(),
            len(inputs),
            inputs,
            len(outputs),
            outputs,
        )
        super(Executable, self).__init__(ffi_obj)
    def __call__(self, inputs):
        # NOTE(review): inputs are matched to placeholders positionally, in
        # the order the `inputs` list was given to __init__ -- confirm callers
        # preserve that order.
        for buffer, ndarray in zip(self._inputs, inputs):
            # Cast the input data type to match the dtype expected by the placeholder buffer
            ndarray = np.array(ndarray, dtype=buffer.shape.dtype.into_numpy())
            buffer.copy_from_ndarray(ndarray)
        ffi_call(lib.plaidml_executable_run, self.as_ptr())
        return self._outputs
def run(program, inputs, device=None, target=None):
    """Compile *program*, execute it once on *inputs*, and return ndarrays.

    *inputs* is a sequence of ``(tensor, data)`` pairs.
    """
    tensors = [tensor for tensor, _ in inputs]
    data = [value for _, value in inputs]
    executable = Executable(program, tensors, device=device, target=target)
    return [buf.as_ndarray() for buf in executable(data)]
| 2,668 | 916 |
#!/usr/bin/env python
"""
Unittest for the LLA module VolumeLLA
"""
from ConfigParser import ConfigParser
import datetime
import os
import sys
import time
import unittest
from afs.tests.BaseTest import parse_commandline
import afs.lla.VolServerLLA
import afs.lla.VLDBLLA
import afs
class EvaluateTestResults(unittest.TestCase):
    """Shared assertion helpers used by the sync and async LLA test cases."""

    def eval_vos_examine(self, res):
        """Examined volume must match the configured reference volume."""
        self.assertEqual(res.vid, self.volume.vid)
        self.assertEqual(res.servername, self.volume.servername)
        self.assertEqual(res.partition, self.volume.partition)

    def eval_vos_move(self, res):
        """A moved volume is returned unchanged."""
        self.assertEqual(res, self.tmp_volume)

    def eval_vos_release(self, res, res_ro, before_date):
        """Release succeeds and refreshes the RO clone's creation date."""
        self.assertEqual(res, True)
        self.assertTrue(res_ro.creation_date >= before_date)

    def eval_vos_set_blockquota(self, res, expected_quota):
        """Quota change is reflected in the returned volume."""
        self.assertEqual(res.maxquota, expected_quota)

    def eval_vos_dump(self, res):
        """Dump reports success."""
        self.assertEqual(res, True)

    def eval_vos_restore(self, res):
        """Restore yields the temporary volume."""
        self.assertEqual(res, self.tmp_volume)

    def eval_vos_convert(self, res):
        """Convert yields the temporary volume."""
        self.assertEqual(res, self.tmp_volume)

    def eval_vos_create(self, res):
        """Create reports success."""
        self.assertEqual(res, True)

    def eval_vos_remove(self, res):
        """Remove reports success."""
        self.assertEqual(res, True)
class TestVolServerLLAMethods(EvaluateTestResults) :
    """
    Tests VolumeLLA Methods

    Synchronous variant: each LLA call blocks and returns its result
    directly.  Test volumes, servers and partitions come from the config
    file referenced by afs.CONFIG.setup; modifying tests are skipped
    unless afs.CONFIG.enable_modifying_tests is set.
    """
    @classmethod
    def setUpClass(self) :
        """
        setup test environment
        called automagically

        NOTE(review): the classmethod parameter is conventionally named
        `cls`; kept as `self` to match the file's existing style.
        """
        self.lla = afs.lla.VolServerLLA.VolServerLLA()
        self.vldb_lla = afs.lla.VLDBLLA.VLDBLLA()
        self.test_config = ConfigParser()
        self.test_config.read(afs.CONFIG.setup)
        self.fileserver = self.test_config.get("VolServerLLA", "FS")
        self.part = self.test_config.get("VolServerLLA", "Part")
        # RW reference volume under test.
        self.volume = afs.model.Volume.Volume()
        self.volume.vid = int(self.test_config.get("VolServerLLA", "VolID"))
        self.volume.servername = self.test_config.get("VolServerLLA", "FS")
        self.volume.partition = self.test_config.get("VolServerLLA", "Part")
        # Read-only clone of the reference volume (used by release tests).
        self.ro_volume = afs.model.Volume.Volume()
        self.ro_volume.vid = int(self.test_config.get("VolServerLLA", "ROVolID"))
        self.ro_volume.servername = self.test_config.get("VolServerLLA", "FS")
        self.ro_volume.partition = self.test_config.get("VolServerLLA", "Part")
        self.dump_file = self.test_config.get("VolServerLLA", "DumpFile")
        # Scratch volume created/removed by the modifying tests.
        self.tmp_volume = afs.model.Volume.Volume()
        self.tmp_volume.name = self.test_config.get("VolServerLLA", "TmpVolName")
        self.tmp_volume.servername = self.volume.servername
        self.tmp_volume.partition = self.volume.partition
        self.dst_server = self.test_config.get("VolServerLLA", "DST_FS")
        self.dst_partition = self.test_config.get("VolServerLLA", "DST_Part")
        return
    def test_vos_examine(self) :
        # Read-only test: safe to run even without modifying tests enabled.
        res = self.lla.examine(self.volume)
        self.eval_vos_examine(res)
        return
    def test_vos_dump_restore_remove(self) :
        # Round-trip: dump RW volume to file, restore as tmp, then clean up.
        if not afs.CONFIG.enable_modifying_tests :
            raise unittest.SkipTest("modifying tests disabled.")
        res = self.lla.dump(self.volume, self.dump_file)
        self.eval_vos_dump(res)
        res = self.lla.restore(self.tmp_volume, self.dump_file)
        self.eval_vos_restore(res)
        os.unlink(self.dump_file)
        res = self.lla.remove(self.tmp_volume)
        self.eval_vos_remove(res)
        return
    def test_vos_create_remove(self) :
        if not afs.CONFIG.enable_modifying_tests :
            raise unittest.SkipTest("modifying tests disabled.")
        res = self.lla.create(self.tmp_volume)
        self.eval_vos_create(res)
        res = self.lla.remove(self.tmp_volume)
        self.eval_vos_remove(res)
        return
    def test_vos_release(self) :
        if not afs.CONFIG.enable_modifying_tests :
            raise unittest.SkipTest("modifying tests disabled.")
        before_date = datetime.datetime.now()
        # before_date has a higher resolution than creation_date of the ro.
        # thus we need to wait to make sure before_Dat is smaller.
        time.sleep(1)
        res = self.lla.release(self.volume)
        res_ro = self.lla.examine(self.ro_volume)
        self.eval_vos_release(res, res_ro, before_date)
        return
    def test_vos_set_blockquota(self) :
        if not afs.CONFIG.enable_modifying_tests :
            raise unittest.SkipTest("modifying tests disabled.")
        # Save the original quota so it can be restored afterwards.
        saved_vol = self.lla.examine(self.volume)
        res = self.lla.set_blockquota(self.volume, 1000)
        self.eval_vos_set_blockquota(res, 1000)
        res = self.lla.set_blockquota(self.volume, saved_vol.maxquota)
        self.eval_vos_set_blockquota(res, saved_vol.maxquota)
        return
    def test_vos_convert(self) :
        if not afs.CONFIG.enable_modifying_tests :
            raise unittest.SkipTest("modifying tests disabled.")
        # Create RW + RO site, release, drop the RW, then convert RO -> RW.
        res = self.lla.create(self.tmp_volume)
        res = self.vldb_lla.addsite(self.tmp_volume)
        res = self.lla.release(self.tmp_volume)
        res = self.lla.remove(self.tmp_volume)
        res = self.lla.convert(self.tmp_volume)
        self.eval_vos_convert(res)
        res = self.lla.remove(self.tmp_volume)
        self.eval_vos_remove(res)
        return
    def test_vos_move(self) :
        if not afs.CONFIG.enable_modifying_tests :
            raise unittest.SkipTest("modifying tests disabled.")
        res = self.lla.create(self.tmp_volume)
        res = self.lla.move(self.tmp_volume, self.dst_server, self.dst_partition)
        self.eval_vos_move(res)
        # NOTE(review): passes the volume returned by move back into remove;
        # the remove result is deliberately not checked here.
        res = self.lla.remove(res)
        return
class TestVolServerLLAMethods_async(EvaluateTestResults):
    """
    Tests VolServerLLA Methods

    Asynchronous variant: every LLA call is started with async=True,
    returning a subprocess identifier that is then waited on and whose
    result is fetched explicitly.

    NOTE(review): `async` became a reserved keyword in Python 3.7, so this
    class only parses on Python 2 / <= 3.6 (consistent with the module's
    ConfigParser import).  Porting requires renaming the keyword in the
    LLA API, which is outside this file.
    """
    @classmethod
    def setUpClass(self) :
        """
        setup test environment
        called automagically
        """
        self.lla = afs.lla.VolServerLLA.VolServerLLA()
        self.vldb_lla = afs.lla.VLDBLLA.VLDBLLA()
        self.test_config = ConfigParser()
        self.test_config.read(afs.CONFIG.setup)
        self.fileserver = self.test_config.get("VolServerLLA", "FS")
        self.part = self.test_config.get("VolServerLLA", "Part")
        self.dump_file = self.test_config.get("VolServerLLA", "DumpFile")
        # RW reference volume under test.
        self.volume = afs.model.Volume.Volume()
        self.volume.vid = int(self.test_config.get("VolServerLLA", "VolID"))
        self.volume.servername = self.test_config.get("VolServerLLA", "FS")
        self.volume.partition = self.test_config.get("VolServerLLA", "Part")
        # Read-only clone of the reference volume.
        self.ro_volume = afs.model.Volume.Volume()
        self.ro_volume.vid = int(self.test_config.get("VolServerLLA", "ROVolID"))
        self.ro_volume.servername = self.test_config.get("VolServerLLA", "FS")
        self.ro_volume.partition = self.test_config.get("VolServerLLA", "Part")
        # Scratch volume created/removed by the modifying tests.
        self.tmp_volume = afs.model.Volume.Volume()
        self.tmp_volume.name = self.test_config.get("VolServerLLA", "TmpVolName")
        self.tmp_volume.servername = self.volume.servername
        self.tmp_volume.partition = self.volume.partition
        self.dst_server = self.test_config.get("VolServerLLA", "DST_FS")
        self.dst_partition = self.test_config.get("VolServerLLA", "DST_Part")
        return
    def test_vos_examine(self) :
        # async pattern: start subprocess, wait, then fetch the result.
        sp_ident = self.lla.examine(self.volume, async=True)
        self.lla.wait_for_subprocess(sp_ident)
        res = self.lla.get_subprocess_result(sp_ident)
        self.eval_vos_examine(res)
        return
    def test_vos_dump_restore_remove(self) :
        if not afs.CONFIG.enable_modifying_tests :
            raise unittest.SkipTest("modifying tests disabled.")
        sp_ident = self.lla.dump(self.volume, self.dump_file, async=True)
        self.lla.wait_for_subprocess(sp_ident)
        res = self.lla.get_subprocess_result(sp_ident)
        self.eval_vos_dump(res)
        sp_ident = self.lla.restore(self.tmp_volume, self.dump_file, async=True)
        self.lla.wait_for_subprocess(sp_ident)
        res = self.lla.get_subprocess_result(sp_ident)
        self.eval_vos_restore(res)
        os.unlink(self.dump_file)
        sp_ident = self.lla.remove(self.tmp_volume, async=True)
        self.lla.wait_for_subprocess(sp_ident)
        res = self.lla.get_subprocess_result(sp_ident)
        self.eval_vos_remove(res)
        return
    def test_vos_create_remove(self) :
        if not afs.CONFIG.enable_modifying_tests :
            raise unittest.SkipTest("modifying tests disabled.")
        sp_ident = self.lla.create(self.tmp_volume, async=True)
        self.lla.wait_for_subprocess(sp_ident)
        res = self.lla.get_subprocess_result(sp_ident)
        self.eval_vos_create(res)
        sp_ident = self.lla.remove(self.tmp_volume, async=True)
        self.lla.wait_for_subprocess(sp_ident)
        res = self.lla.get_subprocess_result(sp_ident)
        self.eval_vos_remove(res)
        return
    def test_vos_release(self) :
        if not afs.CONFIG.enable_modifying_tests :
            raise unittest.SkipTest("modifying tests disabled.")
        before_date = datetime.datetime.now()
        # before_date has a higher resolution than creation_date of the ro.
        # thus we need to wait to make sure before_Dat is smaller.
        time.sleep(1)
        sp_ident = self.lla.release(self.volume, async=True)
        self.lla.wait_for_subprocess(sp_ident)
        res = self.lla.get_subprocess_result(sp_ident)
        sp_ident = self.lla.examine(self.ro_volume, async=True)
        self.lla.wait_for_subprocess(sp_ident)
        res_ro = self.lla.get_subprocess_result(sp_ident)
        self.eval_vos_release(res, res_ro, before_date)
        return
    def test_vos_set_blockquota(self) :
        if not afs.CONFIG.enable_modifying_tests :
            raise unittest.SkipTest("modifying tests disabled.")
        # Save the original quota so it can be restored afterwards.
        sp_ident = self.lla.examine(self.volume, async=True)
        self.lla.wait_for_subprocess(sp_ident)
        saved_vol = self.lla.get_subprocess_result(sp_ident)
        sp_ident = self.lla.set_blockquota(self.volume, 1000, async=True)
        self.lla.wait_for_subprocess(sp_ident)
        res = self.lla.get_subprocess_result(sp_ident)
        self.eval_vos_set_blockquota(res, 1000)
        sp_ident = self.lla.set_blockquota(self.volume, saved_vol.maxquota, async=True)
        self.lla.wait_for_subprocess(sp_ident)
        res = self.lla.get_subprocess_result(sp_ident)
        self.eval_vos_set_blockquota(res, saved_vol.maxquota)
        return
    def test_vos_convert(self) :
        if not afs.CONFIG.enable_modifying_tests :
            raise unittest.SkipTest("modifying tests disabled.")
        sp_ident = self.lla.create(self.tmp_volume, async=True)
        self.lla.wait_for_subprocess(sp_ident)
        res = self.lla.get_subprocess_result(sp_ident)
        self.assertTrue(res != None)
        # note the different lla for addsite!
        sp_ident = self.vldb_lla.addsite(self.tmp_volume, async=True)
        self.vldb_lla.wait_for_subprocess(sp_ident)
        res = self.vldb_lla.get_subprocess_result(sp_ident)
        self.assertTrue(res != None)
        sp_ident = self.lla.release(self.tmp_volume, async=True)
        self.lla.wait_for_subprocess(sp_ident)
        res = self.lla.get_subprocess_result(sp_ident)
        self.assertTrue(res != None)
        sp_ident = self.lla.remove(self.tmp_volume, async=True)
        self.lla.wait_for_subprocess(sp_ident)
        res = self.lla.get_subprocess_result(sp_ident)
        self.assertTrue(res != None)
        sp_ident = self.lla.convert(self.tmp_volume, async=True)
        self.lla.wait_for_subprocess(sp_ident)
        res = self.lla.get_subprocess_result(sp_ident)
        self.assertTrue(res != None)
        self.eval_vos_convert(res)
        sp_ident = self.lla.remove(self.tmp_volume, async=True)
        self.lla.wait_for_subprocess(sp_ident)
        res = self.lla.get_subprocess_result(sp_ident)
        self.assertTrue(res != None)
        self.eval_vos_remove(res)
        return
    def test_vos_move(self) :
        if not afs.CONFIG.enable_modifying_tests :
            raise unittest.SkipTest("modifying tests disabled.")
        sp_ident = self.lla.create(self.tmp_volume, async=True)
        self.lla.wait_for_subprocess(sp_ident)
        res = self.lla.get_subprocess_result(sp_ident)
        self.assertTrue(res != None)
        sp_ident = self.lla.move(self.tmp_volume, self.dst_server, self.dst_partition, async=True)
        self.lla.wait_for_subprocess(sp_ident)
        res = self.lla.get_subprocess_result(sp_ident)
        self.assertTrue(res != None)
        self.eval_vos_move(res)
        # NOTE(review): passes the moved volume back into remove; the remove
        # result is deliberately not checked here.
        sp_ident = self.lla.remove(res, async=True)
        self.lla.wait_for_subprocess(sp_ident)
        res = self.lla.get_subprocess_result(sp_ident)
        return
# Run both suites back to back: synchronous calls, then the async variants.
if __name__ == '__main__' :
    parse_commandline()
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity = 2)
    sys.stderr.write("\n===\n=== testing direct fork ===\n===\n\n")
    runner.run(loader.loadTestsFromTestCase(TestVolServerLLAMethods))
    sys.stderr.write("\n===\n=== testing detached execution ===\n===\n\n")
    runner.run(loader.loadTestsFromTestCase(TestVolServerLLAMethods_async))
| 13,553 | 4,527 |
from math import pi, sin
def S(t, n, T):
    """Partial Fourier sum (first n odd harmonics) of the square wave of period T."""
    total = 0
    for i in range(1, n + 1):
        k = 2 * i - 1  # odd harmonic index 1, 3, 5, ...
        total += sin(2 * k * pi * t / T) / k
    return 4 / pi * total
def f(t, T):
    """Exact square wave: +1 on (0, T/2), -1 on (T/2, T), 0 at the jumps and outside."""
    half = T / 2
    if 0 < t < half:
        return 1
    if half < t < T:
        return -1
    return 0
def test():
    """Tabulate the truncation error f(t) - S(t) at t = alpha*T for several n."""
    print(" n | alpha | error ")
    print("-----+--------+----------")
    period = 2*pi
    for terms in (1, 3, 5, 10, 30, 100):
        for frac in (0.01, 0.25, 0.49):
            point = frac * period
            diff = f(point, period) - S(point, terms, period)
            print(f" {terms:3} | {frac:6} | {diff:8.4f}")
# Print the error table when run as a script.
if __name__ == '__main__':
    test()
| 548 | 260 |
# MIT License
#
# Copyright (c) 2021 Soohwan Kim
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import torch
import torch.nn as nn
from luna_transformer.embedding import PositionalEncoding
from luna_transformer.encoder import LunaTransformerEncoderLayer
from luna_transformer.mask import get_attn_pad_mask
class LunaTransformerEncoder(nn.Module):
    """
    Transformer encoder architecture applied Linear Unified Nested Attention (Luna).
    Luna was proposed in the paper "Luna: Linear Unified Nested Attention" (https://arxiv.org/abs/2106.01540.pdf)

    Args:
        vocab_size: size of the input token vocabulary.
        d_model: embedding / model dimensionality.
        num_layers: number of stacked Luna encoder layers.
        num_attention_heads: attention heads per layer.
        d_ff: feed-forward hidden dimensionality.
        dropout_p: dropout probability applied to both streams.
        project_embedding_length: length of the learned projected ("p") sequence.
        max_length: maximum input sequence length for positional encodings.
    """
    def __init__(
            self,
            vocab_size: int,
            d_model: int,
            num_layers: int = 6,
            num_attention_heads: int = 8,
            d_ff: int = 2048,
            dropout_p: float = 0.1,
            project_embedding_length: int = 32,
            max_length: int = 1024,
    ):
        super(LunaTransformerEncoder, self).__init__()
        self.d_model = d_model
        self.projected_embedding_length = project_embedding_length
        # Learned projected sequence shared across the batch, shape (p_len, d_model).
        self.projected_embeddings = nn.Parameter(torch.Tensor(project_embedding_length, self.d_model))
        self.projected_positions = PositionalEncoding(self.d_model, project_embedding_length)
        nn.init.normal_(self.projected_embeddings, mean=0.0, std=self.d_model ** -0.5)
        self.input_embedding = nn.Embedding(vocab_size, d_model)
        self.dropout = nn.Dropout(p=dropout_p)
        self.input_positions = PositionalEncoding(d_model, max_length)
        # NOTE(review): input_norm is constructed but never applied in
        # forward() -- confirm whether that is intentional.
        self.input_norm = nn.LayerNorm(d_model)
        # Embedding scale sqrt(d_model), as in the original Transformer.
        self.embed_scale = math.sqrt(self.d_model)
        self.layers = nn.ModuleList([
            LunaTransformerEncoderLayer(
                d_model=d_model,
                num_attention_heads=num_attention_heads,
                d_ff=d_ff,
                dropout_p=dropout_p,
            ) for _ in range(num_layers)
        ])
    def forward(self, inputs: torch.Tensor, input_lengths: torch.Tensor):
        # inputs: (batch, seq) token ids; input_lengths: per-sample valid lengths.
        batch_size, seq_length = inputs.size()
        # Padding mask sized against the projected sequence length.
        attention_padding_mask = get_attn_pad_mask(inputs, input_lengths, self.projected_embedding_length)
        embedded = self.input_embedding(inputs)
        embedded *= self.embed_scale
        projected_embedded = self.projected_embeddings * self.embed_scale
        # Add positional encodings to both streams.
        embedded += self.input_positions(embedded.size(1))
        projected_embedded += self.projected_positions(self.projected_embedding_length).squeeze(0)
        # NOTE(review): this rebinding shadows the input seq_length above with
        # the projected length -- easy to misread, though harmless here.
        seq_length, dim = projected_embedded.size()
        # Broadcast the shared projected sequence across the batch.
        projected_embedded = projected_embedded.unsqueeze(0).expand(batch_size, seq_length, dim)
        outputs = self.dropout(embedded)
        p = self.dropout(projected_embedded)
        # Each Luna layer updates both the main stream and the projected stream.
        for layer in self.layers:
            outputs, p = layer(outputs, p, attention_padding_mask)
        return outputs
| 3,831 | 1,257 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
# Public argument types exported by this generated module.
__all__ = [
    'AddonTagArgs',
    'ClusterEncryptionConfigProviderPropertiesArgs',
    'ClusterEncryptionConfigArgs',
    'ClusterKubernetesNetworkConfigArgs',
    'ClusterLoggingArgs',
    'ClusterResourcesVpcConfigArgs',
    'ClusterTagArgs',
    'FargateProfileLabelArgs',
    'FargateProfileSelectorArgs',
    'FargateProfileTagArgs',
    'NodegroupLaunchTemplateSpecificationArgs',
    'NodegroupRemoteAccessArgs',
    'NodegroupScalingConfigArgs',
    'NodegroupTaintArgs',
    'NodegroupUpdateConfigArgs',
]
# Generated by the Pulumi SDK Generator -- comment edits here will be lost
# on regeneration.
@pulumi.input_type
class AddonTagArgs:
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        A key-value pair to associate with a resource.
        :param pulumi.Input[str] key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        :param pulumi.Input[str] value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
# Generated by the Pulumi SDK Generator -- comment edits here will be lost
# on regeneration.
@pulumi.input_type
class ClusterEncryptionConfigProviderPropertiesArgs:
    def __init__(__self__, *,
                 key_arn: Optional[pulumi.Input[str]] = None):
        """
        The encryption provider for the cluster.
        :param pulumi.Input[str] key_arn: Amazon Resource Name (ARN) or alias of the KMS key. The KMS key must be symmetric, created in the same region as the cluster, and if the KMS key was created in a different account, the user must have access to the KMS key.
        """
        if key_arn is not None:
            pulumi.set(__self__, "key_arn", key_arn)
    @property
    @pulumi.getter(name="keyArn")
    def key_arn(self) -> Optional[pulumi.Input[str]]:
        """
        Amazon Resource Name (ARN) or alias of the KMS key. The KMS key must be symmetric, created in the same region as the cluster, and if the KMS key was created in a different account, the user must have access to the KMS key.
        """
        return pulumi.get(self, "key_arn")
    @key_arn.setter
    def key_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key_arn", value)
# Generated by the Pulumi SDK Generator -- comment edits here will be lost
# on regeneration.
@pulumi.input_type
class ClusterEncryptionConfigArgs:
    def __init__(__self__, *,
                 provider: Optional[pulumi.Input['ClusterEncryptionConfigProviderPropertiesArgs']] = None,
                 resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        The encryption configuration for the cluster
        :param pulumi.Input['ClusterEncryptionConfigProviderPropertiesArgs'] provider: The encryption provider for the cluster.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] resources: Specifies the resources to be encrypted. The only supported value is "secrets".
        """
        if provider is not None:
            pulumi.set(__self__, "provider", provider)
        if resources is not None:
            pulumi.set(__self__, "resources", resources)
    @property
    @pulumi.getter
    def provider(self) -> Optional[pulumi.Input['ClusterEncryptionConfigProviderPropertiesArgs']]:
        """
        The encryption provider for the cluster.
        """
        return pulumi.get(self, "provider")
    @provider.setter
    def provider(self, value: Optional[pulumi.Input['ClusterEncryptionConfigProviderPropertiesArgs']]):
        pulumi.set(self, "provider", value)
    @property
    @pulumi.getter
    def resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Specifies the resources to be encrypted. The only supported value is "secrets".
        """
        return pulumi.get(self, "resources")
    @resources.setter
    def resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "resources", value)
# Generated by the Pulumi SDK Generator -- comment edits here will be lost
# on regeneration.
@pulumi.input_type
class ClusterKubernetesNetworkConfigArgs:
    def __init__(__self__, *,
                 ip_family: Optional[pulumi.Input['ClusterKubernetesNetworkConfigIpFamily']] = None,
                 service_ipv4_cidr: Optional[pulumi.Input[str]] = None,
                 service_ipv6_cidr: Optional[pulumi.Input[str]] = None):
        """
        The Kubernetes network configuration for the cluster.
        :param pulumi.Input['ClusterKubernetesNetworkConfigIpFamily'] ip_family: Ipv4 or Ipv6, Ipv6 is only supported on cluster with k8s version 1.21
        :param pulumi.Input[str] service_ipv4_cidr: The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. We recommend that you specify a block that does not overlap with resources in other networks that are peered or connected to your VPC.
        :param pulumi.Input[str] service_ipv6_cidr: The CIDR block to assign Kubernetes service IP addresses from.
        """
        if ip_family is not None:
            pulumi.set(__self__, "ip_family", ip_family)
        if service_ipv4_cidr is not None:
            pulumi.set(__self__, "service_ipv4_cidr", service_ipv4_cidr)
        if service_ipv6_cidr is not None:
            pulumi.set(__self__, "service_ipv6_cidr", service_ipv6_cidr)
    @property
    @pulumi.getter(name="ipFamily")
    def ip_family(self) -> Optional[pulumi.Input['ClusterKubernetesNetworkConfigIpFamily']]:
        """
        Ipv4 or Ipv6, Ipv6 is only supported on cluster with k8s version 1.21
        """
        return pulumi.get(self, "ip_family")
    @ip_family.setter
    def ip_family(self, value: Optional[pulumi.Input['ClusterKubernetesNetworkConfigIpFamily']]):
        pulumi.set(self, "ip_family", value)
    @property
    @pulumi.getter(name="serviceIpv4Cidr")
    def service_ipv4_cidr(self) -> Optional[pulumi.Input[str]]:
        """
        The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. We recommend that you specify a block that does not overlap with resources in other networks that are peered or connected to your VPC.
        """
        return pulumi.get(self, "service_ipv4_cidr")
    @service_ipv4_cidr.setter
    def service_ipv4_cidr(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_ipv4_cidr", value)
    @property
    @pulumi.getter(name="serviceIpv6Cidr")
    def service_ipv6_cidr(self) -> Optional[pulumi.Input[str]]:
        """
        The CIDR block to assign Kubernetes service IP addresses from.
        """
        return pulumi.get(self, "service_ipv6_cidr")
    @service_ipv6_cidr.setter
    def service_ipv6_cidr(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_ipv6_cidr", value)
# Generated by the Pulumi SDK Generator -- comment edits here will be lost
# on regeneration.
# NOTE(review): the nested field is typed as ClusterLoggingArgs itself
# (self-referential) -- presumably a generator quirk around the
# CloudFormation Logging/ClusterLogging nesting; confirm against the
# schema before relying on the annotation.
@pulumi.input_type
class ClusterLoggingArgs:
    def __init__(__self__, *,
                 cluster_logging: Optional[pulumi.Input['ClusterLoggingArgs']] = None):
        """
        Enable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs based on log types. By default, cluster control plane logs aren't exported to CloudWatch Logs.
        :param pulumi.Input['ClusterLoggingArgs'] cluster_logging: The cluster control plane logging configuration for your cluster.
        """
        if cluster_logging is not None:
            pulumi.set(__self__, "cluster_logging", cluster_logging)
    @property
    @pulumi.getter(name="clusterLogging")
    def cluster_logging(self) -> Optional[pulumi.Input['ClusterLoggingArgs']]:
        """
        The cluster control plane logging configuration for your cluster.
        """
        return pulumi.get(self, "cluster_logging")
    @cluster_logging.setter
    def cluster_logging(self, value: Optional[pulumi.Input['ClusterLoggingArgs']]):
        pulumi.set(self, "cluster_logging", value)
@pulumi.input_type
class ClusterResourcesVpcConfigArgs:
    """VPC configuration input for an EKS cluster (subnets, endpoint access, security groups)."""

    def __init__(__self__, *,
                 subnet_ids: pulumi.Input[Sequence[pulumi.Input[str]]],
                 endpoint_private_access: Optional[pulumi.Input[bool]] = None,
                 endpoint_public_access: Optional[pulumi.Input[bool]] = None,
                 public_access_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        An object representing the VPC configuration to use for an Amazon EKS cluster.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] subnet_ids: Specify subnets for your Amazon EKS nodes. Amazon EKS creates cross-account elastic network interfaces in these subnets to allow communication between your nodes and the Kubernetes control plane.
        :param pulumi.Input[bool] endpoint_private_access: Set this value to true to enable private access for your cluster's Kubernetes API server endpoint. If you enable private access, Kubernetes API requests from within your cluster's VPC use the private VPC endpoint. The default value for this parameter is false, which disables private access for your Kubernetes API server. If you disable private access and you have nodes or AWS Fargate pods in the cluster, then ensure that publicAccessCidrs includes the necessary CIDR blocks for communication with the nodes or Fargate pods.
        :param pulumi.Input[bool] endpoint_public_access: Set this value to false to disable public access to your cluster's Kubernetes API server endpoint. If you disable public access, your cluster's Kubernetes API server can only receive requests from within the cluster VPC. The default value for this parameter is true, which enables public access for your Kubernetes API server.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] public_access_cidrs: The CIDR blocks that are allowed access to your cluster's public Kubernetes API server endpoint. Communication to the endpoint from addresses outside of the CIDR blocks that you specify is denied. The default value is 0.0.0.0/0. If you've disabled private endpoint access and you have nodes or AWS Fargate pods in the cluster, then ensure that you specify the necessary CIDR blocks.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] security_group_ids: Specify one or more security groups for the cross-account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. If you don't specify a security group, the default security group for your VPC is used.
        """
        # subnet_ids is the only required field; the rest are set only when provided.
        pulumi.set(__self__, "subnet_ids", subnet_ids)
        if endpoint_private_access is not None:
            pulumi.set(__self__, "endpoint_private_access", endpoint_private_access)
        if endpoint_public_access is not None:
            pulumi.set(__self__, "endpoint_public_access", endpoint_public_access)
        if public_access_cidrs is not None:
            pulumi.set(__self__, "public_access_cidrs", public_access_cidrs)
        if security_group_ids is not None:
            pulumi.set(__self__, "security_group_ids", security_group_ids)

    @property
    @pulumi.getter(name="subnetIds")
    def subnet_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        Specify subnets for your Amazon EKS nodes. Amazon EKS creates cross-account elastic network interfaces in these subnets to allow communication between your nodes and the Kubernetes control plane.
        """
        return pulumi.get(self, "subnet_ids")

    @subnet_ids.setter
    def subnet_ids(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "subnet_ids", value)

    @property
    @pulumi.getter(name="endpointPrivateAccess")
    def endpoint_private_access(self) -> Optional[pulumi.Input[bool]]:
        """
        Set this value to true to enable private access for your cluster's Kubernetes API server endpoint. If you enable private access, Kubernetes API requests from within your cluster's VPC use the private VPC endpoint. The default value for this parameter is false, which disables private access for your Kubernetes API server. If you disable private access and you have nodes or AWS Fargate pods in the cluster, then ensure that publicAccessCidrs includes the necessary CIDR blocks for communication with the nodes or Fargate pods.
        """
        return pulumi.get(self, "endpoint_private_access")

    @endpoint_private_access.setter
    def endpoint_private_access(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "endpoint_private_access", value)

    @property
    @pulumi.getter(name="endpointPublicAccess")
    def endpoint_public_access(self) -> Optional[pulumi.Input[bool]]:
        """
        Set this value to false to disable public access to your cluster's Kubernetes API server endpoint. If you disable public access, your cluster's Kubernetes API server can only receive requests from within the cluster VPC. The default value for this parameter is true, which enables public access for your Kubernetes API server.
        """
        return pulumi.get(self, "endpoint_public_access")

    @endpoint_public_access.setter
    def endpoint_public_access(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "endpoint_public_access", value)

    @property
    @pulumi.getter(name="publicAccessCidrs")
    def public_access_cidrs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The CIDR blocks that are allowed access to your cluster's public Kubernetes API server endpoint. Communication to the endpoint from addresses outside of the CIDR blocks that you specify is denied. The default value is 0.0.0.0/0. If you've disabled private endpoint access and you have nodes or AWS Fargate pods in the cluster, then ensure that you specify the necessary CIDR blocks.
        """
        return pulumi.get(self, "public_access_cidrs")

    @public_access_cidrs.setter
    def public_access_cidrs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "public_access_cidrs", value)

    @property
    @pulumi.getter(name="securityGroupIds")
    def security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Specify one or more security groups for the cross-account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. If you don't specify a security group, the default security group for your VPC is used.
        """
        return pulumi.get(self, "security_group_ids")

    @security_group_ids.setter
    def security_group_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "security_group_ids", value)
@pulumi.input_type
class ClusterTagArgs:
    """A key-value tag input attached to an EKS cluster resource."""

    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        A key-value pair to associate with a resource.
        :param pulumi.Input[str] key: The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        :param pulumi.Input[str] value: The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class FargateProfileLabelArgs:
    """A Kubernetes label (key/value) used by a Fargate profile selector."""

    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        A key-value pair to associate with a pod.
        :param pulumi.Input[str] key: The key name of the label.
        :param pulumi.Input[str] value: The value for the label.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        The key name of the label.
        """
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        The value for the label.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class FargateProfileSelectorArgs:
    """Selector matching pods to a Fargate profile by namespace and optional labels."""

    def __init__(__self__, *,
                 namespace: pulumi.Input[str],
                 labels: Optional[pulumi.Input[Sequence[pulumi.Input['FargateProfileLabelArgs']]]] = None):
        """
        :param pulumi.Input[str] namespace: The Kubernetes namespace the selector applies to.
        :param pulumi.Input[Sequence[pulumi.Input['FargateProfileLabelArgs']]] labels: Optional labels that pods must also carry to match.
        """
        pulumi.set(__self__, "namespace", namespace)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)

    @property
    @pulumi.getter
    def namespace(self) -> pulumi.Input[str]:
        """The Kubernetes namespace the selector applies to."""
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: pulumi.Input[str]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FargateProfileLabelArgs']]]]:
        """Optional list of label selectors (see FargateProfileLabelArgs)."""
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FargateProfileLabelArgs']]]]):
        pulumi.set(self, "labels", value)
@pulumi.input_type
class FargateProfileTagArgs:
    """A key-value tag input attached to an EKS Fargate profile."""

    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        A key-value pair to associate with a resource.
        :param pulumi.Input[str] key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        :param pulumi.Input[str] value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class NodegroupLaunchTemplateSpecificationArgs:
    """EC2 launch template reference for a managed node group (by id or name, with optional version)."""

    def __init__(__self__, *,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 version: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] id: Launch template ID.
        :param pulumi.Input[str] name: Launch template name.
        :param pulumi.Input[str] version: Launch template version.
        """
        # All fields optional; only provided ones are stored.
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """Launch template ID."""
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """Launch template name."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        """Launch template version."""
        return pulumi.get(self, "version")

    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)
@pulumi.input_type
class NodegroupRemoteAccessArgs:
    """SSH remote-access configuration input for a managed node group."""

    def __init__(__self__, *,
                 ec2_ssh_key: pulumi.Input[str],
                 source_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        :param pulumi.Input[str] ec2_ssh_key: EC2 SSH key name granting access to the nodes (required).
        :param pulumi.Input[Sequence[pulumi.Input[str]]] source_security_groups: Optional security groups allowed as SSH sources.
        """
        pulumi.set(__self__, "ec2_ssh_key", ec2_ssh_key)
        if source_security_groups is not None:
            pulumi.set(__self__, "source_security_groups", source_security_groups)

    @property
    @pulumi.getter(name="ec2SshKey")
    def ec2_ssh_key(self) -> pulumi.Input[str]:
        """EC2 SSH key name granting access to the nodes."""
        return pulumi.get(self, "ec2_ssh_key")

    @ec2_ssh_key.setter
    def ec2_ssh_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "ec2_ssh_key", value)

    @property
    @pulumi.getter(name="sourceSecurityGroups")
    def source_security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """Optional security groups allowed as SSH sources."""
        return pulumi.get(self, "source_security_groups")

    @source_security_groups.setter
    def source_security_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "source_security_groups", value)
@pulumi.input_type
class NodegroupScalingConfigArgs:
    """Scaling bounds (desired/max/min node counts) for a managed node group."""

    def __init__(__self__, *,
                 desired_size: Optional[pulumi.Input[float]] = None,
                 max_size: Optional[pulumi.Input[float]] = None,
                 min_size: Optional[pulumi.Input[float]] = None):
        """
        :param pulumi.Input[float] desired_size: Desired node count.
        :param pulumi.Input[float] max_size: Maximum node count.
        :param pulumi.Input[float] min_size: Minimum node count.
        """
        if desired_size is not None:
            pulumi.set(__self__, "desired_size", desired_size)
        if max_size is not None:
            pulumi.set(__self__, "max_size", max_size)
        if min_size is not None:
            pulumi.set(__self__, "min_size", min_size)

    @property
    @pulumi.getter(name="desiredSize")
    def desired_size(self) -> Optional[pulumi.Input[float]]:
        """Desired node count."""
        return pulumi.get(self, "desired_size")

    @desired_size.setter
    def desired_size(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "desired_size", value)

    @property
    @pulumi.getter(name="maxSize")
    def max_size(self) -> Optional[pulumi.Input[float]]:
        """Maximum node count."""
        return pulumi.get(self, "max_size")

    @max_size.setter
    def max_size(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "max_size", value)

    @property
    @pulumi.getter(name="minSize")
    def min_size(self) -> Optional[pulumi.Input[float]]:
        """Minimum node count."""
        return pulumi.get(self, "min_size")

    @min_size.setter
    def min_size(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "min_size", value)
@pulumi.input_type
class NodegroupTaintArgs:
    """A Kubernetes taint (effect/key/value) applied to a managed node group."""

    def __init__(__self__, *,
                 effect: Optional[pulumi.Input[str]] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] effect: Taint effect.
        :param pulumi.Input[str] key: Taint key.
        :param pulumi.Input[str] value: Taint value.
        """
        if effect is not None:
            pulumi.set(__self__, "effect", effect)
        if key is not None:
            pulumi.set(__self__, "key", key)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def effect(self) -> Optional[pulumi.Input[str]]:
        """Taint effect."""
        return pulumi.get(self, "effect")

    @effect.setter
    def effect(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "effect", value)

    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        """Taint key."""
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """Taint value."""
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class NodegroupUpdateConfigArgs:
    """Rolling-update limits for a managed node group (absolute count or percentage)."""

    def __init__(__self__, *,
                 max_unavailable: Optional[pulumi.Input[float]] = None,
                 max_unavailable_percentage: Optional[pulumi.Input[float]] = None):
        """
        :param pulumi.Input[float] max_unavailable: Maximum number of nodes unavailable during an update.
        :param pulumi.Input[float] max_unavailable_percentage: Maximum percentage of nodes unavailable during an update.
        """
        if max_unavailable is not None:
            pulumi.set(__self__, "max_unavailable", max_unavailable)
        if max_unavailable_percentage is not None:
            pulumi.set(__self__, "max_unavailable_percentage", max_unavailable_percentage)

    @property
    @pulumi.getter(name="maxUnavailable")
    def max_unavailable(self) -> Optional[pulumi.Input[float]]:
        """Maximum number of nodes unavailable during an update."""
        return pulumi.get(self, "max_unavailable")

    @max_unavailable.setter
    def max_unavailable(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "max_unavailable", value)

    @property
    @pulumi.getter(name="maxUnavailablePercentage")
    def max_unavailable_percentage(self) -> Optional[pulumi.Input[float]]:
        """Maximum percentage of nodes unavailable during an update."""
        return pulumi.get(self, "max_unavailable_percentage")

    @max_unavailable_percentage.setter
    def max_unavailable_percentage(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "max_unavailable_percentage", value)
| 28,356 | 8,638 |
import dbops
from loguru import logger
import traceback
from bson.objectid import ObjectId
import utils
import global_vars as g
import fs_utils
def create_testboard(data, userID, organizationID):
    """Create a testboard plus its attached requests.

    Returns (testboard_id, "success") on success, ("", message) when the
    name is already taken, or (None, message) on any other failure.
    """
    try:
        board_name = data["apiName"]
        if dbops.check_if_exists("testboards", "apiName", board_name):
            message = f"Testboard named '{board_name}' already exists."
            logger.error(message)
            return "", message
        board_id = dbops.insert_testboard(
            board_name,
            data["apiType"],
            data["apiEnvironment"],
            data["visibility"],
            userID,
            organizationID,
        )
        # Each request document is inserted separately, then linked back
        # to the board.
        for req in data["apiRequests"]:
            req_id = dbops.insert_request(
                board_id,
                req["apiHeader"],
                req["apiHttpMethod"],
                req["apiEndpoint"],
                req["apiRequestBody"],
                req["apiResponseBody"],
                req["apiInputDataType"],
                req["apiRequestBodyType"],
                req["apiResponseBodyType"],
            )
            dbops.push_request_in_testboard(board_id, req_id)
        return board_id, "success"
    except Exception as e:
        logger.error(str(e))
        traceback.print_exc()
        return None, str(e)
def update_testboard(data,userID):
    """Apply a partial update to an existing testboard.

    Returns (testboardID, "success") on success, ("", message) when the
    board does not exist, or (None, message) on any failure.
    """
    try:
        testboardID = data["testboardID"]
        if dbops.check_if_exists("testboards","_id",ObjectId(testboardID)) == False:
            message = "Testboard with ID '"+testboardID+"' does not exist."
            logger.error(message)
            return "",message
        # Only these scalar fields may be patched straight from the payload.
        for d in data:
            if d in ["apiName","apiEnvironment"]:
                r = dbops.update_collection("testboards",testboardID,d,data[d])
                if r == False:
                    return None,"Unable to update entities"
        # Visibility may only be changed by the board's owner.
        ownership,msg = utils.check_ownership("testboards",ObjectId(testboardID),userID)
        if ownership:
            r = dbops.update_collection("testboards",testboardID,"visibility",data["visibility"])
            if r == False:
                return None,"Unable to update visibility"
        # Requests are replaced wholesale: delete the old documents, reset
        # the board's request list, then re-insert and re-link each request.
        cleared = dbops.clear_all_requests(testboardID)
        if not cleared:
            return None, "Unable to remove existing requests"
        r = dbops.update_collection("testboards",testboardID,"apiRequests",[])
        if r == False:
            return None,"Unable to update apiRequests"
        for request in data["apiRequests"]:
            request_id = dbops.insert_request(
                testboardID ,
                request["apiHeader"] ,
                request["apiHttpMethod"] ,
                request["apiEndpoint"] ,
                request["apiRequestBody"] ,
                request["apiResponseBody"] ,
                request["apiInputDataType"] ,
                request["apiRequestBodyType"] ,
                request["apiResponseBodyType"]
            )
            dbops.push_request_in_testboard(testboardID,request_id)
        # Record who performed the update.
        dbops.update_collection("testboards",testboardID,"apiLastUpdatedBy",userID)
        return testboardID,"success"
    except Exception as e:
        logger.error(str(e))
        traceback.print_exc()
        return None,str(e)
def get_testboard(testboard_id):
    """Fetch one testboard with its request documents expanded inline.

    Returns (board_dict, "success") or (None, message).
    """
    try:
        board = dbops.get_testboard(testboard_id)
        if board is None:
            return None, "testboard not found"
        # Expose the Mongo _id under the API-friendly key.
        board["testboardID"] = str(board.pop("_id"))
        expanded = []
        for req_id in board["apiRequests"]:
            req = dbops.get_request(req_id)
            req["requestID"] = str(req.pop("_id"))
            expanded.append(req)
        board["apiRequests"] = expanded
        return board, "success"
    except Exception as e:
        logger.error(str(e))
        traceback.print_exc()
        return None, str(e)
def list_testboard(userID, organizationID):
    """List the testboards visible to a user, shaped for UI display.

    Returns (list_of_boards, "success") or (None, message).
    """
    try:
        boards = dbops.list_testboards(userID, organizationID)
        for pos, board in enumerate(boards):
            board["testboardID"] = str(board.pop("_id"))
            board["key"] = pos + 1
            # Replace the internal type code with its display name.
            board["apiType"] = g.api_named_types[board["apiType"]]
            creator = dbops.fetch_user_details(board["creatorID"])
            board["creator"] = "User deleted" if creator is None else creator["firstName"]
        return boards, "success"
    except Exception as e:
        logger.error(str(e))
        traceback.print_exc()
        return None, str(e)
def get_test_files(testboardID):
    """Return the test files attached to a testboard, dispatching on apiType.

    Returns (file_list, "success") or (None, error_message).
    """
    testboard = dbops.get_testboard(testboardID)
    if testboard is None:
        message = "TestboardID does not exist"
        logger.error(message)
        return None, message
    if testboard["apiType"] == "imageclassification":
        file_list, msg = get_image_classification_test_files(testboardID)
        return file_list, msg
    # The original fell through here with a placeholder message
    # ("This should never be returned, probably a bug.") even though this
    # branch is reachable for every other apiType. Report it properly.
    message = "Test files are not supported for apiType '" + str(testboard["apiType"]) + "'"
    logger.error(message)
    return None, message
def get_image_classification_test_files(testboardID):
    """Shape the board's image documents for UI display.

    Returns (image_list, "success").
    """
    images = dbops.get_images_for_testboard(testboardID)
    for pos, img in enumerate(images):
        img["imageID"] = str(img.pop("_id"))
        # The raw URL is internal; it is stripped before returning.
        del img["imageUrl"]
        img["key"] = pos + 1
        img["imageResolution"] = str(img["imageHeight"]) + "x" + str(img["imageWidth"])
        img["className"] = img["annotation"]
        # Convert the stored byte count to a human-readable kB string.
        img["fileSize"] = str(round(img["fileSize"] / 1024, 1)) + "kB"
    return images, "success"
def delete_test_files(testboardID, imageIDs):
    """Remove image files from storage, then their DB records.

    A filesystem delete failure is logged but does not stop the DB cleanup.
    Returns the number of deleted DB records.
    """
    for doc in dbops.get_links_for_images(testboardID, imageIDs):
        outcome = fs_utils.delete_from_fs(doc["imageUrl"])
        if outcome == False:
            logger.error(f"unable to delete {doc} from fs")
    return dbops.delete_images_from_testboard(testboardID, imageIDs)
def update_image_visibility(testboardID, imageIDs, visible):
    """Toggle the visibility flag on the given images; returns the modified count.

    testboardID is accepted for interface symmetry but the query only
    needs the image IDs.
    """
    return dbops.update_image_visibility(imageIDs, visible)
def update_testfile_annotation(testboardID, imageID, annotation):
    """Persist a new annotation for one test image; returns the dbops result."""
    return dbops.update_testfile_annotation(testboardID, imageID, annotation)
| 6,726 | 2,030 |
import smtplib
import os
from os.path import basename
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
# Gmail credentials, read from the environment at import time (None if unset).
# The original lines were chained assignments
# ("GMAIL_USERNAME = client_secret = os.getenv(...)") that leaked a spurious
# module-level `client_secret` name; the chain is dropped here.
GMAIL_USERNAME = os.getenv("GMAIL_USERNAME")
GMAIL_PASSWORD = os.getenv("GMAIL_PASSWORD")
async def send_mail(send_to, subject, text, files=None):
    """Send a plain-text email, optionally with file attachments, via Gmail SMTP.

    :param send_to: list of recipient addresses (must be a list).
    :param subject: subject line.
    :param text: plain-text message body.
    :param files: optional iterable of file paths to attach.
    :raises AssertionError: if send_to is not a list.

    NOTE(review): although declared async, smtplib is blocking, so this
    coroutine blocks the event loop while connecting and sending; consider
    running it in an executor if called from a running loop.
    """
    assert isinstance(send_to, list)
    print("Creating email...", flush=True)
    msg = MIMEMultipart()
    msg["From"] = GMAIL_USERNAME
    msg["To"] = COMMASPACE.join(send_to)
    msg["Date"] = formatdate(localtime=True)
    msg["Subject"] = subject
    msg.attach(MIMEText(text))
    for f in files or []:
        with open(f, "rb") as fil:
            part = MIMEApplication(
                fil.read(),
                Name=basename(f)
            )
        # After the file is closed
        part["Content-Disposition"] = f"attachment; filename={basename(f)}"
        msg.attach(part)
    # The context manager issues a proper SMTP QUIT and closes the socket on
    # exit — the original called close() without quit() and leaked the
    # connection entirely if any step before it raised.
    with smtplib.SMTP("smtp.gmail.com", 587) as server:
        server.starttls()
        server.ehlo()
        server.login(GMAIL_USERNAME, GMAIL_PASSWORD)
        print("Sending email!", flush=True)
        server.sendmail(GMAIL_USERNAME, send_to, msg.as_string())
    print("Sent email!", flush=True)
| 1,308 | 444 |