hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c31f0165563b2c602dfa5708fec677735dd636a | 5,527 | py | Python | coogger/cooggerapp/views/home.py | ewuoso/coogger | 11df6f8487b59bd06f9a496efde3fec998a64217 | [
"MIT"
] | null | null | null | coogger/cooggerapp/views/home.py | ewuoso/coogger | 11df6f8487b59bd06f9a496efde3fec998a64217 | [
"MIT"
] | null | null | null | coogger/cooggerapp/views/home.py | ewuoso/coogger | 11df6f8487b59bd06f9a496efde3fec998a64217 | [
"MIT"
] | null | null | null | #django
from django.http import *
from django.shortcuts import render
from django.contrib.auth import *
from django.db.models import Q
from django.contrib import messages as ms
from django.contrib.auth.models import User
#django class based
from django.views.generic.base import TemplateView
from django.views import View
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
#form
from cooggerapp.forms import ReportsForm
#models
from cooggerapp.models import Content, SearchedWords, ReportModel, Following, OtherInformationOfUsers
from social_django.models import UserSocialAuth
#views
from cooggerapp.views.tools import paginator
import json
from sc2py.sc2py import Sc2
#steem
from steem.post import Post
from steem.amount import Amount
class Home(TemplateView):
template_name = "card/blogs.html"
queryset = Content.objects.filter(status = "approved")
def get_context_data(self, **kwargs):
context = super(Home, self).get_context_data(**kwargs)
context["content"] = paginator(self.request,self.queryset)
return context
class Upvote(View):
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
user = request.POST["user"]
permlink = request.POST["permlink"]
weight = OtherInformationOfUsers.objects.filter(user = request.user)[0].vote_percent
try:
self.get_sc2(request).vote(voter = request.user.username, author = user, permlink = permlink, weight = int(weight))
return HttpResponse(json.dumps({"upvote":True,"payout":self.get_payout(user,permlink)}))
except Exception as e :
return HttpResponse(json.dumps({"upvote":False,"error":str(e)}))
def get_sc2(self, request):
access_token = UserSocialAuth.objects.filter(uid = request.user)[0].extra_data["access_token"]
return Sc2(str(access_token))
@staticmethod
def get_payout(user,permlink):
def pending_payout(post):
payout = Amount(post.pending_payout_value).amount
if payout == 0:
payout = (Amount(post.total_payout_value).amount + Amount(post.curator_payout_value).amount)
return payout
get_absolute_url = "@"+user+"/"+permlink
post = Post(post = get_absolute_url)
payout = round(pending_payout(post),4)
return payout
class Feed(View):
template_name = "card/blogs.html"
@method_decorator(login_required)
def get(self, request, *args, **kwargs): # TODO: buradaki işlemin daha hızlı olanı vardır ya
oof = []
queryset = []
for i in Following.objects.filter(user = request.user):
i_wuser = i.which_user
oof.append(i.which_user)
for q in Content.objects.filter(status = "approved"):
if q.user in oof:
queryset.append(q)
info_of_cards = paginator(request,queryset)
context = dict(
content = info_of_cards,
)
return render(request, self.template_name, context)
class Review(View):
template_name = "card/blogs.html"
def get(self, request, *args, **kwargs): # TODO: buradaki işlemin daha hızlı olanı vardır ya
queryset = Content.objects.filter(status = "shared")
info_of_cards = paginator(request,queryset)
context = dict(
content = info_of_cards,
)
return render(request, self.template_name, context)
class Search(TemplateView):
template_name = "card/blogs.html"
def get_context_data(self, **kwargs):
context = super(Search, self).get_context_data(**kwargs)
context["content"] = paginator(self.request,self.get_queryset())
return context
def get_form_data(self,name = "query"):
name = self.request.GET[name].lower()
SearchedWords(word = name).save()
return name
def search_algorithm(self):
searched_data = self.get_form_data()
q = Q(title__contains = searched_data) | Q(content_list__contains = searched_data) | Q(content__contains = searched_data)
queryset = Content.objects.filter(q,status = "approved").order_by("-views")
return queryset
def get_queryset(self):
queryset = self.search_algorithm()
return queryset
class Report(View):
form_class = ReportsForm
template_name = "home/report.html"
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
report_form = self.form_class()
context = dict(
report_form = report_form,
content_id = request.GET["content_id"],
)
return render(request, self.template_name, context)
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
report_form = self.form_class(request.POST)
if report_form.is_valid():
content = Content.objects.filter(id = request.POST["content_id"])[0]
if ReportModel.objects.filter(user = request.user,content = content).exists():
ms.error(request,"Your complaint is in the evaluation process.")
return HttpResponseRedirect("/")
report_form = report_form.save(commit=False)
report_form.user = request.user
report_form.content = content
report_form.save()
ms.error(request,"Your complaint has been received.")
return HttpResponseRedirect("/")
return HttpResponse(self.get(request, *args, **kwargs))
| 36.361842 | 129 | 0.671974 |
from django.http import *
from django.shortcuts import render
from django.contrib.auth import *
from django.db.models import Q
from django.contrib import messages as ms
from django.contrib.auth.models import User
from django.views.generic.base import TemplateView
from django.views import View
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from cooggerapp.forms import ReportsForm
from cooggerapp.models import Content, SearchedWords, ReportModel, Following, OtherInformationOfUsers
from social_django.models import UserSocialAuth
from cooggerapp.views.tools import paginator
import json
from sc2py.sc2py import Sc2
from steem.post import Post
from steem.amount import Amount
class Home(TemplateView):
template_name = "card/blogs.html"
queryset = Content.objects.filter(status = "approved")
def get_context_data(self, **kwargs):
context = super(Home, self).get_context_data(**kwargs)
context["content"] = paginator(self.request,self.queryset)
return context
class Upvote(View):
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
user = request.POST["user"]
permlink = request.POST["permlink"]
weight = OtherInformationOfUsers.objects.filter(user = request.user)[0].vote_percent
try:
self.get_sc2(request).vote(voter = request.user.username, author = user, permlink = permlink, weight = int(weight))
return HttpResponse(json.dumps({"upvote":True,"payout":self.get_payout(user,permlink)}))
except Exception as e :
return HttpResponse(json.dumps({"upvote":False,"error":str(e)}))
def get_sc2(self, request):
access_token = UserSocialAuth.objects.filter(uid = request.user)[0].extra_data["access_token"]
return Sc2(str(access_token))
@staticmethod
def get_payout(user,permlink):
def pending_payout(post):
payout = Amount(post.pending_payout_value).amount
if payout == 0:
payout = (Amount(post.total_payout_value).amount + Amount(post.curator_payout_value).amount)
return payout
get_absolute_url = "@"+user+"/"+permlink
post = Post(post = get_absolute_url)
payout = round(pending_payout(post),4)
return payout
class Feed(View):
template_name = "card/blogs.html"
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
oof = []
queryset = []
for i in Following.objects.filter(user = request.user):
i_wuser = i.which_user
oof.append(i.which_user)
for q in Content.objects.filter(status = "approved"):
if q.user in oof:
queryset.append(q)
info_of_cards = paginator(request,queryset)
context = dict(
content = info_of_cards,
)
return render(request, self.template_name, context)
class Review(View):
template_name = "card/blogs.html"
def get(self, request, *args, **kwargs):
queryset = Content.objects.filter(status = "shared")
info_of_cards = paginator(request,queryset)
context = dict(
content = info_of_cards,
)
return render(request, self.template_name, context)
class Search(TemplateView):
template_name = "card/blogs.html"
def get_context_data(self, **kwargs):
context = super(Search, self).get_context_data(**kwargs)
context["content"] = paginator(self.request,self.get_queryset())
return context
def get_form_data(self,name = "query"):
name = self.request.GET[name].lower()
SearchedWords(word = name).save()
return name
def search_algorithm(self):
searched_data = self.get_form_data()
q = Q(title__contains = searched_data) | Q(content_list__contains = searched_data) | Q(content__contains = searched_data)
queryset = Content.objects.filter(q,status = "approved").order_by("-views")
return queryset
def get_queryset(self):
queryset = self.search_algorithm()
return queryset
class Report(View):
form_class = ReportsForm
template_name = "home/report.html"
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
report_form = self.form_class()
context = dict(
report_form = report_form,
content_id = request.GET["content_id"],
)
return render(request, self.template_name, context)
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
report_form = self.form_class(request.POST)
if report_form.is_valid():
content = Content.objects.filter(id = request.POST["content_id"])[0]
if ReportModel.objects.filter(user = request.user,content = content).exists():
ms.error(request,"Your complaint is in the evaluation process.")
return HttpResponseRedirect("/")
report_form = report_form.save(commit=False)
report_form.user = request.user
report_form.content = content
report_form.save()
ms.error(request,"Your complaint has been received.")
return HttpResponseRedirect("/")
return HttpResponse(self.get(request, *args, **kwargs))
| true | true |
1c31f0b1f24381e5502feacf82f5d0b19649b603 | 69 | py | Python | optimized_transducer/python/optimized_transducer/__init__.py | luomingshuang/optimized_transducer | 80883bb2910d7d9619adb88bfde4034207b7f79a | [
"Apache-2.0"
] | 40 | 2021-12-23T09:25:01.000Z | 2022-03-31T07:29:16.000Z | optimized_transducer/python/optimized_transducer/__init__.py | thangdepzai/optimized_transducer | 4b9c97f37749b2507dfc5aed02d404b235cebc56 | [
"Apache-2.0"
] | 9 | 2021-12-28T12:54:20.000Z | 2022-03-21T10:35:06.000Z | optimized_transducer/python/optimized_transducer/__init__.py | thangdepzai/optimized_transducer | 4b9c97f37749b2507dfc5aed02d404b235cebc56 | [
"Apache-2.0"
] | 8 | 2021-12-28T12:29:38.000Z | 2022-03-23T02:33:17.000Z | from .transducer_loss import TransducerLoss, transducer_loss # noqa
| 34.5 | 68 | 0.84058 | from .transducer_loss import TransducerLoss, transducer_loss
| true | true |
1c31f0db5a4ffa44fe3ea5398ec5b665b4f6c693 | 573 | py | Python | rng/test_rng.py | brahamirabah94/teo-project-rabah | 55dceec8a19124a12cb50c3eac90138b5002be67 | [
"Apache-2.0"
] | null | null | null | rng/test_rng.py | brahamirabah94/teo-project-rabah | 55dceec8a19124a12cb50c3eac90138b5002be67 | [
"Apache-2.0"
] | null | null | null | rng/test_rng.py | brahamirabah94/teo-project-rabah | 55dceec8a19124a12cb50c3eac90138b5002be67 | [
"Apache-2.0"
] | 1 | 2021-04-11T23:53:01.000Z | 2021-04-11T23:53:01.000Z | import rng
import socket
import pytest
hostname = socket.gethostname()
@pytest.fixture
def tester():
tester = rng.index()
return tester
def test_index_type(tester):
assert type(tester) is str
def test_index_content(tester):
assert "RNG running on {}\n".format(hostname) in tester
@pytest.fixture
def test():
test = rng.rng(32)
return test
def test_rng_status(test):
statuscode = test.status_code
assert statuscode == 200
def test_rng_content(test):
content = test.content_type
assert content == "application/octet-stream"
| 19.1 | 61 | 0.712042 | import rng
import socket
import pytest
hostname = socket.gethostname()
@pytest.fixture
def tester():
tester = rng.index()
return tester
def test_index_type(tester):
assert type(tester) is str
def test_index_content(tester):
assert "RNG running on {}\n".format(hostname) in tester
@pytest.fixture
def test():
test = rng.rng(32)
return test
def test_rng_status(test):
statuscode = test.status_code
assert statuscode == 200
def test_rng_content(test):
content = test.content_type
assert content == "application/octet-stream"
| true | true |
1c31f0e249e863c2aaf5c8ca2c12a20dbc48509b | 524 | py | Python | test/unit/api/test_configuration.py | rhpvorderman/galaxy | 178015f8eff0b0c7a59c0d6756658f6428222837 | [
"CC-BY-3.0"
] | 1 | 2021-05-18T02:20:43.000Z | 2021-05-18T02:20:43.000Z | test/unit/api/test_configuration.py | rhpvorderman/galaxy | 178015f8eff0b0c7a59c0d6756658f6428222837 | [
"CC-BY-3.0"
] | null | null | null | test/unit/api/test_configuration.py | rhpvorderman/galaxy | 178015f8eff0b0c7a59c0d6756658f6428222837 | [
"CC-BY-3.0"
] | null | null | null | from galaxy.webapps.galaxy.api.configuration import parse_serialization_params
def test_parse_serialization_params():
view, default_view = 'a', 'b'
keys = 'foo'
serialized = parse_serialization_params(view, keys, default_view)
assert serialized['view'] == view
assert serialized['default_view'] == default_view
assert serialized['keys'] == [keys]
keys = 'foo,bar,baz'
serialized = parse_serialization_params(view, keys, default_view)
assert serialized['keys'] == ['foo', 'bar', 'baz']
| 34.933333 | 78 | 0.709924 | from galaxy.webapps.galaxy.api.configuration import parse_serialization_params
def test_parse_serialization_params():
view, default_view = 'a', 'b'
keys = 'foo'
serialized = parse_serialization_params(view, keys, default_view)
assert serialized['view'] == view
assert serialized['default_view'] == default_view
assert serialized['keys'] == [keys]
keys = 'foo,bar,baz'
serialized = parse_serialization_params(view, keys, default_view)
assert serialized['keys'] == ['foo', 'bar', 'baz']
| true | true |
1c31f16bbdcd758726438a6e84c843cc19fe9ff0 | 8,936 | py | Python | orttraining/orttraining/test/python/orttraining_test_ortmodule_poc.py | mrshu/onnxruntime | 335edaa2c485ba0dec877bf4cdbd652e2d5d105c | [
"MIT"
] | 1 | 2021-03-23T16:25:11.000Z | 2021-03-23T16:25:11.000Z | orttraining/orttraining/test/python/orttraining_test_ortmodule_poc.py | zener90818/onnxruntime | a7a2a16edddc283b53d7737f897b4bbda5e86209 | [
"MIT"
] | null | null | null | orttraining/orttraining/test/python/orttraining_test_ortmodule_poc.py | zener90818/onnxruntime | a7a2a16edddc283b53d7737f897b4bbda5e86209 | [
"MIT"
] | null | null | null | import argparse
import logging
import os
import torch
import time
from torchvision import datasets, transforms
import onnxruntime
from onnxruntime.training import ORTModule
class NeuralNet(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNet, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input1):
out = self.fc1(input1)
out = self.relu(out)
out = self.fc2(out)
return out
def train(args, model, device, optimizer, loss_fn, train_loader, epoch):
print('\n======== Epoch {:} / {:} with batch size {:} ========'.format(epoch+1, args.epochs, args.batch_size))
model.train()
# Measure how long the training epoch takes.
t0 = time.time()
start_time = t0
# Reset the total loss for this epoch.
total_loss = 0
for iteration, (data, target) in enumerate(train_loader):
if iteration == args.train_steps:
break
data, target = data.to(device), target.to(device)
data = data.reshape(data.shape[0], -1)
optimizer.zero_grad()
probability = model(data)
if args.view_graphs:
import torchviz
pytorch_backward_graph = torchviz.make_dot(probability, params=dict(list(model.named_parameters())))
pytorch_backward_graph.view()
loss = loss_fn(probability, target)
# Accumulate the training loss over all of the batches so that we can
# calculate the average loss at the end. `loss` is a Tensor containing a
# single value; the `.item()` function just returns the Python value
# from the tensor.
total_loss += loss.item()
loss.backward()
optimizer.step()
# Stats
if iteration % args.log_interval == 0:
curr_time = time.time()
elapsed_time = curr_time - start_time
print('[{:5}/{:5} ({:2.0f}%)]\tLoss: {:.6f}\tExecution time: {:.4f}'.format(
iteration * len(data), len(train_loader.dataset),
100. * iteration / len(train_loader), loss, elapsed_time))
start_time = curr_time
# Calculate the average loss over the training data.
avg_train_loss = total_loss / len(train_loader)
epoch_time = time.time() - t0
print("\n Average training loss: {0:.2f}".format(avg_train_loss))
print(" Training epoch took: {:.4f}s".format(epoch_time))
return epoch_time
def test(args, model, device, loss_fn, test_loader):
model.eval()
t0 = time.time()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
data = data.reshape(data.shape[0], -1)
output = model(data)
# Stats
test_loss += loss_fn(output, target, False).item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Batch size: {:}, Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
args.test_batch_size, test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
# Report the final accuracy for this validation run.
epoch_time = time.time() - t0
accuracy = float(correct)/len(test_loader.dataset)
print(" Accuracy: {0:.2f}".format(accuracy))
print(" Validation took: {:.4f}s".format(epoch_time))
return epoch_time, accuracy
def my_loss(x, target, is_train=True):
if is_train:
return torch.nn.CrossEntropyLoss()(x, target)
else:
return torch.nn.CrossEntropyLoss(reduction='sum')(x, target)
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--train-steps', type=int, default=-1, metavar='N',
help='number of steps to train. Set -1 to run through whole dataset (default: -1)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--batch-size', type=int, default=32, metavar='N',
help='input batch size for training (default: 32)')
parser.add_argument('--test-batch-size', type=int, default=64, metavar='N',
help='input batch size for testing (default: 64)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--pytorch-only', action='store_true', default=False,
help='disables ONNX Runtime training')
parser.add_argument('--log-interval', type=int, default=300, metavar='N',
help='how many batches to wait before logging training status (default: 300)')
parser.add_argument('--view-graphs', action='store_true', default=False,
help='views forward and backward graphs')
parser.add_argument('--epochs', type=int, default=5, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--log-level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], default='WARNING',
help='Log level (default: WARNING)')
parser.add_argument('--data-dir', type=str, default='./mnist',
help='Path to the mnist data directory')
args = parser.parse_args()
# Common setup
torch.manual_seed(args.seed)
onnxruntime.set_seed(args.seed)
if not args.no_cuda and torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
## Data loader
train_loader = torch.utils.data.DataLoader(datasets.MNIST(args.data_dir, train=True, download=True,
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=args.batch_size,
shuffle=True)
test_loader = None
if args.test_batch_size > 0:
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(args.data_dir, train=False, transform=transforms.Compose([
transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=args.test_batch_size, shuffle=True)
# Model architecture
model = NeuralNet(input_size=784, hidden_size=500, num_classes=10).to(device)
if not args.pytorch_only:
print('Training MNIST on ORTModule....')
model = ORTModule(model)
# TODO: change it to False to stop saving ONNX models
model._save_onnx = True
model._save_onnx_prefix = 'MNIST'
# Set log level
numeric_level = getattr(logging, args.log_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % args.log_level)
logging.basicConfig(level=numeric_level)
else:
print('Training MNIST on vanilla PyTorch....')
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
# Train loop
total_training_time, total_test_time, epoch_0_training, validation_accuracy = 0, 0, 0, 0
for epoch in range(0, args.epochs):
total_training_time += train(args, model, device, optimizer, my_loss, train_loader, epoch)
if not args.pytorch_only and epoch == 0:
epoch_0_training = total_training_time
if args.test_batch_size > 0:
test_time, validation_accuracy = test(args, model, device, my_loss, test_loader)
total_test_time += test_time
assert validation_accuracy > 0.92
print('\n======== Global stats ========')
if not args.pytorch_only:
estimated_export = 0
if args.epochs > 1:
estimated_export = epoch_0_training - (total_training_time - epoch_0_training)/(args.epochs-1)
print(" Estimated ONNX export took: {:.4f}s".format(estimated_export))
else:
print(" Estimated ONNX export took: Estimate available when epochs > 1 only")
print(" Accumulated training without export took: {:.4f}s".format(total_training_time - estimated_export))
print(" Accumulated training took: {:.4f}s".format(total_training_time))
print(" Accumulated validation took: {:.4f}s".format(total_test_time))
if __name__ == '__main__':
main()
| 41.953052 | 120 | 0.614593 | import argparse
import logging
import os
import torch
import time
from torchvision import datasets, transforms
import onnxruntime
from onnxruntime.training import ORTModule
class NeuralNet(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNet, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input1):
out = self.fc1(input1)
out = self.relu(out)
out = self.fc2(out)
return out
def train(args, model, device, optimizer, loss_fn, train_loader, epoch):
print('\n======== Epoch {:} / {:} with batch size {:} ========'.format(epoch+1, args.epochs, args.batch_size))
model.train()
t0 = time.time()
start_time = t0
total_loss = 0
for iteration, (data, target) in enumerate(train_loader):
if iteration == args.train_steps:
break
data, target = data.to(device), target.to(device)
data = data.reshape(data.shape[0], -1)
optimizer.zero_grad()
probability = model(data)
if args.view_graphs:
import torchviz
pytorch_backward_graph = torchviz.make_dot(probability, params=dict(list(model.named_parameters())))
pytorch_backward_graph.view()
loss = loss_fn(probability, target)
total_loss += loss.item()
loss.backward()
optimizer.step()
if iteration % args.log_interval == 0:
curr_time = time.time()
elapsed_time = curr_time - start_time
print('[{:5}/{:5} ({:2.0f}%)]\tLoss: {:.6f}\tExecution time: {:.4f}'.format(
iteration * len(data), len(train_loader.dataset),
100. * iteration / len(train_loader), loss, elapsed_time))
start_time = curr_time
avg_train_loss = total_loss / len(train_loader)
epoch_time = time.time() - t0
print("\n Average training loss: {0:.2f}".format(avg_train_loss))
print(" Training epoch took: {:.4f}s".format(epoch_time))
return epoch_time
def test(args, model, device, loss_fn, test_loader):
model.eval()
t0 = time.time()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
data = data.reshape(data.shape[0], -1)
output = model(data)
test_loss += loss_fn(output, target, False).item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Batch size: {:}, Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
args.test_batch_size, test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
epoch_time = time.time() - t0
accuracy = float(correct)/len(test_loader.dataset)
print(" Accuracy: {0:.2f}".format(accuracy))
print(" Validation took: {:.4f}s".format(epoch_time))
return epoch_time, accuracy
def my_loss(x, target, is_train=True):
if is_train:
return torch.nn.CrossEntropyLoss()(x, target)
else:
return torch.nn.CrossEntropyLoss(reduction='sum')(x, target)
def main():
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--train-steps', type=int, default=-1, metavar='N',
help='number of steps to train. Set -1 to run through whole dataset (default: -1)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--batch-size', type=int, default=32, metavar='N',
help='input batch size for training (default: 32)')
parser.add_argument('--test-batch-size', type=int, default=64, metavar='N',
help='input batch size for testing (default: 64)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--pytorch-only', action='store_true', default=False,
help='disables ONNX Runtime training')
parser.add_argument('--log-interval', type=int, default=300, metavar='N',
help='how many batches to wait before logging training status (default: 300)')
parser.add_argument('--view-graphs', action='store_true', default=False,
help='views forward and backward graphs')
parser.add_argument('--epochs', type=int, default=5, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--log-level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], default='WARNING',
help='Log level (default: WARNING)')
parser.add_argument('--data-dir', type=str, default='./mnist',
help='Path to the mnist data directory')
args = parser.parse_args()
torch.manual_seed(args.seed)
onnxruntime.set_seed(args.seed)
if not args.no_cuda and torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
ader = torch.utils.data.DataLoader(datasets.MNIST(args.data_dir, train=True, download=True,
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=args.batch_size,
shuffle=True)
test_loader = None
if args.test_batch_size > 0:
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(args.data_dir, train=False, transform=transforms.Compose([
transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=args.test_batch_size, shuffle=True)
model = NeuralNet(input_size=784, hidden_size=500, num_classes=10).to(device)
if not args.pytorch_only:
print('Training MNIST on ORTModule....')
model = ORTModule(model)
model._save_onnx = True
model._save_onnx_prefix = 'MNIST'
numeric_level = getattr(logging, args.log_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % args.log_level)
logging.basicConfig(level=numeric_level)
else:
print('Training MNIST on vanilla PyTorch....')
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
total_training_time, total_test_time, epoch_0_training, validation_accuracy = 0, 0, 0, 0
for epoch in range(0, args.epochs):
total_training_time += train(args, model, device, optimizer, my_loss, train_loader, epoch)
if not args.pytorch_only and epoch == 0:
epoch_0_training = total_training_time
if args.test_batch_size > 0:
test_time, validation_accuracy = test(args, model, device, my_loss, test_loader)
total_test_time += test_time
assert validation_accuracy > 0.92
print('\n======== Global stats ========')
if not args.pytorch_only:
estimated_export = 0
if args.epochs > 1:
estimated_export = epoch_0_training - (total_training_time - epoch_0_training)/(args.epochs-1)
print(" Estimated ONNX export took: {:.4f}s".format(estimated_export))
else:
print(" Estimated ONNX export took: Estimate available when epochs > 1 only")
print(" Accumulated training without export took: {:.4f}s".format(total_training_time - estimated_export))
print(" Accumulated training took: {:.4f}s".format(total_training_time))
print(" Accumulated validation took: {:.4f}s".format(total_test_time))
if __name__ == '__main__':
main()
| true | true |
1c31f26c118cd7f40e870719edf96e1745687330 | 3,482 | py | Python | server/web/src/app/settings.py | jphacks/D_2002 | 6f97fa23d7512bad9b04bec81a2668cf43dfa1bc | [
"MIT"
] | 4 | 2020-11-01T07:28:02.000Z | 2022-02-05T04:31:03.000Z | server/web/src/app/settings.py | jphacks/D_2002 | 6f97fa23d7512bad9b04bec81a2668cf43dfa1bc | [
"MIT"
] | 33 | 2020-10-31T05:12:12.000Z | 2020-11-06T03:57:22.000Z | server/web/src/app/settings.py | jphacks/D_2002 | 6f97fa23d7512bad9b04bec81a2668cf43dfa1bc | [
"MIT"
] | 2 | 2020-11-22T01:43:32.000Z | 2021-01-23T07:43:37.000Z | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): a hard-coded key is acceptable for local development only;
# production deployments should load it from the environment instead.
SECRET_KEY = 'yzlz!c6gwp4gs#_51=wxzc2%a%c&%(4y38w7eqsb(l#qc87t&$'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = ["*"]

# Application definition
INSTALLED_APPS = [
    'accounts.apps.AccountsConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'main.apps.MainConfig',
    'bootstrap4',
    'stdimage',
    'rest_framework',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'app.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.template.context_processors.media',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'app.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ja-JP'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True

AUTH_USER_MODEL = 'accounts.User'

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Bug fix: os.path.join(BASE_DIR, '/static') discarded BASE_DIR because the
# second component is absolute, so STATIC_ROOT silently became the filesystem
# root path '/static'.  Collect static files into a project-local directory
# instead; it must also differ from the STATICFILES_DIRS entry below, which
# Django would otherwise reject at collectstatic time.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
| 25.602941 | 91 | 0.691557 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'yzlz!c6gwp4gs#_51=wxzc2%a%c&%(4y38w7eqsb(l#qc87t&$'
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'accounts.apps.AccountsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main.apps.MainConfig',
'bootstrap4',
'stdimage',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ja-JP'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
AUTH_USER_MODEL = 'accounts.User'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(BASE_DIR, '/static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
| true | true |
1c31f3c27afc537cf565ec4d4405ca174b0b6db4 | 430 | py | Python | tools/emmakenxx.py | diclophis/emscripten | 1e6009144e50f9a920208868003b6b93ea972732 | [
"MIT"
] | 8 | 2015-04-15T16:23:11.000Z | 2020-04-07T13:38:25.000Z | tools/emmakenxx.py | comforx/emscripten | f842201acec3c1edafb2916a76a8eb8d75474c2b | [
"MIT"
] | null | null | null | tools/emmakenxx.py | comforx/emscripten | f842201acec3c1edafb2916a76a8eb8d75474c2b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
'''
see emmaken.py
'''
import os, subprocess, sys
# Absolute path of the repository root: two directory levels above this file.
_this_file = os.path.abspath(__file__)
__rootpath__ = os.path.dirname(os.path.dirname(_this_file))
def path_from_root(*pathelems):
  """Return an absolute path obtained by joining *pathelems* onto the
  repository root; with no arguments, the root itself."""
  return os.path.join(__rootpath__, *pathelems)
# Make the repository root importable so `tools.shared` resolves.
sys.path += [path_from_root('')]
from tools.shared import *
# Delegate to emmaken.py with EMMAKEN_CXX=1 so it acts as a C++ driver,
# forwarding all command-line arguments and propagating the exit status.
emmaken = path_from_root('tools', 'emmaken.py')
os.environ['EMMAKEN_CXX'] = '1'
exit(subprocess.call(['python', emmaken] + sys.argv[1:]))
| 22.631579 | 74 | 0.713953 |
import os, subprocess, sys
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def path_from_root(*pathelems):
return os.path.join(__rootpath__, *pathelems)
sys.path += [path_from_root('')]
from tools.shared import *
emmaken = path_from_root('tools', 'emmaken.py')
os.environ['EMMAKEN_CXX'] = '1'
exit(subprocess.call(['python', emmaken] + sys.argv[1:]))
| true | true |
1c31f48b540da46313b4bee4b84efc5fd97a0e71 | 11,718 | py | Python | src/app/mongodb.py | smlng/lbv | b8a584eac413ac85bd363154c69036cddc328477 | [
"MIT"
] | 1 | 2016-03-09T14:40:40.000Z | 2016-03-09T14:40:40.000Z | src/app/mongodb.py | smlng/lbv | b8a584eac413ac85bd363154c69036cddc328477 | [
"MIT"
] | 2 | 2016-03-23T07:46:03.000Z | 2016-04-19T15:05:55.000Z | src/app/mongodb.py | smlng/lbv | b8a584eac413ac85bd363154c69036cddc328477 | [
"MIT"
] | null | null | null | """
"""
import logging
from datetime import datetime
from pymongo import MongoClient, DESCENDING
from netaddr import IPNetwork
# Validity states in precedence order: a prefix's address count and prefix
# length are attributed to the first state (left to right) that any of its
# origins reported.
_VALIDITY_STATES = ('Valid', 'InvalidLength', 'InvalidAS', 'NotFound')


def _init_version_stats():
    """Return a fresh stats dict with zeroed counters for one IP version."""
    stats = dict()
    for state in _VALIDITY_STATES:
        stats['num_' + state] = 0    # announcements per validity state
        stats['ips_' + state] = 0    # covered addresses per validity state
        stats['pfx_' + state] = []   # prefix lengths per validity state
    return stats


def _tally_prefix(stats, ipn, origins):
    """Fold one aggregated prefix record into *stats*.

    ``num_<state>`` is incremented once per origin AS; address counts and
    prefix lengths are attributed once per prefix, using the precedence
    Valid > InvalidLength > InvalidAS > NotFound (same as the original
    per-version branches this helper replaces).
    """
    seen = {state: False for state in _VALIDITY_STATES}
    for origin in origins:
        key = 'num_' + origin['validity']
        stats[key] = stats.get(key, 0) + 1
        # KeyError here (unknown validity string) is caught by the caller.
        seen[origin['validity']] = True
    for state in _VALIDITY_STATES:
        if seen[state]:
            stats['ips_' + state] += ipn.size
            stats['pfx_' + state].append(ipn.prefixlen)
            break


def get_ipversion_stats(dbconnstr):
    """Generate IP-version-specific validation stats from the database.

    Returns a ``(ipv4_stats, ipv6_stats)`` tuple of counter dicts, or
    ``(None, None)`` when the ``validity_latest`` collection is missing or
    empty, or when the aggregation fails.
    """
    client = MongoClient(dbconnstr)
    database = client.get_default_database()
    if "validity_latest" not in database.collection_names() or \
            database.validity_latest.count() == 0:
        return None, None
    ipv4_stats = _init_version_stats()
    ipv6_stats = _init_version_stats()
    try:
        # Group all latest records by prefix, collecting every origin AS
        # together with its validation state.
        pipeline = [{"$group": {
            "_id": '$value.validated_route.route.prefix',
            "origins": {"$push": {
                "asn": "$value.validated_route.route.origin_asn",
                "validity": "$value.validated_route.validity.state"}}
        }}]
        results = list(database.validity_latest.aggregate(pipeline,
                                                          allowDiskUse=True))
        for res in results:
            if res['_id'] is None:
                logging.debug("empty record, skipping")
                continue
            ipn = IPNetwork(res['_id'])
            if ipn.version == 4:
                _tally_prefix(ipv4_stats, ipn, res['origins'])
            elif ipn.version == 6:
                _tally_prefix(ipv6_stats, ipn, res['origins'])
    except Exception as errmsg:
        logging.exception("get_ipversion_stats, error: " + str(errmsg))
        ipv4_stats = None
        ipv6_stats = None
    return ipv4_stats, ipv6_stats
def get_dash_stats(dbconnstr):
    """Collect dashboard summary counters from ``validity_latest``.

    Returns a dict with per-state announcement counts, a grand total, and
    the timestamp of the newest record (raw and formatted) -- or ``None``
    when the query fails.
    """
    database = MongoClient(dbconnstr).get_default_database()
    # Start from zeroed counters so states with no announcements show as 0.
    stats = {
        'latest_dt': 'now',
        'latest_ts': 0,
        'num_Valid': 0,
        'num_InvalidAS': 0,
        'num_InvalidLength': 0,
        'num_NotFound': 0,
        'num_Total': 0,
    }
    if "validity_latest" in database.collection_names() and database.validity_latest.count() > 0:
        try:
            pipeline = [
                {"$match": {'value.type': 'announcement'}},
                {"$group": {"_id": "$value.validated_route.validity.state", "count": {"$sum": 1}}}
            ]
            for entry in database.validity_latest.aggregate(pipeline, allowDiskUse=True):
                stats["num_" + entry['_id']] = entry['count']
                stats['num_Total'] += entry['count']
            # Newest record determines the dashboard's "last updated" stamp.
            newest = database.validity_latest.find_one(
                projection={'value.timestamp': True, '_id': False},
                sort=[('value.timestamp', DESCENDING)])
            stats['latest_ts'] = newest['value']['timestamp']
            stats['latest_dt'] = datetime.fromtimestamp(
                int(stats['latest_ts'])).strftime('%Y-%m-%d %H:%M:%S')
        except Exception as errmsg:
            logging.exception("get_dash_stats, error: " + str(errmsg))
            stats = None
    return stats
def get_last24h_stats(dbconnstr, latest_ts):
    """Return ``validity_stats`` entries from the 24 hours before *latest_ts*.

    Entries are sorted newest-first.  ``None`` is returned when the
    collection is missing/empty or the query fails.
    """
    database = MongoClient(dbconnstr).get_default_database()
    results = None
    if "validity_stats" in database.collection_names() and database.validity_stats.count() > 0:
        try:
            cutoff = int(latest_ts) - (3600*24)  # one day before the newest record
            cursor = database.validity_stats.find({'ts': {'$gt': cutoff}}, {'_id': 0})
            results = list(cursor.sort('ts', DESCENDING))
        except Exception as errmsg:
            logging.exception("get_last24h_stats, error: " + str(errmsg))
            results = None
    return results
def get_validation_list(dbconnstr, state):
    """List all latest validated routes whose validity state equals *state*.

    Each entry carries ``prefix``, ``origin``, ``state`` and the matching
    ``roas``.  Returns an empty list when nothing matches or querying fails.
    """
    database = MongoClient(dbconnstr).get_default_database()
    rlist = []
    if "validity_latest" in database.collection_names() and database.validity_latest.count() > 0:
        try:
            cursor = database.validity_latest.find(
                {'value.validated_route.validity.state' : state},
                {'_id' : 0, 'value.type' : 0, 'value.timestamp' : 0})
            for res in cursor:
                route = res['value']['validated_route']
                rlist.append({
                    'prefix': route['route']['prefix'],
                    'origin': route['route']['origin_asn'],
                    'state': route['validity']['state'],
                    'roas': route['validity']['VRPs'],
                })
        except Exception as errmsg:
            logging.exception("get_validation_list, error: " + str(errmsg))
    return rlist
def get_validation_origin(dbconnstr, search_string):
    """Return all latest routes originated by the AS *search_string*.

    Withdrawn routes are reported with state ``'withdraw'`` and no ROAs.
    Returns ``None`` when the collection is unavailable or the query fails.
    """
    database = MongoClient(dbconnstr).get_default_database()
    rlist = None
    if "validity_latest" in database.collection_names() and database.validity_latest.count() > 0:
        try:
            match_stage = [
                {"$match": {'value.validated_route.route.origin_asn': search_string}}
            ]
            matches = list(database.validity_latest.aggregate(match_stage, allowDiskUse=True))
        except Exception as errmsg:
            logging.exception("get_validation_origin failed with: " + str(errmsg))
        else:
            # Query succeeded: build result entries (possibly an empty list).
            rlist = []
            for res in matches:
                route = res['value']['validated_route']
                entry = {
                    'prefix': route['route']['prefix'],
                    'origin': route['route']['origin_asn'],
                }
                if res['value']['type'] == 'announcement':
                    entry['state'] = route['validity']['state']
                    entry['roas'] = route['validity']['VRPs']
                else:
                    entry['state'] = 'withdraw'
                    entry['roas'] = None
                rlist.append(entry)
    return rlist
def get_validation_prefix(dbconnstr, search_string):
    """Return latest validation records for the most specific prefix that
    covers *search_string*.

    The input is parsed as an address/prefix, the longest matching prefix
    in ``validity_latest`` is located, and every record stored under that
    prefix is returned as a list of dicts (possibly empty).  ``None`` is
    returned when the input cannot be parsed or the database queries keep
    failing.

    NOTE(review): the longest-prefix search starts from ``0.0.0.0/0``, so
    IPv6 input never matches a covering prefix and yields an empty result
    -- confirm whether IPv6 lookups are expected here.
    """
    # Bug fix: rlist was previously unbound when IP parsing failed,
    # raising UnboundLocalError at the final return instead of giving None.
    rlist = None
    try:
        ipa = IPNetwork(search_string).ip
    except Exception as errmsg:
        logging.exception("IP address parse failed with: " + str(errmsg))
    else:
        database = MongoClient(dbconnstr).get_default_database()
        # Longest-prefix match: scan every known prefix and keep the most
        # specific one containing the requested address.  Retry a few times
        # on transient errors; the original retried unboundedly, which hung
        # forever on persistent database failures.
        prefix = None
        for _attempt in range(3):
            try:
                candidates = list(database.validity_latest.find({}, {'_id': 1}))
                prefix = IPNetwork("0.0.0.0/0")
                for res in candidates:
                    ipp = IPNetwork(res['_id'])
                    if (ipa in ipp) and (ipp.prefixlen > prefix.prefixlen):
                        prefix = ipp
                break
            except Exception as errmsg:
                logging.exception("SEARCH failed with: " + str(errmsg))
                prefix = None
        if prefix is not None:
            try:
                rlist = list()
                for res in database.validity_latest.find({'_id': str(prefix)}):
                    data = dict()
                    data['prefix'] = res['_id']
                    data['timestamp'] = res['value']['timestamp']
                    data['type'] = res['value']['type']
                    if data['type'] == 'announcement':
                        data['origin'] = res['value']['validated_route']['route']['origin_asn']
                        data['state'] = res['value']['validated_route']['validity']['state']
                        data['roas'] = res['value']['validated_route']['validity']['VRPs']
                    else:
                        data['state'] = 'withdraw'
                    rlist.append(data)
            except Exception as errmsg:
                logging.exception("SEARCH failed with: " + str(errmsg))
                rlist = None
    return rlist
def get_validation_history(dbconnstr, search_prefix):
    """Return the archived validation history of *search_prefix*, newest
    first.  Failed queries are logged and yield whatever was collected so
    far (an empty list when the query itself fails)."""
    database = MongoClient(dbconnstr).get_default_database()
    history = []
    try:
        cursor = database.archive.find(
            {'prefix': search_prefix},
            {'_id': 0},
            sort=[('timestamp', DESCENDING)])
        for record in cursor:
            entry = {
                'prefix': record['prefix'],
                'timestamp': record['timestamp'],
                'type': record['type'],
            }
            if entry['type'] == 'announcement':
                validated = record['validated_route']
                entry['origin'] = validated['route']['origin_asn']
                entry['state'] = validated['validity']['state']
                entry['roas'] = validated['validity']['VRPs']
            else:
                entry['state'] = 'withdraw'
            history.append(entry)
    except Exception as errmsg:
        logging.exception("SEARCH failed with: " + str(errmsg))
    return history
| 42 | 98 | 0.540963 | import logging
from datetime import datetime
from pymongo import MongoClient, DESCENDING
from netaddr import IPNetwork
def get_ipversion_stats(dbconnstr):
client = MongoClient(dbconnstr)
database = client.get_default_database()
if "validity_latest" not in database.collection_names() or \
database.validity_latest.count() == 0:
return None, None
types = ['num_', 'ips_']
ipv4_stats = dict()
for t in types:
ipv4_stats[t+'Valid'] = 0
ipv4_stats[t+'InvalidAS'] = 0
ipv4_stats[t+'InvalidLength'] = 0
ipv4_stats[t+'NotFound'] = 0
ipv4_stats['pfx_Valid'] = []
ipv4_stats['pfx_InvalidAS'] = []
ipv4_stats['pfx_InvalidLength'] = []
ipv4_stats['pfx_NotFound'] = []
ipv6_stats = dict()
for t in types:
ipv6_stats[t+'Valid'] = 0
ipv6_stats[t+'InvalidAS'] = 0
ipv6_stats[t+'InvalidLength'] = 0
ipv6_stats[t+'NotFound'] = 0
ipv6_stats['pfx_Valid'] = []
ipv6_stats['pfx_InvalidAS'] = []
ipv6_stats['pfx_InvalidLength'] = []
ipv6_stats['pfx_NotFound'] = []
try:
pipeline = [{"$group": {
"_id": '$value.validated_route.route.prefix',
"origins": {"$push": {
"asn": "$value.validated_route.route.origin_asn",
"validity": "$value.validated_route.validity.state"}}
}}]
results = list(database.validity_latest.aggregate(pipeline, allowDiskUse=True))
for res in results:
if res['_id'] is None:
logging.debug("emtpy record, skipping")
continue
ipn = IPNetwork(res['_id'])
b_val = {"Valid": False,
"InvalidLength": False,
"InvalidAS": False,
"NotFound": False}
if ipn.version == 4:
for asn in res['origins']:
if "num_"+asn['validity'] in ipv4_stats:
ipv4_stats["num_"+asn['validity']] += 1
else:
ipv4_stats["num_"+asn['validity']] = 1
b_val[asn['validity']] = True
if b_val['Valid']:
ipv4_stats["ips_Valid"] += ipn.size
ipv4_stats["pfx_Valid"].append(ipn.prefixlen)
elif b_val['InvalidLength']:
ipv4_stats["ips_InvalidLength"] += ipn.size
ipv4_stats["pfx_InvalidLength"].append(ipn.prefixlen)
elif b_val['InvalidAS']:
ipv4_stats["ips_InvalidAS"] += ipn.size
ipv4_stats["pfx_InvalidAS"].append(ipn.prefixlen)
elif b_val['NotFound']:
ipv4_stats["ips_NotFound"] += ipn.size
ipv4_stats["pfx_NotFound"].append(ipn.prefixlen)
elif ipn.version == 6:
for asn in res['origins']:
if "num_"+asn['validity'] in ipv6_stats:
ipv6_stats["num_"+asn['validity']] += 1
else:
ipv6_stats["num_"+asn['validity']] = 1
b_val[asn['validity']] = True
if b_val['Valid']:
ipv6_stats["ips_Valid"] += ipn.size
ipv6_stats["pfx_Valid"].append(ipn.prefixlen)
elif b_val['InvalidLength']:
ipv6_stats["ips_InvalidLength"] += ipn.size
ipv6_stats["pfx_InvalidLength"].append(ipn.prefixlen)
elif b_val['InvalidAS']:
ipv6_stats["ips_InvalidAS"] += ipn.size
ipv6_stats["pfx_InvalidAS"].append(ipn.prefixlen)
elif b_val['NotFound']:
ipv6_stats["ips_NotFound"] += ipn.size
ipv6_stats["pfx_NotFound"].append(ipn.prefixlen)
except Exception as errmsg:
logging.exception("get_ipversion_stats, error: " + str(errmsg))
ipv4_stats = None
ipv6_stats = None
return ipv4_stats, ipv6_stats
def get_dash_stats(dbconnstr):
client = MongoClient(dbconnstr)
database = client.get_default_database()
stats = dict()
stats['latest_dt'] = 'now'
stats['latest_ts'] = 0
stats['num_Valid'] = 0
stats['num_InvalidAS'] = 0
stats['num_InvalidLength'] = 0
stats['num_NotFound'] = 0
stats['num_Total'] = 0
if "validity_latest" in database.collection_names() and database.validity_latest.count() > 0:
try:
pipeline = [
{"$match": {'value.type': 'announcement'}},
{"$group": {"_id": "$value.validated_route.validity.state", "count": {"$sum": 1}}}
]
results = list(database.validity_latest.aggregate(pipeline, allowDiskUse=True))
for i in range(0, len(results)):
stats["num_"+results[i]['_id']] = results[i]['count']
stats['num_Total'] += results[i]['count']
stats['latest_ts'] = database.validity_latest.find_one(
projection={'value.timestamp': True, '_id': False},
sort=[('value.timestamp', DESCENDING)])['value']['timestamp']
stats['latest_dt'] = datetime.fromtimestamp(
int(stats['latest_ts'])).strftime('%Y-%m-%d %H:%M:%S')
except Exception as errmsg:
logging.exception("get_dash_stats, error: " + str(errmsg))
stats = None
return stats
def get_last24h_stats(dbconnstr, latest_ts):
client = MongoClient(dbconnstr)
database = client.get_default_database()
last24h = None
if "validity_stats" in database.collection_names() and database.validity_stats.count() > 0:
try:
ts24 = int(latest_ts) - (3600*24)
last24h = list(database.validity_stats.find(
{'ts': {'$gt': ts24}},
{'_id':0}).sort('ts', DESCENDING))
except Exception as errmsg:
logging.exception("get_last24h_stats, error: " + str(errmsg))
last24h = None
return last24h
def get_validation_list(dbconnstr, state):
client = MongoClient(dbconnstr)
database = client.get_default_database()
rlist = []
if "validity_latest" in database.collection_names() and database.validity_latest.count() > 0:
try:
results = database.validity_latest.find(
{'value.validated_route.validity.state' : state},
{'_id' : 0, 'value.type' : 0, 'value.timestamp' : 0})
for res in results:
data = dict()
data['prefix'] = res['value']['validated_route']['route']['prefix']
data['origin'] = res['value']['validated_route']['route']['origin_asn']
data['state'] = res['value']['validated_route']['validity']['state']
data['roas'] = res['value']['validated_route']['validity']['VRPs']
rlist.append(data)
except Exception as errmsg:
logging.exception("get_validation_list, error: " + str(errmsg))
return rlist
def get_validation_origin(dbconnstr, search_string):
rlist = None
client = MongoClient(dbconnstr)
database = client.get_default_database()
if "validity_latest" in database.collection_names() and database.validity_latest.count() > 0:
try:
pipeline = [
{"$match": {'value.validated_route.route.origin_asn': search_string}}
]
results = list(database.validity_latest.aggregate(pipeline, allowDiskUse=True))
except Exception as errmsg:
logging.exception("get_validation_origin failed with: " + str(errmsg))
else:
rlist = list()
for res in results:
data = dict()
data['prefix'] = res['value']['validated_route']['route']['prefix']
data['origin'] = res['value']['validated_route']['route']['origin_asn']
if res['value']['type'] == 'announcement':
data['state'] = res['value']['validated_route']['validity']['state']
data['roas'] = res['value']['validated_route']['validity']['VRPs']
else:
data['state'] = 'withdraw'
data['roas'] = None
rlist.append(data)
return rlist
def get_validation_prefix(dbconnstr, search_string):
prefix = None
result = None
try:
ipa = IPNetwork(search_string).ip
except Exception as errmsg:
logging.exception("IP address parse failed with: " + str(errmsg))
else:
client = MongoClient(dbconnstr)
database = client.get_default_database()
while prefix is None:
try:
results = list(database.validity_latest.find({}, {'_id': 1}))
prefix = IPNetwork("0.0.0.0/0")
for res in results:
ipp = IPNetwork(res['_id'])
if (ipa in ipp) and (ipp.prefixlen > prefix.prefixlen):
prefix = ipp
except Exception as errmsg:
logging.exception("SEARCH failed with: " + str(errmsg))
prefix = None
try:
results = list(database.validity_latest.find({'_id': str(prefix)}))
rlist = list()
for res in results:
data = dict()
data['prefix'] = res['_id']
data['timestamp'] = res['value']['timestamp']
data['type'] = res['value']['type']
if data['type'] == 'announcement':
data['origin'] = res['value']['validated_route']['route']['origin_asn']
data['state'] = res['value']['validated_route']['validity']['state']
data['roas'] = res['value']['validated_route']['validity']['VRPs']
else:
data['state'] = 'withdraw'
rlist.append(data)
except Exception as errmsg:
logging.exception("SEARCH failed with: " + str(errmsg))
rlist = None
return rlist
def get_validation_history(dbconnstr, search_prefix):
rlist = list()
client = MongoClient(dbconnstr)
database = client.get_default_database()
try:
results = database.archive.find(
{'prefix': search_prefix},
{'_id': 0},
sort=[('timestamp', DESCENDING)])
for res in results:
data = dict()
data['prefix'] = res['prefix']
data['timestamp'] = res['timestamp']
data['type'] = res['type']
if data['type'] == 'announcement':
data['origin'] = res['validated_route']['route']['origin_asn']
data['state'] = res['validated_route']['validity']['state']
data['roas'] = res['validated_route']['validity']['VRPs']
else:
data['state'] = 'withdraw'
rlist.append(data)
except Exception as errmsg:
logging.exception("SEARCH failed with: " + str(errmsg))
return rlist
| true | true |
1c31f49fe0392d66c30abd8428803d2c2bbee716 | 46,197 | py | Python | tests/core/test_model.py | jld23/sasoptpy | f96911f04d6c0c01fce902f1f995935583df69a8 | [
"Apache-2.0"
] | null | null | null | tests/core/test_model.py | jld23/sasoptpy | f96911f04d6c0c01fce902f1f995935583df69a8 | [
"Apache-2.0"
] | null | null | null | tests/core/test_model.py | jld23/sasoptpy | f96911f04d6c0c01fce902f1f995935583df69a8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for core classes.
"""
from collections import OrderedDict
from difflib import SequenceMatcher
import inspect
import os
import unittest
import warnings
from inspect import cleandoc
import sasoptpy as so
from tests.swat_config import create_cas_connection
class MockSASconfig:
    """Minimal stand-in for a saspy SAS configuration object.

    Only the configuration ``name`` attribute read by the tests is kept.
    """

    def __init__(self, name):
        # Mirror the attribute exposed by the real saspy config object.
        self.name = name
class SASsession:
    """Lightweight stand-in for ``saspy.SASsession`` used by these tests.

    It mimics the real session's ``sascfg`` attribute via MockSASconfig.
    """
    def __init__(self, cfgname):
        # NOTE(review): saspy is imported but otherwise unused here --
        # presumably to make construction fail fast when saspy is absent;
        # confirm intent before removing.
        import saspy
        self.sascfg = MockSASconfig(name=cfgname)
class TestModel(unittest.TestCase):
"""
Unit tests for :class:`sasoptpy.Model` objects
"""
    @classmethod
    def setUpClass(cls):
        """Open one shared CAS connection for the whole class, if possible."""
        cls.conn = None
        from swat import CAS, SWATError
        try:
            cls.conn = create_cas_connection()
        except SWATError:
            # CAS server unreachable; tests run without a session.
            warnings.warn('CAS connection is not available',
                          RuntimeWarning)
        except TypeError:
            # Connection parameters (host/port variables) are missing.
            warnings.warn('CAS variables are not available',
                          RuntimeWarning)
    @classmethod
    def tearDownClass(cls):
        """Close the shared CAS connection if one was opened."""
        if cls.conn is not None:
            cls.conn.close()
    def setUp(self):
        """Per-test setup; no shared per-test state is required."""
        pass
    @classmethod
    def get_standard_model(cls, name):
        """Build a small reference model: one scalar variable, a two-element
        variable group, and matching upper-bound constraints."""
        m = so.Model(name=name)
        x = m.add_variable(name='x')
        y = m.add_variables(2, name='y')
        c1 = m.add_constraint(x <= 5, name='c1')
        c2 = m.add_constraints((y[i] <= 3 for i in range(2)), name='c2')
        return m
    def test_initialize(self):
        """A Model constructed without a session is still a Model instance."""
        m = so.Model(name='test_initialize', session=None)
        self.assertEqual(type(m), so.Model)
    def test_comparison(self):
        """Distinct models compare unequal, a model equals itself, and
        comparing against a non-Model value emits a RuntimeWarning."""
        model1 = so.Model(name='test_equal_1', session=None)
        model2 = so.Model(name='test_equal_2', session=None)
        self.assertFalse(model1 == model2)
        model3 = model1
        self.assertTrue(model1 == model3)

        def invalid_comparison():
            # Comparing with an unrelated type should warn, not raise.
            _ = model1 == list()
        self.assertWarns(RuntimeWarning, invalid_comparison)
    def test_get_name(self):
        """get_name() returns the name given at construction time."""
        m = so.Model(name='m')
        self.assertEqual(m.get_name(), 'm')
    def test_adding_variable(self):
        """Variables added directly or via include() are all tracked and
        retrievable by name; unknown names look up as None."""
        m = so.Model(name='test_add_variable')
        x = m.add_variable(name='x')
        y = m.add_variable(name='y', vartype=so.INT)
        z = m.add_variable(name='z', lb=1, ub=10)
        w = m.add_variable(name='w', init=5)
        u = so.Variable(name='u')
        m.include(u)
        self.assertEqual(m.get_variables(), [x, y, z, w, u])
        self.assertEqual(m.get_variable_dict(), {'x': x, 'y': y, 'z': z,
                                                 'w': w, 'u': u})
        self.assertIs(m.get_variable('x'), x)
        self.assertIs(m.get_variable('t'), None)
    def test_duplicate_variables(self):
        """Re-adding a variable under an existing name warns, and the newer
        definition is the one rendered in the OPTMODEL code."""
        m = so.Model(name='test_duplicate_variables')

        def add_multi_var():
            x = m.add_variable(name='x', lb=2)
            x2 = m.add_variable(name='x', lb=1)
        self.assertWarns(UserWarning, add_multi_var)
        self.assertEqual(m.to_optmodel(), cleandoc("""
            proc optmodel;
            min test_duplicate_variables_obj = 0;
            var x >= 1;
            solve;
            quit;"""))
    def test_dropping_variable(self):
        """drop_variable()/drop() remove a variable; include() re-adds it."""
        m = so.Model(name='test_drop_variable')
        x = m.add_variable(name='x')
        self.assertIs(m.get_variables()[0], x)
        self.assertIs(m.get_variable_dict()['x'], x)
        m.drop_variable(x)
        self.assertEqual(m.get_variables(), [])
        self.assertEqual(m.get_variable_dict(), {})
        m.include(x)
        self.assertIs(m.get_variable_dict()['x'], x)
        m.drop(x)
        self.assertEqual(m.get_variable_dict(), {})
    def test_drop_restore_var(self):
        """Dropping a scalar variable removes its declaration, dropping
        group members emits a `drop` statement, and restore undoes both."""
        m = so.Model(name='test_drop_restore')
        x = m.add_variable(name='x')
        y = m.add_variables(5, name='y')
        m.set_objective(y[3], sense=so.minimize, name='obj')
        self.assertEqual(m.to_optmodel(), cleandoc('''
            proc optmodel;
            var x;
            var y {{0,1,2,3,4}};
            min obj = y[3];
            solve;
            quit;'''))
        m.drop_variable(x)
        m.drop_variable(y[1])
        m.drop_variable(y[2])
        self.assertEqual(m.to_optmodel(), cleandoc('''
            proc optmodel;
            var y {{0,1,2,3,4}};
            min obj = y[3];
            drop y[1] y[2];
            solve;
            quit;'''))
        m.restore_variable(x)
        m.restore_variable(y[2])
        self.assertEqual(m.to_optmodel(), cleandoc('''
            proc optmodel;
            var x;
            var y {{0,1,2,3,4}};
            min obj = y[3];
            drop y[1];
            solve;
            quit;'''))
    def test_adding_vargroup(self):
        """Variable groups (numeric, list, and abstract-set indexed) are
        tracked in insertion order and indexable through get_variable()."""
        m = so.Model(name='test_add_vg')
        x = m.add_variables(2, name='x')
        y = m.add_variables(['a', 'b'], name='y', vartype=so.BIN)
        I = so.abstract.Set(name='I')
        z = m.add_variables(I, name='z', lb=1, ub=10, init=5)
        w = so.VariableGroup(5, name='w')
        m.include(w)
        vars = [('x', x), ('y', y), ('z', z), ('w', w)]
        self.assertEqual(m.get_grouped_variables(), OrderedDict(vars))
        self.assertIs(m.get_variable('x')[0], x[0])
    def test_dropping_vargroup(self):
        """drop_variables()/drop() remove a group; include() re-adds it."""
        m = so.Model(name='test_drop_vg')
        x = m.add_variables(2, name='x')
        self.assertEqual(m.get_grouped_variables(), OrderedDict([('x', x)]))
        m.drop_variables(x)
        self.assertEqual(m.get_grouped_variables(), OrderedDict())
        m.include(x)
        self.assertEqual(m.get_grouped_variables(), OrderedDict([('x', x)]))
        m.drop(x)
        self.assertEqual(m.get_grouped_variables(), OrderedDict())
    def test_adding_constraint(self):
        """Constraints are tracked by list and by name, an infinite bound
        is rejected, and unknown names look up as None."""
        m = so.Model(name='test_add_constraint')
        x = m.add_variable(name='x')
        c1 = m.add_constraint(x <= 5, name='c1')
        c2 = m.add_constraint(2 * x + x ** 5 >= 1, name='c2')
        self.assertEqual([c1, c2], m.get_constraints())
        self.assertEqual({'c1': c1, 'c2': c2}, m.get_constraints_dict())

        def invalid_constraint():
            # An infinite right-hand side is not a meaningful constraint.
            from math import inf
            c3 = m.add_constraint(x <= inf, name='c3')
        self.assertRaises(ValueError, invalid_constraint)
        cx = m.get_constraint('c1')
        self.assertEqual(cx, c1)
        cy = m.get_constraint('c3')
        self.assertEqual(cy, None)
    def test_duplicate_constraints(self):
        """Adding two constraints under the same name warns and only one
        appears in the generated OPTMODEL code."""
        m = so.Model(name='test_duplicate_constraints')

        def add_multi_con():
            x = m.add_variable(name='x')
            c1 = m.add_constraint(x <= 5, name='c')
            c2 = m.add_constraint(x <= 5, name='c')
        self.assertWarns(UserWarning, add_multi_con)
        self.assertEqual(m.to_optmodel(), cleandoc("""
            proc optmodel;
            min test_duplicate_constraints_obj = 0;
            var x;
            con c : x <= 5;
            solve;
            quit;"""))
    def test_drop_restore_cons(self):
        """Dropping a scalar constraint removes its declaration, dropping
        group members emits a `drop` statement, and restore undoes both."""
        m = so.Model(name='test_drop_restore_constraints')
        x = m.add_variable(name='x')
        y = m.add_variables(5, name='y')
        m.set_objective(y[3], sense=so.minimize, name='obj')
        c1 = m.add_constraint(x <= 5, name='c1')
        c2 = m.add_constraints((y[i] <= i for i in range(5)), name='c2')
        self.assertEqual(m.to_optmodel(), cleandoc("""
            proc optmodel;
            var x;
            var y {{0,1,2,3,4}};
            min obj = y[3];
            con c1 : x <= 5;
            con c2_0 : y[0] <= 0;
            con c2_1 : y[1] <= 1;
            con c2_2 : y[2] <= 2;
            con c2_3 : y[3] <= 3;
            con c2_4 : y[4] <= 4;
            solve;
            quit;"""))
        m.drop_constraint(c1)
        m.drop_constraint(c2[1])
        m.drop_constraint(c2[2])
        self.assertEqual(m.to_optmodel(), cleandoc("""
            proc optmodel;
            var x;
            var y {{0,1,2,3,4}};
            min obj = y[3];
            con c2_0 : y[0] <= 0;
            con c2_1 : y[1] <= 1;
            con c2_2 : y[2] <= 2;
            con c2_3 : y[3] <= 3;
            con c2_4 : y[4] <= 4;
            drop c2_1 c2_2;
            solve;
            quit;"""))
        m.restore_constraint(c1)
        m.restore_constraint(c2[2])
        self.assertEqual(m.to_optmodel(), cleandoc("""
            proc optmodel;
            var x;
            var y {{0,1,2,3,4}};
            min obj = y[3];
            con c1 : x <= 5;
            con c2_0 : y[0] <= 0;
            con c2_1 : y[1] <= 1;
            con c2_2 : y[2] <= 2;
            con c2_3 : y[3] <= 3;
            con c2_4 : y[4] <= 4;
            drop c2_1;
            solve;
            quit;"""))
def test_dropping_constraint(self):
m = so.Model(name='test_drop_constraint')
x = m.add_variable(name='x')
c1 = m.add_constraint(x <= 5, name='c1')
self.assertEqual({'c1': c1}, m.get_constraints_dict())
m.drop_constraint(c1)
self.assertEqual({}, m.get_constraints_dict())
m.include(c1)
self.assertEqual({'c1': c1}, m.get_constraints_dict())
m.drop(c1)
self.assertEqual({}, m.get_constraints_dict())
def test_adding_constraints(self):
m = so.Model(name='test_add_cg')
x = m.add_variables(5, name='x')
c1 = m.add_constraints((x[i] >= i for i in range(5)), name='c1')
self.assertEqual(OrderedDict([('c1', c1)]), m.get_grouped_constraints())
self.assertEqual(c1, m.get_constraint('c1'))
c2 = so.ConstraintGroup((i * x[i] <= 10 for i in range(5)), name='c2')
m.include(c2)
grouped_con_dict = OrderedDict([('c1', c1), ('c2', c2)])
self.assertEqual(grouped_con_dict, m.get_grouped_constraints())
def warn_user_single_constraint():
c3 = m.add_constraints(x[0] >= 1, name='c3')
self.assertWarns(UserWarning, warn_user_single_constraint)
def test_dropping_constraints(self):
m = so.Model(name='test_drop_cg')
x = m.add_variables(2, name='x')
c1 = m.add_constraints((x[i] <= i for i in range(2)), name='c1')
self.assertEqual(m.get_grouped_constraints(), OrderedDict([('c1', c1)]))
m.drop_constraints(c1)
self.assertEqual(m.get_grouped_constraints(), OrderedDict())
m.include(c1)
self.assertEqual(m.get_grouped_constraints(), OrderedDict([('c1', c1)]))
m.drop(c1)
self.assertEqual(m.get_grouped_constraints(), OrderedDict())
def test_add_set(self):
m = so.Model(name='test_add_set')
I = m.add_set(name='I', init=2)
self.assertEqual(m.get_sets(), [I])
self.assertEqual(so.to_definition(m.get_sets()[0]), "set I init 2;")
def test_add_parameter(self):
m = so.Model(name='test_add_parameter')
p = m.add_parameter(name='p', init=10)
I = m.add_set(name='I')
r = m.add_parameter(I, name='r', init=5)
self.assertEqual([p, r], m.get_parameters())
m.drop(r)
self.assertEqual([p], m.get_parameters())
def test_add_implicit_var(self):
m = so.Model(name='test_add_impvar')
x = m.add_variables(5, name='x')
y = m.add_implicit_variable((i * x[i] + x[i] ** 2 for i in range(5)),
name='y')
self.assertEqual([y], m.get_implicit_variables())
    def test_add_literal_statement(self):
        """Literal OPTMODEL statements are appended verbatim and removable."""
        m = so.Model(name='test_add_literal_statement')
        m.set_objective(0, name='empty_obj')
        m.add_statement('var x {0,1};')
        m.add_statement('solve;')
        self.assertEqual(
            m.to_optmodel(solve=False),
            inspect.cleandoc('''
            proc optmodel;
            min empty_obj = 0;
            var x {0,1};
            solve;
            quit;'''))
        s = so.abstract.LiteralStatement('print x;')
        m.include(s)
        self.assertEqual(
            m.to_optmodel(solve=False),
            inspect.cleandoc('''
            proc optmodel;
            min empty_obj = 0;
            var x {0,1};
            solve;
            print x;
            quit;'''))
        # Dropping the statement object restores the previous output.
        m.drop(s)
        self.assertEqual(
            m.to_optmodel(solve=False),
            inspect.cleandoc('''
            proc optmodel;
            min empty_obj = 0;
            var x {0,1};
            solve;
            quit;'''))
    def test_add_abstract_statement(self):
        """Abstract statements (here `expand;`) appear before `solve`."""
        m = so.Model(name='m')
        x = m.add_variable(name='x')
        m.set_objective(x ** 2, sense=so.MIN, name='obj')
        s = so.abstract.LiteralStatement('expand;')
        m.add_statement(s)
        self.assertEqual(so.to_optmodel(m), inspect.cleandoc("""
            proc optmodel;
            var x;
            min obj = (x) ^ (2);
            expand;
            solve;
            quit;
            """))
    def test_postsolve_statement(self):
        """Post-solve statements are emitted after `solve`, in insertion order."""
        m = so.Model(name='test_postsolve_statement')
        x = m.add_variable(name='x')
        c1 = m.add_constraint(x <= 10, name='c1')
        self.assertEqual(m.to_optmodel(), inspect.cleandoc("""
            proc optmodel;
            min test_postsolve_statement_obj = 0;
            var x;
            con c1 : x <= 10;
            solve;
            quit;"""))
        m.add_postsolve_statement('print x;')
        self.assertEqual(m.to_optmodel(), inspect.cleandoc("""
            proc optmodel;
            min test_postsolve_statement_obj = 0;
            var x;
            con c1 : x <= 10;
            solve;
            print x;
            quit;"""))
        # Both raw strings and LiteralStatement objects are accepted.
        m.add_postsolve_statement(so.abstract.LiteralStatement('expand;'))
        self.assertEqual(m.to_optmodel(), inspect.cleandoc("""
            proc optmodel;
            min test_postsolve_statement_obj = 0;
            var x;
            con c1 : x <= 10;
            solve;
            print x;
            expand;
            quit;"""))
    def test_include_model(self):
        """include(model) copies another model's variables and constraints."""
        m1 = so.Model(name='test_copy_model_1')
        x = m1.add_variable(name='x')
        y = m1.add_variables(2, name='y')
        c1 = m1.add_constraint(x + y[0] >= 2, name='c1')
        c2 = m1.add_constraints((x - y[i] <= 10 for i in range(2)), name='c2')
        m1.set_objective(2 * x + y[0] + 3 * y[1], name='model_obj')
        m2 = so.Model(name='test_copy_model_2')
        m2.include(m1)
        # The included model's components appear in m2's grouped collections.
        vars = OrderedDict([('x', x), ('y', y)])
        self.assertEqual(m2.get_grouped_variables(), vars)
        cons = OrderedDict([('c1', c1), ('c2', c2)])
        self.assertEqual(m2.get_grouped_constraints(), cons)
        self.assertEqual(m2.to_optmodel(),inspect.cleandoc("""
            proc optmodel;
            var x;
            var y {{0,1}};
            con c1 : x + y[0] >= 2;
            con c2_0 : x - y[0] <= 10;
            con c2_1 : x - y[1] <= 10;
            min model_obj = 2 * x + y[0] + 3 * y[1];
            solve;
            quit;"""))
    def test_set_get_objective(self):
        """set_objective replaces the active objective; append_objective adds."""
        m = so.Model(name='test_set_get_objective')
        x = m.add_variable(name='x')
        # Regular objective
        obj1 = m.set_objective(2 * x, sense=so.MIN, name='obj1')
        self.assertIs(obj1, m.get_objective())
        # Multi objective
        obj2 = m.set_objective(5 * x, sense=so.MIN, name='obj2')
        self.assertIs(obj2, m.get_objective())
        obj3 = m.append_objective(10 * x, sense=so.MIN, name='obj3')
        # obj1 was replaced by obj2, so only obj2 and obj3 remain.
        self.assertEqual([obj2, obj3], m.get_all_objectives())
        self.assertEqual(
            m.to_optmodel(),
            inspect.cleandoc("""
            proc optmodel;
            var x;
            min obj2 = 5 * x;
            min obj3 = 10 * x;
            solve;
            quit;"""))
    def test_get_objective_value(self):
        """Objective value is evaluated locally or read back after a solve."""
        m = so.Model(name='test_objective_value')
        x = m.add_variable(name='x')
        m.set_objective(x ** 2 - 4 * x + 5, sense=so.MIN, name='nonlinear')
        x.set_value(3)
        # Evaluated locally at x=3: 9 - 12 + 5 = 2.
        self.assertEqual(m.get_objective_value(), 2)
        if TestModel.conn:
            m.set_session(TestModel.conn)
            m.solve()
            self.assertEqual(m.get_objective_value(), 1)
            self.assertEqual(x.get_value(), 2)
        else:
            self.skipTest('No CAS connection available, skipping ' +
                          'objective value test')
        # Local evaluation of x/x at x=0 must raise ZeroDivisionError.
        def zero_div_error():
            m.set_objective(x / x, sense=so.MIN, name='nonlinear2')
            x.set_value(0)
            m.clear_solution()
            m.get_objective_value()
        self.assertRaises(ZeroDivisionError, zero_div_error)
def test_variable_coef(self):
m = so.Model(name='test_get_variable_coef')
x = m.add_variable(name='x')
m.set_objective(5 * x, sense=so.MIN, name='obj1')
self.assertEqual(m.get_variable_coef(x), 5)
self.assertEqual(m.get_variable_coef('x'), 5)
y = so.Variable(name='y')
def variable_not_in_model():
return m.get_variable_coef(y)
self.assertRaises(RuntimeError, variable_not_in_model)
m.set_objective(2 * x + y ** 2, sense=so.MIN, name='obj1')
self.assertEqual(m.get_variable_coef('x'), 2)
def nonlinear_objective():
return m.get_variable_coef('y')
self.assertWarns(RuntimeWarning, nonlinear_objective)
    def test_get_variable_value(self):
        """get_variable_value reads solved values; bad names warn or raise."""
        if TestModel.conn is None:
            self.skipTest('Session is not available')
        m = so.Model(name='test_get_var_value')
        x = m.add_variable(name='x', lb=1.5, ub=10, vartype=so.INT)
        m.set_objective(x, sense=so.MIN, name='obj1')
        m.set_session(TestModel.conn)
        m.solve(verbose=True)
        # Integer variable with lb=1.5 is expected to settle at 2.
        self.assertEqual(m.get_variable_value(x), 2)
        I = m.add_set(name='I', value=range(2))
        y = m.add_variables(I, name='y', lb=0.5)
        m.set_objective(x + y[0] + y[1], sense=so.MIN, name='obj1')
        m.solve()
        self.assertEqual(m.get_variable_value(y[0]), 0.5)
        def get_variable_warning():
            self.assertEqual(m.get_variable_value('z'), None)
        self.assertWarns(UserWarning, get_variable_warning)
        m2 = so.Model(name='test_get_var_value_copy')
        m2.include(m)
        z = so.Variable(name='z')
        def raise_solution_error():
            return m2.get_variable_value(z)
        self.assertRaises(RuntimeError, raise_solution_error)
        # A variable name containing spaces is expected to break the solve.
        m.add_variable(name='var with invalid name')
        def raise_syntax_error():
            return m.solve()
        self.assertRaises(SyntaxError, raise_syntax_error)
    def test_get_variable_value_abstract(self):
        """Values of variables indexed by an abstract set can be retrieved."""
        if TestModel.conn is None:
            self.skipTest('Session is not available')
        import pandas as pd
        so.reset()
        m = so.Model(name='abstract_model')
        df = pd.DataFrame([
            ['a', 1],
            ['b', 2]
        ], columns=['tag', 'val'])
        idx = so.Set(name='idx', settype=so.STR)
        varlb = so.ParameterGroup(idx, name='varlb')
        m.include(idx, varlb)
        # The data frame is uploaded and read server-side via read_data.
        table = TestModel.conn.upload_frame(df, casout='server_data')
        from sasoptpy.actions import read_data
        r = read_data(
            table=table,
            index={'target': idx, 'key': 'tag'},
            columns=[
                {'target': varlb, 'column': 'val'}
            ]
        )
        m.include(r)
        y = so.VariableGroup(idx, name='y')
        c = so.ConstraintGroup((y[i] >= varlb[i] for i in idx), name='c')
        m.include(y, c)
        self.assertEqual(m.to_optmodel(), inspect.cleandoc("""
            proc optmodel;
            min abstract_model_obj = 0;
            set <str> idx;
            num varlb {idx};
            read data SERVER_DATA into idx=[tag] varlb=val;
            var y {{idx}};
            con c {o8 in idx} : y[o8] - varlb[o8] >= 0;
            solve;
            quit;
            """))
        m.set_session(TestModel.conn)
        m.solve()
        self.assertEqual(m.get_variable_value(y['a']), 1)
        self.assertEqual(m.get_statements(), [r])
    def test_get_summaries(self):
        """Problem and solution summary tables are exposed after a solve."""
        if not TestModel.conn:
            self.skipTest('Session is not available')
        m = so.Model(name='test_get_summaries', session=TestModel.conn)
        x = m.add_variable(name='x', lb=1)
        y = m.add_variables(2, name='y', lb=1)
        m.set_objective(x + y[0], sense=so.MIN, name='obj1')
        m.add_constraint(x + 2 *y[0] + 3*y[1] >= 10, name='con1')
        m.solve()
        self.assertEqual(m.get_problem_summary().to_string(),
                         inspect.cleandoc("""
                                            Value
            Label
            Objective Sense          Minimization
            Objective Function               obj1
            Objective Type                 Linear
            Number of Variables                 3
            Bounded Above                       0
            Bounded Below                       3
            Bounded Below and Above             0
            Free                                0
            Fixed                               0
            Number of Constraints               1
            Linear LE (<=)                      0
            Linear EQ (=)                       0
            Linear GE (>=)                      1
            Linear Range                        0
            Constraint Coefficients             3"""))
        seq = SequenceMatcher(None, m.get_solution_summary().to_string(),
                              inspect.cleandoc(
                             """
                                                          Value
                             Label
                             Solver                          LP
                             Algorithm             Dual Simplex
                             Objective Function            obj1
                             Solution Status            Optimal
                             Objective Value                  2
                             Primal Infeasibility             0
                             Dual Infeasibility               0
                             Bound Infeasibility              0
                             Iterations                       0
                             Presolve Time                 0.00
                             Solution Time                 0.00"""
        ))
        # There is a chance that the solution time is slightly different
        self.assertTrue(seq.ratio() > 0.99)
    def test_get_solution(self):
        """Primal/dual frames, multiple solutions, and pivot views of results."""
        if not TestModel.conn:
            self.skipTest('No session is defined, skipping get solution test')
        import pandas as pd
        m = so.Model(name='test_get_soln', session=TestModel.conn)
        # Classic knapsack-style data: item value, weight, and per-item bound.
        data = [
            ['pen', 1, 3, 11],
            ['mug', 15, 10, 5],
            ['watch', 50, 2, 2],
            ['pc', 1500, 200, 1]
        ]
        data = pd.DataFrame(data, columns=['item', 'value', 'weight', 'ub'])
        data = data.set_index(['item'])
        items = data.index
        get = m.add_variables(items, name='get', vartype=so.INT, lb=0)
        value = data['value']
        weight = data['weight']
        ub = data['ub']
        m.set_objective(so.expr_sum(get[i] * value[i] for i in items),
                        sense=so.MAX, name='obj1')
        m.add_constraint(so.expr_sum(get[i] * weight[i] for i in items)
                         <= 210, name='value_total')
        m.add_constraints((get[i] <= ub[i] for i in items), name='upper_bound')
        # Regular solve and regular get
        m.solve(verbose=True)
        self.assertEqual(m.get_solution().to_string(), inspect.cleandoc(
            """
              i         var  value  lb            ub  rc
            0  1.0    get[pen]    2.0 -0.0  1.797693e+308 NaN
            1  2.0    get[mug]   -0.0 -0.0  1.797693e+308 NaN
            2  3.0  get[watch]    2.0 -0.0  1.797693e+308 NaN
            3  4.0     get[pc]    1.0 -0.0  1.797693e+308 NaN
            """
        ))
        self.assertEqual(m.get_solution(vtype='dual').to_string(),
                         inspect.cleandoc(
            """
               j                con  value  dual
            0  1.0        value_total  210.0   NaN
            1  2.0    upper_bound_pen    2.0   NaN
            2  3.0    upper_bound_mug   -0.0   NaN
            3  4.0  upper_bound_watch    2.0   NaN
            4  5.0     upper_bound_pc    1.0   NaN
            """
        ))
        # maxpoolsols makes the MPS solve return several solutions at once.
        m.solve(mps=True, options={'maxpoolsols': 3}, verbose=True)
        self.assertEqual(m.get_solution().to_string(), inspect.cleandoc(
            """
                       var   lb            ub  value  solution
            0     get[pen]  0.0  1.797693e+308    2.0       1.0
            1     get[mug]  0.0  1.797693e+308    0.0       1.0
            2   get[watch]  0.0  1.797693e+308    2.0       1.0
            3      get[pc]  0.0  1.797693e+308    1.0       1.0
            4     get[pen]  0.0  1.797693e+308    1.0       2.0
            5     get[mug]  0.0  1.797693e+308    0.0       2.0
            6   get[watch]  0.0  1.797693e+308    1.0       2.0
            7      get[pc]  0.0  1.797693e+308    1.0       2.0
            8     get[pen]  0.0  1.797693e+308    0.0       3.0
            9     get[mug]  0.0  1.797693e+308    0.0       3.0
            10  get[watch]  0.0  1.797693e+308    0.0       3.0
            11     get[pc]  0.0  1.797693e+308    0.0       3.0
            """
        ))
        self.assertEqual(m.get_solution('dual').to_string(), inspect.cleandoc(
            """
                                 con  value  solution
            0            value_total  210.0       1.0
            1     upper_bound['pen']    2.0       1.0
            2     upper_bound['mug']    0.0       1.0
            3   upper_bound['watch']    2.0       1.0
            4      upper_bound['pc']    1.0       1.0
            5            value_total  205.0       2.0
            6     upper_bound['pen']    1.0       2.0
            7     upper_bound['mug']    0.0       2.0
            8   upper_bound['watch']    1.0       2.0
            9      upper_bound['pc']    1.0       2.0
            10           value_total    0.0       3.0
            11    upper_bound['pen']    0.0       3.0
            12    upper_bound['mug']    0.0       3.0
            13  upper_bound['watch']    0.0       3.0
            14     upper_bound['pc']    0.0       3.0
            """
        ))
        # Pivot views put one column per pooled solution.
        self.assertEqual(m.get_solution(pivot=True).to_string(),
                         inspect.cleandoc(
            """
            solution    1.0  2.0  3.0
            var
            get[mug]    0.0  0.0  0.0
            get[pc]     1.0  1.0  0.0
            get[pen]    2.0  1.0  0.0
            get[watch]  2.0  1.0  0.0
            """
        ))
        self.assertEqual(m.get_solution('dual', pivot=True).to_string(),
                         inspect.cleandoc(
            """
            solution              1.0    2.0  3.0
            con
            upper_bound['mug']    0.0    0.0  0.0
            upper_bound['pc']     1.0    1.0  0.0
            upper_bound['pen']    2.0    1.0  0.0
            upper_bound['watch']  2.0    1.0  0.0
            value_total         210.0  205.0  0.0
            """
        ))
        self.assertEqual(m.get_solution('primal', solution=2).to_string(),
                         inspect.cleandoc(
            """
                      var   lb            ub  value  solution
            4    get[pen]  0.0  1.797693e+308    1.0       2.0
            5    get[mug]  0.0  1.797693e+308    0.0       2.0
            6  get[watch]  0.0  1.797693e+308    1.0       2.0
            7     get[pc]  0.0  1.797693e+308    1.0       2.0
            """
        ))
        self.assertEqual(m.get_solution('dual', solution=3).to_string(),
                         inspect.cleandoc(
            """
                                 con  value  solution
            10           value_total    0.0       3.0
            11    upper_bound['pen']    0.0       3.0
            12    upper_bound['mug']    0.0       3.0
            13  upper_bound['watch']    0.0       3.0
            14     upper_bound['pc']    0.0       3.0
            """
        ))
        m.print_solution()
        # Only 'primal' and 'dual' solution types are valid.
        def third_type():
            m.get_solution('x')
        self.assertRaises(ValueError, third_type)
def test_set_coef(self):
m = so.Model(name='test_set_coef')
x = m.add_variable(name='x')
y = m.add_variables(2, name='y')
z = m.add_variable(name='z')
obj = m.set_objective(2*x + 3*y[0] + 2*y[1], name='obj', sense=so.MIN)
c1 = m.add_constraint(2* x + 5 * y[0] + 7 * y[1] <= 15, name='c1')
self.assertEqual(m.get_variable_coef(x), 2)
m.set_variable_coef(x, 3)
self.assertEqual(m.get_variable_coef(x), 3)
self.assertEqual(m.get_variable_coef(z), 0)
m.set_variable_coef(z, 1)
self.assertEqual(m.get_variable_coef(z), 1)
    def test_to_mps(self):
        """to_mps renders objectives, markers, bounds, and ranged constraints."""
        m = so.Model(name='test_to_mps')
        x = m.add_variable(name='x', lb=0, ub=5, vartype=so.INT)
        y = m.add_variables(2, name='y', lb=1)
        m.set_objective(x + y[0], sense=so.MIN, name='xyobj')
        self.assertEqual(m.to_mps().to_string(), inspect.cleandoc(
            """
                 Field1    Field2       Field3  Field4  Field5  Field6  _id_
            0      NAME           test_to_mps     0.0             0.0     1
            1      ROWS                            NaN             NaN     2
            2       MIN     xyobj                  NaN             NaN     3
            3   COLUMNS                            NaN             NaN     4
            4  MARK0000  'MARKER'                  NaN  'INTORG'    NaN     5
            5         x     xyobj          1.0     NaN             6
            6  MARK0001  'MARKER'                  NaN  'INTEND'    NaN     7
            7      y[0]     xyobj          1.0     NaN             8
            8      y[1]     xyobj          0.0     NaN             9
            9       RHS                            NaN             NaN    10
            10   RANGES                            NaN             NaN    11
            11   BOUNDS                            NaN             NaN    12
            12       LO       BND            x     0.0     NaN    13
            13       UP       BND            x     5.0     NaN    14
            14       LO       BND         y[0]     1.0     NaN    15
            15       LO       BND         y[1]     1.0     NaN    16
            16   ENDATA                    0.0             0.0    17
            """
        ))
        # A constant term in the objective is exported as a fixed column.
        m.set_objective(x + 10, name='o', sense=so.MAX)
        self.assertEqual(m.to_mps(constant=True).to_string(),
                         inspect.cleandoc(
            """
                      Field1      Field2       Field3  Field4  Field5  Field6  _id_
            0           NAME             test_to_mps     0.0             0.0     1
            1           ROWS                              NaN             NaN     2
            2            MAX  o_constant                  NaN             NaN     3
            3        COLUMNS                              NaN             NaN     4
            4       MARK0000    'MARKER'                  NaN  'INTORG'    NaN     5
            5              x  o_constant          1.0     NaN             6
            6       MARK0001    'MARKER'                  NaN  'INTEND'    NaN     7
            7           y[0]  o_constant          0.0     NaN             8
            8           y[1]  o_constant          0.0     NaN             9
            9   obj_constant  o_constant          1.0     NaN            10
            10           RHS                              NaN             NaN    11
            11        RANGES                              NaN             NaN    12
            12        BOUNDS                              NaN             NaN    13
            13            LO         BND            x     0.0     NaN    14
            14            UP         BND            x     5.0     NaN    15
            15            LO         BND         y[0]     1.0     NaN    16
            16            LO         BND         y[1]     1.0     NaN    17
            17            FX         BND obj_constant    10.0     NaN    18
            18        ENDATA                    0.0             0.0    19
            """
        ))
        # Add invalid constraints for the frame
        c1 = m.add_constraint(y[0] + x >= 0, name='zero_lb')
        c2 = m.add_constraint(y[0] <= 100, name='inf_ub')
        from math import inf
        c2.set_rhs(inf)
        self.assertEqual(m.to_mps().to_string(), inspect.cleandoc(
            """
                      Field1      Field2       Field3   Field4    Field5  Field6  _id_
            0           NAME             test_to_mps      0.0               0.0     1
            1           ROWS                               NaN               NaN     2
            2            MAX  o_constant                   NaN               NaN     3
            3              G     zero_lb                   NaN               NaN     4
            4              L      inf_ub                   NaN               NaN     5
            5        COLUMNS                               NaN               NaN     6
            6       MARK0000    'MARKER'                   NaN  'INTORG'     NaN     7
            7              x  o_constant          1.0  zero_lb       1.0     8
            8       MARK0001    'MARKER'                   NaN  'INTEND'     NaN     9
            9           y[0]     zero_lb          1.0   inf_ub       1.0    10
            10          y[1]  o_constant          0.0      NaN            11
            11  obj_constant  o_constant          1.0      NaN            12
            12           RHS                               NaN               NaN    13
            13        RANGES                               NaN               NaN    14
            14        BOUNDS                               NaN               NaN    15
            15            LO         BND            x      0.0       NaN    16
            16            UP         BND            x      5.0       NaN    17
            17            LO         BND         y[0]      1.0       NaN    18
            18            LO         BND         y[1]      1.0       NaN    19
            19            FX         BND obj_constant     10.0       NaN    20
            20        ENDATA                       0.0               0.0    21
            """
        ))
        # Free (u), binary (t), and ranged constraints are rendered too.
        u = m.add_variable(name='u')
        t = m.add_variable(name='t', vartype=so.BIN)
        m.drop_constraints(c1, c2)
        m.add_constraint(x + 2*y[0] == [3, 8], name='range_con')
        self.assertEqual(m.to_mps().to_string(), inspect.cleandoc(
            """
                      Field1      Field2       Field3     Field4  Field5  Field6  _id_
            0           NAME             test_to_mps        0.0             0.0     1
            1           ROWS                                 NaN             NaN     2
            2            MAX  o_constant                     NaN             NaN     3
            3              E   range_con                     NaN             NaN     4
            4        COLUMNS                                 NaN             NaN     5
            5       MARK0000    'MARKER'                     NaN  'INTORG'    NaN     6
            6              x  o_constant          1.0  range_con     1.0     7
            7       MARK0001    'MARKER'                     NaN  'INTEND'    NaN     8
            8           y[0]   range_con          2.0        NaN             9
            9           y[1]  o_constant          0.0        NaN            10
            10  obj_constant  o_constant          1.0        NaN            11
            11             u  o_constant          0.0        NaN            12
            12             t  o_constant          0.0        NaN            13
            13           RHS                                 NaN             NaN    14
            14           RHS   range_con          3.0        NaN            15
            15        RANGES                                 NaN             NaN    16
            16           rng   range_con          5.0        NaN            17
            17        BOUNDS                                 NaN             NaN    18
            18            LO         BND            x        0.0     NaN    19
            19            UP         BND            x        5.0     NaN    20
            20            LO         BND         y[0]        1.0     NaN    21
            21            LO         BND         y[1]        1.0     NaN    22
            22            FX         BND obj_constant       10.0     NaN    23
            23            FR         BND            u        NaN             24
            24            BV         BND            t        1.0     NaN    25
            25        ENDATA                       0.0             0.0    26
            """
        ))
        # to_frame is the deprecated alias of to_mps.
        def get_frame_warning():
            r = m.to_frame()
        self.assertWarns(DeprecationWarning, get_frame_warning)
    def test_to_optmodel(self):
        """to_optmodel honors solve options, multi-objectives, and ODS output."""
        m = so.Model(name='test_to_optmodel')
        self.assertEqual(m.to_optmodel(), inspect.cleandoc(
            """
            proc optmodel;
            min test_to_optmodel_obj = 0;
            solve;
            quit;
            """
        ))
        x = m.add_variable(name='x', init=5)
        e1 = m.set_objective(x, sense=so.MIN, name='e1')
        e2 = m.append_objective(x**2, sense=so.MAX, name='e2')
        # ods/primalin add output statements and the /primalin solve suffix.
        response = m.to_optmodel(options={
            'with': 'blackbox',
            'relaxint': True,
            'obj': (e1, e2),
            'primalin': True,
        }, ods=True, primalin=True, parse=False)
        self.assertEqual(response, inspect.cleandoc(
            """
            proc optmodel;
            var x init 5;
            min e1 = x;
            max e2 = (x) ^ (2);
            solve with blackbox relaxint obj (e1 e2) / primalin;
            ods output PrintTable=primal_out;
            ods output PrintTable=dual_out;
            create data allsols from [s]=(1.._NVAR_) name=_VAR_[s].name {j in 1.._NSOL_} <col('sol_'||j)=_VAR_[s].sol[j]>;
            quit;
            """
        ))
        # Dict-valued options are rendered as key=(subkey=value,...) groups.
        response = m.to_optmodel(options={
            'with': 'nlp',
            'multistart': {'loglevel': 3, 'maxstarts': 30}
        })
        self.assertEqual(response, inspect.cleandoc(
            """
            proc optmodel;
            var x init 5;
            min e1 = x;
            max e2 = (x) ^ (2);
            solve with nlp / multistart=(loglevel=3,maxstarts=30);
            quit;
            """
        ))
    def test_str(self):
        """str(model) lists name, optional session, objective, vars and cons."""
        m = TestModel.get_standard_model(name='test_model_str')
        response = str(m)
        self.assertEqual(response, inspect.cleandoc(
            """
            Model: [
              Name: test_model_str
              Objective: MIN [0]
              Variables (3): [
                x
                y[0]
                y[1]
              ]
              Constraints (3): [
                x <=  5
                y[0] <=  3
                y[1] <=  3
              ]
            ]
            """
        ))
        # With a session attached, a Session: host:port line is included.
        if TestModel.conn:
            m.set_session(TestModel.conn)
            response = str(m)
            self.assertEqual(response, inspect.cleandoc(
                """
                Model: [
                  Name: test_model_str
                  Session: {}:{}
                  Objective: MIN [0]
                  Variables (3): [
                    x
                    y[0]
                    y[1]
                  ]
                  Constraints (3): [
                    x <=  5
                    y[0] <=  3
                    y[1] <=  3
                  ]
                ]
                """.format(os.environ.get('CASHOST'), os.environ.get('CASPORT'))
            ))
    def test_model_repr(self):
        """repr includes the session argument for saspy and CAS sessions."""
        m = so.Model(name='test_model_repr')
        self.assertEqual(repr(m), "sasoptpy.Model(name='test_model_repr')")
        # SASsession here is the local mock defined at the top of this file.
        s = SASsession(cfgname='winlocal')
        m.set_session(s)
        self.assertEqual(
            repr(m),
            "sasoptpy.Model(name='test_model_repr', "
            "session=saspy.SASsession(cfgname='winlocal'))")
        if TestModel.conn:
            m.set_session(TestModel.conn)
            cas_repr = repr(m.get_session())
            self.assertEqual(
                repr(m), "sasoptpy.Model(name='test_model_repr', session=" +
                cas_repr + ')')
        # Any other session object type must be rejected.
        def invalid_session_type():
            w = 5
            m.set_session(w)
            rp = repr(m)
        self.assertRaises(TypeError, invalid_session_type)
def test_defn(self):
m = TestModel.get_standard_model('test_model_defn')
self.assertEqual(so.to_definition(m), "problem test_model_defn "
"include x y c1 c2;")
def test_expr(self):
m = TestModel.get_standard_model('test_model_expr')
self.assertEqual(m.to_optmodel(), so.to_expression(m))
def test_is_linear(self):
m = TestModel.get_standard_model('test_model_linearity')
self.assertEqual(so.is_linear(m), True)
x = m.get_variable('x')
qbound = m.add_constraint(x ** 2 + x <= 10, name='qbound')
self.assertEqual(so.is_linear(m), False)
m.drop_constraint(qbound)
self.assertEqual(so.is_linear(m), True)
m.set_objective(x ** 2, sense=so.MIN, name='x_squared')
self.assertEqual(so.is_linear(m), False)
def test_session_type(self):
m = TestModel.get_standard_model('test_model_session_type')
self.assertEqual(m.get_session_type(), None)
if TestModel.conn:
m.set_session(TestModel.conn)
self.assertEqual(m.get_session_type(), 'CAS')
    def test_ub_set(self):
        """set_bounds(ub=...) adds an upper bound to the declaration."""
        m = so.Model(name='test_model_var_ub')
        x = m.add_variable(name='x')
        self.assertEqual(so.to_optmodel(m), cleandoc('''
            proc optmodel;
            min test_model_var_ub_obj = 0;
            var x;
            solve;
            quit;'''))
        x.set_bounds(ub=5)
        self.assertEqual(so.to_optmodel(m), cleandoc('''
            proc optmodel;
            min test_model_var_ub_obj = 0;
            var x <= 5;
            solve;
            quit;'''))
def test_model_add(self):
m = so.Model(name='test_add')
x = so.Variable(name='x')
self.assertEqual(m.get_variables(), [])
m.add(x)
self.assertEqual(m.get_variables(), [x])
def test_model_session(self):
m = so.Model(name='m')
s = m.get_session()
self.assertEqual(s, None)
if TestModel.conn:
m.set_session(TestModel.conn)
self.assertEqual(m.get_session(), TestModel.conn)
self.assertEqual(m.get_session_type(), 'CAS')
def test_names(self):
if TestModel.conn is None:
self.skipTest('Session is not available')
m = so.Model(name='test_var_names', session=TestModel.conn)
a = ['apple', 'apple juice']
x = m.add_variables(a, name='amount', lb=1)
m.set_objective(so.expr_sum(x[i] for i in a), name='obj', sense=so.minimize)
m.solve()
for i in a:
self.assertEqual(x[i].get_value(), 1.0)
    def test_export(self):
        """export_mps returns MPS text; nonlinear constraints make it fail."""
        m = TestModel.get_standard_model('test_model_export')
        x = m.get_variable('x')
        mps_text = m.export_mps(fetch=True)
        print(mps_text)
        # Spaces are stripped on both sides, so only token order is compared.
        self.assertEqual(mps_text.replace(' ', ''), inspect.cleandoc(
            """
            NAME          test_model_export
            ROWS
            MIN  test_model_export_obj
            L  c1
            L  c2[0]
            L  c2[1]
            COLUMNS
            x  c1  1.0
            y[0]  c2[0]  1.0
            y[1]  c2[1]  1.0
            RHS
            RHS  c1  5.0  c2[0]  3.0
            RHS  c2[1]  3.0
            RANGES
            BOUNDS
            FR  BND  x
            FR  BND  y[0]
            FR  BND  y[1]
            ENDATA"""
        ).replace(' ', ''))
        # A quadratic constraint cannot be represented in MPS format.
        m.add_constraint(x ** 2 + x <= 10, name='qb')
        def generate_error():
            m.export_mps()
        self.assertRaises(ValueError, generate_error)
    def tearDown(self):
        """Reset the sasoptpy global state after each test."""
        so.reset()
| 39.620069 | 125 | 0.453514 |
from collections import OrderedDict
from difflib import SequenceMatcher
import inspect
import os
import unittest
import warnings
from inspect import cleandoc
import sasoptpy as so
from tests.swat_config import create_cas_connection
class MockSASconfig:
    """Minimal stand-in for saspy's SASconfig; only carries the config name."""
    def __init__(self, name):
        self.name = name
class SASsession:
    """Lightweight mock of saspy.SASsession used by Model repr tests."""
    def __init__(self, cfgname):
        # saspy is imported but not used directly; presumably this guarantees
        # the package is installed before the mock is used -- TODO confirm.
        import saspy
        self.sascfg = MockSASconfig(name=cfgname)
class TestModel(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Create a shared CAS connection if the environment provides one.

        Leaves ``cls.conn`` as None when no connection can be made, so
        individual tests can skip themselves.
        """
        cls.conn = None
        from swat import CAS, SWATError
        try:
            cls.conn = create_cas_connection()
        except SWATError:
            warnings.warn('CAS connection is not available',
                          RuntimeWarning)
        except TypeError:
            warnings.warn('CAS variables are not available',
                          RuntimeWarning)
@classmethod
def tearDownClass(cls):
if cls.conn is not None:
cls.conn.close()
    def setUp(self):
        """No per-test setup is required."""
        pass
@classmethod
def get_standard_model(cls, name):
m = so.Model(name=name)
x = m.add_variable(name='x')
y = m.add_variables(2, name='y')
c1 = m.add_constraint(x <= 5, name='c1')
c2 = m.add_constraints((y[i] <= 3 for i in range(2)), name='c2')
return m
def test_initialize(self):
m = so.Model(name='test_initialize', session=None)
self.assertEqual(type(m), so.Model)
def test_comparison(self):
model1 = so.Model(name='test_equal_1', session=None)
model2 = so.Model(name='test_equal_2', session=None)
self.assertFalse(model1 == model2)
model3 = model1
self.assertTrue(model1 == model3)
def invalid_comparison():
_ = model1 == list()
self.assertWarns(RuntimeWarning, invalid_comparison)
def test_get_name(self):
m = so.Model(name='m')
self.assertEqual(m.get_name(), 'm')
    def test_adding_variable(self):
        """Variables added directly or via include() are all registered."""
        m = so.Model(name='test_add_variable')
        x = m.add_variable(name='x')
        y = m.add_variable(name='y', vartype=so.INT)
        z = m.add_variable(name='z', lb=1, ub=10)
        w = m.add_variable(name='w', init=5)
        u = so.Variable(name='u')
        m.include(u)
        self.assertEqual(m.get_variables(), [x, y, z, w, u])
        self.assertEqual(m.get_variable_dict(), {'x': x, 'y': y, 'z': z,
                                                 'w': w, 'u': u})
        self.assertIs(m.get_variable('x'), x)
        # Unknown names return None rather than raising.
        self.assertIs(m.get_variable('t'), None)
    def test_duplicate_variables(self):
        """Re-adding a variable name warns and the latest definition wins."""
        m = so.Model(name='test_duplicate_variables')
        def add_multi_var():
            x = m.add_variable(name='x', lb=2)
            x2 = m.add_variable(name='x', lb=1)
        self.assertWarns(UserWarning, add_multi_var)
        # Only the second definition (lb=1) survives in the rendered model.
        self.assertEqual(m.to_optmodel(), cleandoc("""
            proc optmodel;
            min test_duplicate_variables_obj = 0;
            var x >= 1;
            solve;
            quit;"""))
def test_dropping_variable(self):
m = so.Model(name='test_drop_variable')
x = m.add_variable(name='x')
self.assertIs(m.get_variables()[0], x)
self.assertIs(m.get_variable_dict()['x'], x)
m.drop_variable(x)
self.assertEqual(m.get_variables(), [])
self.assertEqual(m.get_variable_dict(), {})
m.include(x)
self.assertIs(m.get_variable_dict()['x'], x)
m.drop(x)
self.assertEqual(m.get_variable_dict(), {})
    def test_drop_restore_var(self):
        """Dropped variables emit `drop` statements and can be restored."""
        m = so.Model(name='test_drop_restore')
        x = m.add_variable(name='x')
        y = m.add_variables(5, name='y')
        m.set_objective(y[3], sense=so.minimize, name='obj')
        self.assertEqual(m.to_optmodel(), cleandoc('''
            proc optmodel;
            var x;
            var y {{0,1,2,3,4}};
            min obj = y[3];
            solve;
            quit;'''))
        # Dropping a scalar variable removes its declaration; dropping group
        # members keeps the declaration and adds a `drop` statement instead.
        m.drop_variable(x)
        m.drop_variable(y[1])
        m.drop_variable(y[2])
        self.assertEqual(m.to_optmodel(), cleandoc('''
            proc optmodel;
            var y {{0,1,2,3,4}};
            min obj = y[3];
            drop y[1] y[2];
            solve;
            quit;'''))
        m.restore_variable(x)
        m.restore_variable(y[2])
        self.assertEqual(m.to_optmodel(), cleandoc('''
            proc optmodel;
            var x;
            var y {{0,1,2,3,4}};
            min obj = y[3];
            drop y[1];
            solve;
            quit;'''))
    def test_adding_vargroup(self):
        """Variable groups over lists, strings, and abstract sets are tracked."""
        m = so.Model(name='test_add_vg')
        x = m.add_variables(2, name='x')
        y = m.add_variables(['a', 'b'], name='y', vartype=so.BIN)
        I = so.abstract.Set(name='I')
        z = m.add_variables(I, name='z', lb=1, ub=10, init=5)
        w = so.VariableGroup(5, name='w')
        m.include(w)
        # Groups keep their insertion order in the grouped-variable mapping.
        vars = [('x', x), ('y', y), ('z', z), ('w', w)]
        self.assertEqual(m.get_grouped_variables(), OrderedDict(vars))
        self.assertIs(m.get_variable('x')[0], x[0])
def test_dropping_vargroup(self):
m = so.Model(name='test_drop_vg')
x = m.add_variables(2, name='x')
self.assertEqual(m.get_grouped_variables(), OrderedDict([('x', x)]))
m.drop_variables(x)
self.assertEqual(m.get_grouped_variables(), OrderedDict())
m.include(x)
self.assertEqual(m.get_grouped_variables(), OrderedDict([('x', x)]))
m.drop(x)
self.assertEqual(m.get_grouped_variables(), OrderedDict())
def test_adding_constraint(self):
m = so.Model(name='test_add_constraint')
x = m.add_variable(name='x')
c1 = m.add_constraint(x <= 5, name='c1')
c2 = m.add_constraint(2 * x + x ** 5 >= 1, name='c2')
self.assertEqual([c1, c2], m.get_constraints())
self.assertEqual({'c1': c1, 'c2': c2}, m.get_constraints_dict())
def invalid_constraint():
from math import inf
c3 = m.add_constraint(x <= inf, name='c3')
self.assertRaises(ValueError, invalid_constraint)
cx = m.get_constraint('c1')
self.assertEqual(cx, c1)
cy = m.get_constraint('c3')
self.assertEqual(cy, None)
def test_duplicate_constraints(self):
m = so.Model(name='test_duplicate_constraints')
def add_multi_con():
x = m.add_variable(name='x')
c1 = m.add_constraint(x <= 5, name='c')
c2 = m.add_constraint(x <= 5, name='c')
self.assertWarns(UserWarning, add_multi_con)
self.assertEqual(m.to_optmodel(), cleandoc("""
proc optmodel;
min test_duplicate_constraints_obj = 0;
var x;
con c : x <= 5;
solve;
quit;"""))
def test_drop_restore_cons(self):
m = so.Model(name='test_drop_restore_constraints')
x = m.add_variable(name='x')
y = m.add_variables(5, name='y')
m.set_objective(y[3], sense=so.minimize, name='obj')
c1 = m.add_constraint(x <= 5, name='c1')
c2 = m.add_constraints((y[i] <= i for i in range(5)), name='c2')
self.assertEqual(m.to_optmodel(), cleandoc("""
proc optmodel;
var x;
var y {{0,1,2,3,4}};
min obj = y[3];
con c1 : x <= 5;
con c2_0 : y[0] <= 0;
con c2_1 : y[1] <= 1;
con c2_2 : y[2] <= 2;
con c2_3 : y[3] <= 3;
con c2_4 : y[4] <= 4;
solve;
quit;"""))
m.drop_constraint(c1)
m.drop_constraint(c2[1])
m.drop_constraint(c2[2])
self.assertEqual(m.to_optmodel(), cleandoc("""
proc optmodel;
var x;
var y {{0,1,2,3,4}};
min obj = y[3];
con c2_0 : y[0] <= 0;
con c2_1 : y[1] <= 1;
con c2_2 : y[2] <= 2;
con c2_3 : y[3] <= 3;
con c2_4 : y[4] <= 4;
drop c2_1 c2_2;
solve;
quit;"""))
m.restore_constraint(c1)
m.restore_constraint(c2[2])
self.assertEqual(m.to_optmodel(), cleandoc("""
proc optmodel;
var x;
var y {{0,1,2,3,4}};
min obj = y[3];
con c1 : x <= 5;
con c2_0 : y[0] <= 0;
con c2_1 : y[1] <= 1;
con c2_2 : y[2] <= 2;
con c2_3 : y[3] <= 3;
con c2_4 : y[4] <= 4;
drop c2_1;
solve;
quit;"""))
def test_dropping_constraint(self):
m = so.Model(name='test_drop_constraint')
x = m.add_variable(name='x')
c1 = m.add_constraint(x <= 5, name='c1')
self.assertEqual({'c1': c1}, m.get_constraints_dict())
m.drop_constraint(c1)
self.assertEqual({}, m.get_constraints_dict())
m.include(c1)
self.assertEqual({'c1': c1}, m.get_constraints_dict())
m.drop(c1)
self.assertEqual({}, m.get_constraints_dict())
def test_adding_constraints(self):
m = so.Model(name='test_add_cg')
x = m.add_variables(5, name='x')
c1 = m.add_constraints((x[i] >= i for i in range(5)), name='c1')
self.assertEqual(OrderedDict([('c1', c1)]), m.get_grouped_constraints())
self.assertEqual(c1, m.get_constraint('c1'))
c2 = so.ConstraintGroup((i * x[i] <= 10 for i in range(5)), name='c2')
m.include(c2)
grouped_con_dict = OrderedDict([('c1', c1), ('c2', c2)])
self.assertEqual(grouped_con_dict, m.get_grouped_constraints())
def warn_user_single_constraint():
c3 = m.add_constraints(x[0] >= 1, name='c3')
self.assertWarns(UserWarning, warn_user_single_constraint)
def test_dropping_constraints(self):
m = so.Model(name='test_drop_cg')
x = m.add_variables(2, name='x')
c1 = m.add_constraints((x[i] <= i for i in range(2)), name='c1')
self.assertEqual(m.get_grouped_constraints(), OrderedDict([('c1', c1)]))
m.drop_constraints(c1)
self.assertEqual(m.get_grouped_constraints(), OrderedDict())
m.include(c1)
self.assertEqual(m.get_grouped_constraints(), OrderedDict([('c1', c1)]))
m.drop(c1)
self.assertEqual(m.get_grouped_constraints(), OrderedDict())
def test_add_set(self):
m = so.Model(name='test_add_set')
I = m.add_set(name='I', init=2)
self.assertEqual(m.get_sets(), [I])
self.assertEqual(so.to_definition(m.get_sets()[0]), "set I init 2;")
def test_add_parameter(self):
m = so.Model(name='test_add_parameter')
p = m.add_parameter(name='p', init=10)
I = m.add_set(name='I')
r = m.add_parameter(I, name='r', init=5)
self.assertEqual([p, r], m.get_parameters())
m.drop(r)
self.assertEqual([p], m.get_parameters())
def test_add_implicit_var(self):
m = so.Model(name='test_add_impvar')
x = m.add_variables(5, name='x')
y = m.add_implicit_variable((i * x[i] + x[i] ** 2 for i in range(5)),
name='y')
self.assertEqual([y], m.get_implicit_variables())
def test_add_literal_statement(self):
m = so.Model(name='test_add_literal_statement')
m.set_objective(0, name='empty_obj')
m.add_statement('var x {0,1};')
m.add_statement('solve;')
self.assertEqual(
m.to_optmodel(solve=False),
inspect.cleandoc('''
proc optmodel;
min empty_obj = 0;
var x {0,1};
solve;
quit;'''))
s = so.abstract.LiteralStatement('print x;')
m.include(s)
self.assertEqual(
m.to_optmodel(solve=False),
inspect.cleandoc('''
proc optmodel;
min empty_obj = 0;
var x {0,1};
solve;
print x;
quit;'''))
m.drop(s)
self.assertEqual(
m.to_optmodel(solve=False),
inspect.cleandoc('''
proc optmodel;
min empty_obj = 0;
var x {0,1};
solve;
quit;'''))
def test_add_abstract_statement(self):
m = so.Model(name='m')
x = m.add_variable(name='x')
m.set_objective(x ** 2, sense=so.MIN, name='obj')
s = so.abstract.LiteralStatement('expand;')
m.add_statement(s)
self.assertEqual(so.to_optmodel(m), inspect.cleandoc("""
proc optmodel;
var x;
min obj = (x) ^ (2);
expand;
solve;
quit;
"""))
    def test_postsolve_statement(self):
        """Post-solve statements are rendered after `solve;`, in insertion order.

        Both raw strings and LiteralStatement objects are accepted.
        """
        m = so.Model(name='test_postsolve_statement')
        x = m.add_variable(name='x')
        c1 = m.add_constraint(x <= 10, name='c1')
        self.assertEqual(m.to_optmodel(), inspect.cleandoc("""
            proc optmodel;
            min test_postsolve_statement_obj = 0;
            var x;
            con c1 : x <= 10;
            solve;
            quit;"""))
        m.add_postsolve_statement('print x;')
        self.assertEqual(m.to_optmodel(), inspect.cleandoc("""
            proc optmodel;
            min test_postsolve_statement_obj = 0;
            var x;
            con c1 : x <= 10;
            solve;
            print x;
            quit;"""))
        m.add_postsolve_statement(so.abstract.LiteralStatement('expand;'))
        self.assertEqual(m.to_optmodel(), inspect.cleandoc("""
            proc optmodel;
            min test_postsolve_statement_obj = 0;
            var x;
            con c1 : x <= 10;
            solve;
            print x;
            expand;
            quit;"""))
    def test_include_model(self):
        """Including a model copies its variables, constraints and objective."""
        m1 = so.Model(name='test_copy_model_1')
        x = m1.add_variable(name='x')
        y = m1.add_variables(2, name='y')
        c1 = m1.add_constraint(x + y[0] >= 2, name='c1')
        c2 = m1.add_constraints((x - y[i] <= 10 for i in range(2)), name='c2')
        m1.set_objective(2 * x + y[0] + 3 * y[1], name='model_obj')
        m2 = so.Model(name='test_copy_model_2')
        m2.include(m1)
        # The receiving model shares the same component objects, grouped by name.
        vars = OrderedDict([('x', x), ('y', y)])
        self.assertEqual(m2.get_grouped_variables(), vars)
        cons = OrderedDict([('c1', c1), ('c2', c2)])
        self.assertEqual(m2.get_grouped_constraints(), cons)
        self.assertEqual(m2.to_optmodel(),inspect.cleandoc("""
            proc optmodel;
            var x;
            var y {{0,1}};
            con c1 : x + y[0] >= 2;
            con c2_0 : x - y[0] <= 10;
            con c2_1 : x - y[1] <= 10;
            min model_obj = 2 * x + y[0] + 3 * y[1];
            solve;
            quit;"""))
    def test_set_get_objective(self):
        """set_objective replaces the objective; append_objective adds another."""
        m = so.Model(name='test_set_get_objective')
        x = m.add_variable(name='x')
        obj1 = m.set_objective(2 * x, sense=so.MIN, name='obj1')
        self.assertIs(obj1, m.get_objective())
        # A second set_objective call overrides obj1 entirely.
        obj2 = m.set_objective(5 * x, sense=so.MIN, name='obj2')
        self.assertIs(obj2, m.get_objective())
        obj3 = m.append_objective(10 * x, sense=so.MIN, name='obj3')
        self.assertEqual([obj2, obj3], m.get_all_objectives())
        self.assertEqual(
            m.to_optmodel(),
            inspect.cleandoc("""
            proc optmodel;
            var x;
            min obj2 = 5 * x;
            min obj3 = 10 * x;
            solve;
            quit;"""))
    def test_get_objective_value(self):
        """get_objective_value evaluates locally, then (if a session exists) from a solve.

        Also verifies that evaluating a 0/0 objective propagates ZeroDivisionError.
        """
        m = so.Model(name='test_objective_value')
        x = m.add_variable(name='x')
        m.set_objective(x ** 2 - 4 * x + 5, sense=so.MIN, name='nonlinear')
        # Local evaluation at x=3: 9 - 12 + 5 = 2.
        x.set_value(3)
        self.assertEqual(m.get_objective_value(), 2)
        if TestModel.conn:
            m.set_session(TestModel.conn)
            m.solve()
            self.assertEqual(m.get_objective_value(), 1)
            self.assertEqual(x.get_value(), 2)
        else:
            self.skipTest('No CAS connection available, skipping ' +
                          'objective value test')

        def zero_div_error():
            m.set_objective(x / x, sense=so.MIN, name='nonlinear2')
            x.set_value(0)
            m.clear_solution()
            m.get_objective_value()
        self.assertRaises(ZeroDivisionError, zero_div_error)
    def test_variable_coef(self):
        """get_variable_coef works by object or name, and flags bad inputs.

        Unknown variables raise RuntimeError; asking for the coefficient of a
        variable that appears nonlinearly triggers a RuntimeWarning.
        """
        m = so.Model(name='test_get_variable_coef')
        x = m.add_variable(name='x')
        m.set_objective(5 * x, sense=so.MIN, name='obj1')
        self.assertEqual(m.get_variable_coef(x), 5)
        self.assertEqual(m.get_variable_coef('x'), 5)
        y = so.Variable(name='y')

        def variable_not_in_model():
            return m.get_variable_coef(y)
        self.assertRaises(RuntimeError, variable_not_in_model)
        m.set_objective(2 * x + y ** 2, sense=so.MIN, name='obj1')
        self.assertEqual(m.get_variable_coef('x'), 2)

        def nonlinear_objective():
            return m.get_variable_coef('y')
        self.assertWarns(RuntimeWarning, nonlinear_objective)
    def test_get_variable_value(self):
        """End-to-end check of get_variable_value against a live CAS solve.

        Covers: integer rounding of a fractional lower bound, grouped
        variables, a warning for unknown names, a RuntimeError when no
        solution exists on a copied model, and a SyntaxError for a variable
        whose name is invalid OPTMODEL syntax.
        """
        if TestModel.conn is None:
            self.skipTest('Session is not available')
        m = so.Model(name='test_get_var_value')
        x = m.add_variable(name='x', lb=1.5, ub=10, vartype=so.INT)
        m.set_objective(x, sense=so.MIN, name='obj1')
        m.set_session(TestModel.conn)
        m.solve(verbose=True)
        self.assertEqual(m.get_variable_value(x), 2)
        I = m.add_set(name='I', value=range(2))
        y = m.add_variables(I, name='y', lb=0.5)
        m.set_objective(x + y[0] + y[1], sense=so.MIN, name='obj1')
        m.solve()
        self.assertEqual(m.get_variable_value(y[0]), 0.5)

        def get_variable_warning():
            self.assertEqual(m.get_variable_value('z'), None)
        self.assertWarns(UserWarning, get_variable_warning)
        m2 = so.Model(name='test_get_var_value_copy')
        m2.include(m)
        z = so.Variable(name='z')

        def raise_solution_error():
            return m2.get_variable_value(z)
        self.assertRaises(RuntimeError, raise_solution_error)
        m.add_variable(name='var with invalid name')

        def raise_syntax_error():
            return m.solve()
        self.assertRaises(SyntaxError, raise_syntax_error)
    def test_get_variable_value_abstract(self):
        """Abstract components (set, parameter group, read_data) round-trip via CAS.

        Uploads a small frame, reads it into an abstract parameter group, and
        pins the exact generated OPTMODEL code before solving.
        """
        if TestModel.conn is None:
            self.skipTest('Session is not available')
        import pandas as pd
        # reset() clears sasoptpy's global counters so the auto-generated
        # iterator name (o8) in the expected output is deterministic.
        so.reset()
        m = so.Model(name='abstract_model')
        df = pd.DataFrame([
            ['a', 1],
            ['b', 2]
        ], columns=['tag', 'val'])
        idx = so.Set(name='idx', settype=so.STR)
        varlb = so.ParameterGroup(idx, name='varlb')
        m.include(idx, varlb)
        table = TestModel.conn.upload_frame(df, casout='server_data')
        from sasoptpy.actions import read_data
        r = read_data(
            table=table,
            index={'target': idx, 'key': 'tag'},
            columns=[
                {'target': varlb, 'column': 'val'}
            ]
        )
        m.include(r)
        y = so.VariableGroup(idx, name='y')
        c = so.ConstraintGroup((y[i] >= varlb[i] for i in idx), name='c')
        m.include(y, c)
        self.assertEqual(m.to_optmodel(), inspect.cleandoc("""
            proc optmodel;
            min abstract_model_obj = 0;
            set <str> idx;
            num varlb {idx};
            read data SERVER_DATA into idx=[tag] varlb=val;
            var y {{idx}};
            con c {o8 in idx} : y[o8] - varlb[o8] >= 0;
            solve;
            quit;
            """))
        m.set_session(TestModel.conn)
        m.solve()
        self.assertEqual(m.get_variable_value(y['a']), 1)
        self.assertEqual(m.get_statements(), [r])
    def test_get_summaries(self):
        """Problem and solution summaries match the expected solver tables.

        The solution summary contains timing fields, so it is compared with a
        SequenceMatcher ratio (> 0.99) instead of exact equality.
        """
        if not TestModel.conn:
            self.skipTest('Session is not available')
        m = so.Model(name='test_get_summaries', session=TestModel.conn)
        x = m.add_variable(name='x', lb=1)
        y = m.add_variables(2, name='y', lb=1)
        m.set_objective(x + y[0], sense=so.MIN, name='obj1')
        m.add_constraint(x + 2 *y[0] + 3*y[1] >= 10, name='con1')
        m.solve()
        self.assertEqual(m.get_problem_summary().to_string(),
                         inspect.cleandoc("""
                                                        Value
                            Label
                            Objective Sense          Minimization
                            Objective Function               obj1
                            Objective Type                 Linear
                            Number of Variables                 3
                            Bounded Above                       0
                            Bounded Below                       3
                            Bounded Below and Above             0
                            Free                                0
                            Fixed                               0
                            Number of Constraints               1
                            Linear LE (<=)                      0
                            Linear EQ (=)                       0
                            Linear GE (>=)                      1
                            Linear Range                        0
                            Constraint Coefficients             3"""))
        seq = SequenceMatcher(None, m.get_solution_summary().to_string(),
                         inspect.cleandoc(
                             """
                                                        Value
                             Label
                             Solver                             LP
                             Algorithm                Dual Simplex
                             Objective Function               obj1
                             Solution Status               Optimal
                             Objective Value                     2
                             Primal Infeasibility                0
                             Dual Infeasibility                  0
                             Bound Infeasibility                 0
                             Iterations                          0
                             Presolve Time                    0.00
                             Solution Time                    0.00"""
                         ))
        self.assertTrue(seq.ratio() > 0.99)
    def test_get_solution(self):
        """Characterization test for get_solution on a small knapsack model.

        Pins the exact primal and dual frames for a regular solve, then for
        an MPS solve with a solution pool (maxpoolsols=3), including the
        pivoted layouts and single-solution filters. Ends by checking that an
        unknown vtype raises ValueError.
        """
        if not TestModel.conn:
            self.skipTest('No session is defined, skipping get solution test')
        import pandas as pd
        m = so.Model(name='test_get_soln', session=TestModel.conn)
        data = [
            ['pen', 1, 3, 11],
            ['mug', 15, 10, 5],
            ['watch', 50, 2, 2],
            ['pc', 1500, 200, 1]
        ]
        data = pd.DataFrame(data, columns=['item', 'value', 'weight', 'ub'])
        data = data.set_index(['item'])
        items = data.index
        get = m.add_variables(items, name='get', vartype=so.INT, lb=0)
        value = data['value']
        weight = data['weight']
        ub = data['ub']
        m.set_objective(so.expr_sum(get[i] * value[i] for i in items),
                        sense=so.MAX, name='obj1')
        m.add_constraint(so.expr_sum(get[i] * weight[i] for i in items)
                         <= 210, name='value_total')
        m.add_constraints((get[i] <= ub[i] for i in items), name='upper_bound')
        m.solve(verbose=True)
        self.assertEqual(m.get_solution().to_string(), inspect.cleandoc(
            """
                 i         var  value   lb             ub  rc
            0  1.0    get[pen]    2.0 -0.0  1.797693e+308 NaN
            1  2.0    get[mug]   -0.0 -0.0  1.797693e+308 NaN
            2  3.0  get[watch]    2.0 -0.0  1.797693e+308 NaN
            3  4.0     get[pc]    1.0 -0.0  1.797693e+308 NaN
            """
        ))
        self.assertEqual(m.get_solution(vtype='dual').to_string(),
                         inspect.cleandoc(
            """
                 j                con  value  dual
            0  1.0        value_total  210.0   NaN
            1  2.0    upper_bound_pen    2.0   NaN
            2  3.0    upper_bound_mug   -0.0   NaN
            3  4.0  upper_bound_watch    2.0   NaN
            4  5.0     upper_bound_pc    1.0   NaN
            """
        ))
        # Re-solve through the MPS interface with a 3-solution pool.
        m.solve(mps=True, options={'maxpoolsols': 3}, verbose=True)
        self.assertEqual(m.get_solution().to_string(), inspect.cleandoc(
            """
                       var   lb             ub  value  solution
            0     get[pen]  0.0  1.797693e+308    2.0       1.0
            1     get[mug]  0.0  1.797693e+308    0.0       1.0
            2   get[watch]  0.0  1.797693e+308    2.0       1.0
            3      get[pc]  0.0  1.797693e+308    1.0       1.0
            4     get[pen]  0.0  1.797693e+308    1.0       2.0
            5     get[mug]  0.0  1.797693e+308    0.0       2.0
            6   get[watch]  0.0  1.797693e+308    1.0       2.0
            7      get[pc]  0.0  1.797693e+308    1.0       2.0
            8     get[pen]  0.0  1.797693e+308    0.0       3.0
            9     get[mug]  0.0  1.797693e+308    0.0       3.0
            10  get[watch]  0.0  1.797693e+308    0.0       3.0
            11     get[pc]  0.0  1.797693e+308    0.0       3.0
            """
        ))
        self.assertEqual(m.get_solution('dual').to_string(), inspect.cleandoc(
            """
                                 con  value  solution
            0            value_total  210.0       1.0
            1     upper_bound['pen']    2.0       1.0
            2     upper_bound['mug']    0.0       1.0
            3   upper_bound['watch']    2.0       1.0
            4      upper_bound['pc']    1.0       1.0
            5            value_total  205.0       2.0
            6     upper_bound['pen']    1.0       2.0
            7     upper_bound['mug']    0.0       2.0
            8   upper_bound['watch']    1.0       2.0
            9      upper_bound['pc']    1.0       2.0
            10           value_total    0.0       3.0
            11    upper_bound['pen']    0.0       3.0
            12    upper_bound['mug']    0.0       3.0
            13  upper_bound['watch']    0.0       3.0
            14     upper_bound['pc']    0.0       3.0
            """
        ))
        self.assertEqual(m.get_solution(pivot=True).to_string(),
                         inspect.cleandoc(
            """
            solution    1.0  2.0  3.0
            var
            get[mug]    0.0  0.0  0.0
            get[pc]     1.0  1.0  0.0
            get[pen]    2.0  1.0  0.0
            get[watch]  2.0  1.0  0.0
            """
        ))
        self.assertEqual(m.get_solution('dual', pivot=True).to_string(),
                         inspect.cleandoc(
            """
            solution                1.0    2.0  3.0
            con
            upper_bound['mug']      0.0    0.0  0.0
            upper_bound['pc']       1.0    1.0  0.0
            upper_bound['pen']      2.0    1.0  0.0
            upper_bound['watch']    2.0    1.0  0.0
            value_total           210.0  205.0  0.0
            """
        ))
        self.assertEqual(m.get_solution('primal', solution=2).to_string(),
                         inspect.cleandoc(
            """
                      var   lb             ub  value  solution
            4    get[pen]  0.0  1.797693e+308    1.0       2.0
            5    get[mug]  0.0  1.797693e+308    0.0       2.0
            6  get[watch]  0.0  1.797693e+308    1.0       2.0
            7     get[pc]  0.0  1.797693e+308    1.0       2.0
            """
        ))
        self.assertEqual(m.get_solution('dual', solution=3).to_string(),
                         inspect.cleandoc(
            """
                                 con  value  solution
            10           value_total    0.0       3.0
            11    upper_bound['pen']    0.0       3.0
            12    upper_bound['mug']    0.0       3.0
            13  upper_bound['watch']    0.0       3.0
            14     upper_bound['pc']    0.0       3.0
            """
        ))
        m.print_solution()

        def third_type():
            m.get_solution('x')
        self.assertRaises(ValueError, third_type)
    def test_set_coef(self):
        """set_variable_coef updates (or introduces) a variable's objective coefficient."""
        m = so.Model(name='test_set_coef')
        x = m.add_variable(name='x')
        y = m.add_variables(2, name='y')
        z = m.add_variable(name='z')
        obj = m.set_objective(2*x + 3*y[0] + 2*y[1], name='obj', sense=so.MIN)
        c1 = m.add_constraint(2* x + 5 * y[0] + 7 * y[1] <= 15, name='c1')
        self.assertEqual(m.get_variable_coef(x), 2)
        m.set_variable_coef(x, 3)
        self.assertEqual(m.get_variable_coef(x), 3)
        # z is not in the objective yet, so its coefficient starts at 0.
        self.assertEqual(m.get_variable_coef(z), 0)
        m.set_variable_coef(z, 1)
        self.assertEqual(m.get_variable_coef(z), 1)
    def test_to_mps(self):
        """Characterization test for the MPS DataFrame representation.

        Pins the exact frame for: a plain model; an objective with a constant
        (constant=True adds an obj_constant column/bound); constraints with a
        zero lower bound and an infinite upper bound; and a range constraint
        plus FR/BV bound rows. Also checks that to_frame() is deprecated.
        """
        m = so.Model(name='test_to_mps')
        x = m.add_variable(name='x', lb=0, ub=5, vartype=so.INT)
        y = m.add_variables(2, name='y', lb=1)
        m.set_objective(x + y[0], sense=so.MIN, name='xyobj')
        self.assertEqual(m.to_mps().to_string(), inspect.cleandoc(
            """
                  Field1       Field2 Field3  Field4    Field5  Field6  _id_
            0       NAME  test_to_mps    0.0             0.0         1
            1       ROWS                        NaN                NaN     2
            2        MIN        xyobj            NaN                NaN     3
            3    COLUMNS                        NaN                NaN     4
            4   MARK0000     'MARKER'            NaN  'INTORG'     NaN     5
            5          x        xyobj     1.0             NaN         6
            6   MARK0001     'MARKER'            NaN  'INTEND'     NaN     7
            7       y[0]        xyobj     1.0             NaN         8
            8       y[1]        xyobj     0.0             NaN         9
            9        RHS                        NaN                NaN    10
            10    RANGES                        NaN                NaN    11
            11    BOUNDS                        NaN                NaN    12
            12        LO          BND      x     1.0      0.0       NaN    13
            13        UP          BND      x     1.0      5.0       NaN    14
            14        LO          BND   y[0]     1.0      1.0       NaN    15
            15        LO          BND   y[1]     1.0      1.0       NaN    16
            16    ENDATA                        0.0             0.0        17
            """
        ))
        m.set_objective(x + 10, name='o', sense=so.MAX)
        self.assertEqual(m.to_mps(constant=True).to_string(),
                         inspect.cleandoc(
            """
                      Field1       Field2      Field3  Field4    Field5  Field6  _id_
            0           NAME  test_to_mps         0.0             0.0         1
            1           ROWS                             NaN                NaN     2
            2            MAX   o_constant                 NaN                NaN     3
            3        COLUMNS                             NaN                NaN     4
            4       MARK0000     'MARKER'                 NaN  'INTORG'     NaN     5
            5              x   o_constant         1.0             NaN         6
            6       MARK0001     'MARKER'                 NaN  'INTEND'     NaN     7
            7           y[0]   o_constant         0.0             NaN         8
            8           y[1]   o_constant         0.0             NaN         9
            9   obj_constant   o_constant         1.0             NaN        10
            10           RHS                             NaN                NaN    11
            11        RANGES                             NaN                NaN    12
            12        BOUNDS                             NaN                NaN    13
            13            LO          BND           x     1.0      0.0       NaN    14
            14            UP          BND           x     1.0      5.0       NaN    15
            15            LO          BND        y[0]     1.0      1.0       NaN    16
            16            LO          BND        y[1]     1.0      1.0       NaN    17
            17            FX          BND  obj_constant  1.0     10.0       NaN    18
            18        ENDATA                             0.0             0.0        19
            """
        ))
        c1 = m.add_constraint(y[0] + x >= 0, name='zero_lb')
        c2 = m.add_constraint(y[0] <= 100, name='inf_ub')
        from math import inf
        c2.set_rhs(inf)
        self.assertEqual(m.to_mps().to_string(), inspect.cleandoc(
            """
                      Field1       Field2      Field3  Field4    Field5  Field6  _id_
            0           NAME  test_to_mps         0.0             0.0         1
            1           ROWS                             NaN                NaN     2
            2            MAX   o_constant                 NaN                NaN     3
            3              G      zero_lb                 NaN                NaN     4
            4              L       inf_ub                 NaN                NaN     5
            5        COLUMNS                             NaN                NaN     6
            6       MARK0000     'MARKER'                 NaN  'INTORG'     NaN     7
            7              x   o_constant         1.0  zero_lb     1.0         8
            8       MARK0001     'MARKER'                 NaN  'INTEND'     NaN     9
            9           y[0]      zero_lb         1.0   inf_ub     1.0        10
            10          y[1]   o_constant         0.0             NaN        11
            11  obj_constant   o_constant         1.0             NaN        12
            12           RHS                             NaN                NaN    13
            13        RANGES                             NaN                NaN    14
            14        BOUNDS                             NaN                NaN    15
            15            LO          BND           x     1.0      0.0       NaN    16
            16            UP          BND           x     1.0      5.0       NaN    17
            17            LO          BND        y[0]     1.0      1.0       NaN    18
            18            LO          BND        y[1]     1.0      1.0       NaN    19
            19            FX          BND  obj_constant  1.0     10.0       NaN    20
            20        ENDATA                             0.0             0.0        21
            """
        ))
        u = m.add_variable(name='u')
        t = m.add_variable(name='t', vartype=so.BIN)
        m.drop_constraints(c1, c2)
        m.add_constraint(x + 2*y[0] == [3, 8], name='range_con')
        self.assertEqual(m.to_mps().to_string(), inspect.cleandoc(
            """
                      Field1       Field2      Field3     Field4    Field5  Field6  _id_
            0           NAME  test_to_mps         0.0                0.0         1
            1           ROWS                                NaN                NaN     2
            2            MAX   o_constant                    NaN                NaN     3
            3              E    range_con                    NaN                NaN     4
            4        COLUMNS                                NaN                NaN     5
            5       MARK0000     'MARKER'                    NaN   'INTORG'     NaN     6
            6              x   o_constant         1.0  range_con     1.0         7
            7       MARK0001     'MARKER'                    NaN   'INTEND'     NaN     8
            8           y[0]    range_con         2.0                NaN         9
            9           y[1]   o_constant         0.0                NaN        10
            10  obj_constant   o_constant         1.0                NaN        11
            11             u   o_constant         0.0                NaN        12
            12             t   o_constant         0.0                NaN        13
            13           RHS                                NaN                NaN    14
            14           RHS    range_con         3.0                NaN        15
            15        RANGES                                NaN                NaN    16
            16           rng    range_con         5.0                NaN        17
            17        BOUNDS                                NaN                NaN    18
            18            LO          BND           x        1.0      0.0       NaN    19
            19            UP          BND           x        1.0      5.0       NaN    20
            20            LO          BND        y[0]        1.0      1.0       NaN    21
            21            LO          BND        y[1]        1.0      1.0       NaN    22
            22            FX          BND  obj_constant     1.0     10.0       NaN    23
            23            FR          BND           u        NaN                NaN    24
            24            BV          BND           t        1.0             NaN        25
            25        ENDATA                                0.0                0.0        26
            """
        ))

        def get_frame_warning():
            r = m.to_frame()
        self.assertWarns(DeprecationWarning, get_frame_warning)
    def test_to_optmodel(self):
        """to_optmodel renders solver options, multiple objectives and ODS output.

        Covers the empty model, a solve line with `with`/`relaxint`/`obj`/
        `primalin` options plus ODS and solution-pool capture, and a nested
        multistart option dictionary.
        """
        m = so.Model(name='test_to_optmodel')
        self.assertEqual(m.to_optmodel(), inspect.cleandoc(
            """
            proc optmodel;
            min test_to_optmodel_obj = 0;
            solve;
            quit;
            """
        ))
        x = m.add_variable(name='x', init=5)
        e1 = m.set_objective(x, sense=so.MIN, name='e1')
        e2 = m.append_objective(x**2, sense=so.MAX, name='e2')
        response = m.to_optmodel(options={
            'with': 'blackbox',
            'relaxint': True,
            'obj': (e1, e2),
            'primalin': True,
        }, ods=True, primalin=True, parse=False)
        self.assertEqual(response, inspect.cleandoc(
            """
            proc optmodel;
            var x init 5;
            min e1 = x;
            max e2 = (x) ^ (2);
            solve with blackbox relaxint obj (e1 e2) / primalin;
            ods output PrintTable=primal_out;
            ods output PrintTable=dual_out;
            create data allsols from [s]=(1.._NVAR_) name=_VAR_[s].name {j in 1.._NSOL_} <col('sol_'||j)=_VAR_[s].sol[j]>;
            quit;
            """
        ))
        response = m.to_optmodel(options={
            'with': 'nlp',
            'multistart': {'loglevel': 3, 'maxstarts': 30}
        })
        self.assertEqual(response, inspect.cleandoc(
            """
            proc optmodel;
            var x init 5;
            min e1 = x;
            max e2 = (x) ^ (2);
            solve with nlp / multistart=(loglevel=3,maxstarts=30);
            quit;
            """
        ))
    def test_str(self):
        """str(model) summarizes name, objective, variables and constraints.

        When a session is attached, the summary gains a Session line built
        from the CASHOST/CASPORT environment variables.
        """
        m = TestModel.get_standard_model(name='test_model_str')
        response = str(m)
        self.assertEqual(response, inspect.cleandoc(
            """
            Model: [
              Name: test_model_str
              Objective: MIN [0]
              Variables (3): [
                x
                y[0]
                y[1]
              ]
              Constraints (3): [
                x <=  5
                y[0] <=  3
                y[1] <=  3
              ]
            ]
            """
        ))
        if TestModel.conn:
            m.set_session(TestModel.conn)
            response = str(m)
            self.assertEqual(response, inspect.cleandoc(
                """
                Model: [
                  Name: test_model_str
                  Session: {}:{}
                  Objective: MIN [0]
                  Variables (3): [
                    x
                    y[0]
                    y[1]
                  ]
                  Constraints (3): [
                    x <=  5
                    y[0] <=  3
                    y[1] <=  3
                  ]
                ]
                """.format(os.environ.get('CASHOST'), os.environ.get('CASPORT'))
            ))
    def test_model_repr(self):
        """repr(model) embeds the session's own repr; bad session types raise."""
        m = so.Model(name='test_model_repr')
        self.assertEqual(repr(m), "sasoptpy.Model(name='test_model_repr')")
        s = SASsession(cfgname='winlocal')
        m.set_session(s)
        self.assertEqual(
            repr(m),
            "sasoptpy.Model(name='test_model_repr', "
            "session=saspy.SASsession(cfgname='winlocal'))")
        if TestModel.conn:
            m.set_session(TestModel.conn)
            cas_repr = repr(m.get_session())
            self.assertEqual(
                repr(m), "sasoptpy.Model(name='test_model_repr', session=" +
                         cas_repr + ')')

        def invalid_session_type():
            w = 5
            m.set_session(w)
            rp = repr(m)
        self.assertRaises(TypeError, invalid_session_type)
    def test_defn(self):
        """to_definition lists the model's components in a `problem` statement."""
        m = TestModel.get_standard_model('test_model_defn')
        self.assertEqual(so.to_definition(m), "problem test_model_defn "
                                             "include x y c1 c2;")
    def test_expr(self):
        """to_expression of a model is its full OPTMODEL code."""
        m = TestModel.get_standard_model('test_model_expr')
        self.assertEqual(m.to_optmodel(), so.to_expression(m))
    def test_is_linear(self):
        """is_linear flips when a quadratic constraint or objective is present."""
        m = TestModel.get_standard_model('test_model_linearity')
        self.assertEqual(so.is_linear(m), True)
        x = m.get_variable('x')
        qbound = m.add_constraint(x ** 2 + x <= 10, name='qbound')
        self.assertEqual(so.is_linear(m), False)
        # Removing the quadratic constraint restores linearity.
        m.drop_constraint(qbound)
        self.assertEqual(so.is_linear(m), True)
        m.set_objective(x ** 2, sense=so.MIN, name='x_squared')
        self.assertEqual(so.is_linear(m), False)
    def test_session_type(self):
        """get_session_type is None without a session and 'CAS' with one."""
        m = TestModel.get_standard_model('test_model_session_type')
        self.assertEqual(m.get_session_type(), None)
        if TestModel.conn:
            m.set_session(TestModel.conn)
            self.assertEqual(m.get_session_type(), 'CAS')
    def test_ub_set(self):
        """Setting an upper bound after creation appears in the var declaration."""
        m = so.Model(name='test_model_var_ub')
        x = m.add_variable(name='x')
        self.assertEqual(so.to_optmodel(m), cleandoc('''
            proc optmodel;
            min test_model_var_ub_obj = 0;
            var x;
            solve;
            quit;'''))
        x.set_bounds(ub=5)
        self.assertEqual(so.to_optmodel(m), cleandoc('''
            proc optmodel;
            min test_model_var_ub_obj = 0;
            var x <= 5;
            solve;
            quit;'''))
    def test_model_add(self):
        """Model.add registers a standalone variable with the model."""
        m = so.Model(name='test_add')
        x = so.Variable(name='x')
        self.assertEqual(m.get_variables(), [])
        m.add(x)
        self.assertEqual(m.get_variables(), [x])
    def test_model_session(self):
        """get_session returns None by default and the CAS connection once set."""
        m = so.Model(name='m')
        s = m.get_session()
        self.assertEqual(s, None)
        if TestModel.conn:
            m.set_session(TestModel.conn)
            self.assertEqual(m.get_session(), TestModel.conn)
            self.assertEqual(m.get_session_type(), 'CAS')
    def test_names(self):
        """Variable group keys containing spaces survive a round trip to the solver."""
        if TestModel.conn is None:
            self.skipTest('Session is not available')
        m = so.Model(name='test_var_names', session=TestModel.conn)
        a = ['apple', 'apple juice']
        x = m.add_variables(a, name='amount', lb=1)
        m.set_objective(so.expr_sum(x[i] for i in a), name='obj', sense=so.minimize)
        m.solve()
        for i in a:
            self.assertEqual(x[i].get_value(), 1.0)
    def test_export(self):
        """export_mps(fetch=True) returns MPS text; nonlinear models raise ValueError.

        Whitespace is stripped before comparison because column alignment in
        the MPS output is not significant for this check.
        """
        m = TestModel.get_standard_model('test_model_export')
        x = m.get_variable('x')
        mps_text = m.export_mps(fetch=True)
        print(mps_text)
        self.assertEqual(mps_text.replace(' ', ''), inspect.cleandoc(
            """
            NAME          test_model_export
            ROWS
             MIN  test_model_export_obj
             L  c1
             L  c2[0]
             L  c2[1]
            COLUMNS
              x  c1  1.0
              y[0]  c2[0]  1.0
              y[1]  c2[1]  1.0
            RHS
              RHS  c1  5.0  c2[0]  3.0
              RHS  c2[1]  3.0
            RANGES
            BOUNDS
             FR  BND  x
             FR  BND  y[0]
             FR  BND  y[1]
            ENDATA"""
        ).replace(' ', ''))
        # MPS export only supports linear models; a quadratic constraint fails.
        m.add_constraint(x ** 2 + x <= 10, name='qb')

        def generate_error():
            m.export_mps()
        self.assertRaises(ValueError, generate_error)
    def tearDown(self):
        """Reset sasoptpy's global state between tests (name counters, registries)."""
        so.reset()
| true | true |
1c31f535a8cc4c7d63da95d2f6c8c48bb23f12c0 | 7,157 | py | Python | ungroupedCommands.py | atnguye2/HerosHavenBot | 6d82d79e0c0d22bd0ffb7d9f3b9f070f0d3d1103 | [
"MIT"
] | null | null | null | ungroupedCommands.py | atnguye2/HerosHavenBot | 6d82d79e0c0d22bd0ffb7d9f3b9f070f0d3d1103 | [
"MIT"
] | null | null | null | ungroupedCommands.py | atnguye2/HerosHavenBot | 6d82d79e0c0d22bd0ffb7d9f3b9f070f0d3d1103 | [
"MIT"
] | 1 | 2018-11-30T02:11:15.000Z | 2018-11-30T02:11:15.000Z | import discord
import commandHelpers
import string
import unicodedata
import random
from discord.ext import commands
import googleSheets
# Read the Oz Config Google Sheets
dmxpRows = googleSheets.getDmXpRows()
flavorTextRows = googleSheets.getFlavorTextRows()
judgeTextRows = googleSheets.getJudgeTextRows()
resRows = googleSheets.getResRows()
pfxpRows = googleSheets.getPfXpRows()
class UngroupedCommands(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(description='This is a command that echos the input')
async def echo(self, ctx, *sentence):
# Repeat what the user inputs. This is an example
msg = ''
for word in sentence:
msg += word
msg += ' '
await ctx.send(msg)
@commands.command(description='This is a command that calculates DM rewards')
async def dmxp(self, ctx, dmpcLevel, hoursPlayed, isMultishot='n'):
hoursPlayed = commandHelpers.round_nearest_half(float(hoursPlayed))
if any(n in isMultishot for n in commandHelpers.AFFIRMATIVE_REPLIES):
multishotCoefficient = 1.2
gameType = 'Multi-shot'
else:
multishotCoefficient = 1
gameType = 'One-shot'
print("multishotCoefficient = " + str(multishotCoefficient))
selectedRow = (dmxpRows[int(dmpcLevel)])
calculatedXP = int(selectedRow['xpHr']) * hoursPlayed
calculatedGP = int(selectedRow['gpHr']) * hoursPlayed * multishotCoefficient
calculatedRes = int(selectedRow['resHr']) * hoursPlayed * multishotCoefficient
calculatedPCdtd = 2 * hoursPlayed
calculatedDMdtd = 4 * hoursPlayed
flavor = flavorTextRows[random.randint(0, 6)]['flavortext'] #random index based on the number of options defined in the google Sheets config
msgOut = """
{flavor}
```md
DMPC {gameType} rewards for a level {dmpcLevel} character, adjusted to {hoursPlayed} hours played.
DTD: Players: {calculatedPCdtd}, DM: {calculatedDMdtd}
DMXP: {calculatedXP}
DM Gold: {calculatedGP}
DM Res: {calculatedRes}```"""
msgOut = msgOut.format(flavor=str(flavor), gameType=str(gameType), dmpcLevel=str(dmpcLevel),
hoursPlayed=str(hoursPlayed), calculatedPCdtd=str(calculatedPCdtd),
calculatedDMdtd=str(calculatedDMdtd), calculatedXP=str(calculatedXP),
calculatedGP=str(calculatedGP), calculatedRes=str(calculatedRes))
await ctx.send(msgOut)
@commands.command(description='This is a command that adds reactions to a message', pass_context=True)
async def react(self, ctx, numberOfOptions):
myChannel = ctx.message.channel
print(myChannel)
numberOfOptions = int(numberOfOptions)
async for msgs in myChannel.history(limit=1, before=ctx.message):
myMessage = msgs
for x in range(0, numberOfOptions):
y = string.ascii_lowercase[x]
y = "REGIONAL INDICATOR SYMBOL LETTER " + y
await myMessage.add_reaction(emoji=unicodedata.lookup(y))
@commands.command(description='This is a command created for the Liars Mask/Halloween event.')
async def judge(self, ctx):
judgement = judgeTextRows[random.randint(0, 6)]['judgetext'] #random index based on the number of options defined in the google Sheets config
msgOut = """{flavor}"""
msgOut = msgOut.format(flavor=str(judgement))
await ctx.send(msgOut)
@commands.command(description='This is a command that calculates Residuum rewards for 5e games', pass_context=True)
async def res(self, ctx, totalXP, minpc, numPlayers=1, isMultishot='n'):
if any(n in isMultishot for n in commandHelpers.AFFIRMATIVE_REPLIES):
multishotCoefficient = 1.2
gameType = 'Multi-shot'
else:
multishotCoefficient = 1
gameType = 'One-shot'
numPlayers = int(numPlayers)
print("multishotCoefficient = " + str(multishotCoefficient))
selectedRow = (resRows[int(minpc)])
print(selectedRow)
XPDenom = float(selectedRow['XPdenominator'])
calculatedRes = int(totalXP) / XPDenom * multishotCoefficient
maxMIFound = int(selectedRow['maxMI'])
splitGold = int(calculatedRes) / numPlayers
splitXP = int(totalXP) / numPlayers
sheetlink = '[Crafting Sheet](https://docs.google.com/spreadsheets/d/1kXkZqB6xPjzv8p4J_afmmr6qiZZD_w6xL9XYRuQlRjs/edit?usp=sharing)'
flavor = flavorTextRows[random.randint(0, 6)][
'flavortext'] # random index based on the number of options defined in the google Sheets config
msgOut = """
{flavor}
```md
{gameType} rewards for a session with total of {totalXP} experience points across {numPlayers} players:
The lowest player character was level {minpc}, resulting in a modifier of {XPdenominator}.
Total Residuum Budget: {calculatedRes} for the group.
Maximum Single Magic Item Cost: {maxMIFound}
Maximum Total Gold: {calculatedRes}
Maximum Gold Per Player: {splitGold}
Experience Per Player: {splitXP}
```
{sheetlink}
"""
msgOut = msgOut.format(flavor=str(flavor), gameType=str(gameType), minpc=str(minpc),
calculatedRes=str(round(calculatedRes)), XPdenominator=str(XPDenom),
totalXP=str(totalXP), maxMIFound=str(maxMIFound), splitGold=str(round(splitGold)),
splitXP=str(round(splitXP)), numPlayers=str(numPlayers), sheetlink=str(sheetlink))
embed = discord.Embed(
title="Session Rewards",
description=msgOut,
colour=commandHelpers.getRandomHexColor()
)
#embed.set_footer(text=sheetlink)
await ctx.send(ctx.message.channel, embed=embed)
@commands.command(description='This is a command that calculates Pathfinder session rewards')
async def pfxp(self, ctx, pcLevel, hoursPlayed, difficultyCoefficient=1.0):
hoursPlayed = commandHelpers.round_nearest_half(float(hoursPlayed))
print("difficultyCoefficient = " + str(difficultyCoefficient))
selectedRow = (pfxpRows[int(pcLevel)])
calculatedXP = int(selectedRow['xpHr']) * hoursPlayed * difficultyCoefficient
calculatedXP = int(calculatedXP)
calculatedGP = int(selectedRow['gpHr']) * hoursPlayed * difficultyCoefficient
calculatedGP = int(calculatedGP)
msgOut = """
```md
Pathfinder one shot rewards for a level {pcLevel} character, adjusted to {hoursPlayed} hours played.
Difficulty Modifier: {difficultyCoefficient}
XP: {calculatedXP}
Gold: {calculatedGP}```"""
msgOut = msgOut.format(difficultyCoefficient=str(difficultyCoefficient), pcLevel=str(pcLevel),
hoursPlayed=str(hoursPlayed), calculatedXP=str(calculatedXP),
calculatedGP=str(calculatedGP))
await ctx.send(msgOut)
def setup(client):
    """Extension entry point: register this cog with the discord.py client."""
    cog = UngroupedCommands(client)
    client.add_cog(cog)
| 45.878205 | 149 | 0.667458 | import discord
import commandHelpers
import string
import unicodedata
import random
from discord.ext import commands
import googleSheets
dmxpRows = googleSheets.getDmXpRows()
flavorTextRows = googleSheets.getFlavorTextRows()
judgeTextRows = googleSheets.getJudgeTextRows()
resRows = googleSheets.getResRows()
pfxpRows = googleSheets.getPfXpRows()
class UngroupedCommands(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(description='This is a command that echos the input')
async def echo(self, ctx, *sentence):
msg = ''
for word in sentence:
msg += word
msg += ' '
await ctx.send(msg)
@commands.command(description='This is a command that calculates DM rewards')
async def dmxp(self, ctx, dmpcLevel, hoursPlayed, isMultishot='n'):
hoursPlayed = commandHelpers.round_nearest_half(float(hoursPlayed))
if any(n in isMultishot for n in commandHelpers.AFFIRMATIVE_REPLIES):
multishotCoefficient = 1.2
gameType = 'Multi-shot'
else:
multishotCoefficient = 1
gameType = 'One-shot'
print("multishotCoefficient = " + str(multishotCoefficient))
selectedRow = (dmxpRows[int(dmpcLevel)])
calculatedXP = int(selectedRow['xpHr']) * hoursPlayed
calculatedGP = int(selectedRow['gpHr']) * hoursPlayed * multishotCoefficient
calculatedRes = int(selectedRow['resHr']) * hoursPlayed * multishotCoefficient
calculatedPCdtd = 2 * hoursPlayed
calculatedDMdtd = 4 * hoursPlayed
flavor = flavorTextRows[random.randint(0, 6)]['flavortext']
msgOut = """
{flavor}
```md
DMPC {gameType} rewards for a level {dmpcLevel} character, adjusted to {hoursPlayed} hours played.
DTD: Players: {calculatedPCdtd}, DM: {calculatedDMdtd}
DMXP: {calculatedXP}
DM Gold: {calculatedGP}
DM Res: {calculatedRes}```"""
msgOut = msgOut.format(flavor=str(flavor), gameType=str(gameType), dmpcLevel=str(dmpcLevel),
hoursPlayed=str(hoursPlayed), calculatedPCdtd=str(calculatedPCdtd),
calculatedDMdtd=str(calculatedDMdtd), calculatedXP=str(calculatedXP),
calculatedGP=str(calculatedGP), calculatedRes=str(calculatedRes))
await ctx.send(msgOut)
@commands.command(description='This is a command that adds reactions to a message', pass_context=True)
async def react(self, ctx, numberOfOptions):
myChannel = ctx.message.channel
print(myChannel)
numberOfOptions = int(numberOfOptions)
async for msgs in myChannel.history(limit=1, before=ctx.message):
myMessage = msgs
for x in range(0, numberOfOptions):
y = string.ascii_lowercase[x]
y = "REGIONAL INDICATOR SYMBOL LETTER " + y
await myMessage.add_reaction(emoji=unicodedata.lookup(y))
@commands.command(description='This is a command created for the Liars Mask/Halloween event.')
async def judge(self, ctx):
judgement = judgeTextRows[random.randint(0, 6)]['judgetext']
msgOut = """{flavor}"""
msgOut = msgOut.format(flavor=str(judgement))
await ctx.send(msgOut)
@commands.command(description='This is a command that calculates Residuum rewards for 5e games', pass_context=True)
async def res(self, ctx, totalXP, minpc, numPlayers=1, isMultishot='n'):
if any(n in isMultishot for n in commandHelpers.AFFIRMATIVE_REPLIES):
multishotCoefficient = 1.2
gameType = 'Multi-shot'
else:
multishotCoefficient = 1
gameType = 'One-shot'
numPlayers = int(numPlayers)
print("multishotCoefficient = " + str(multishotCoefficient))
selectedRow = (resRows[int(minpc)])
print(selectedRow)
XPDenom = float(selectedRow['XPdenominator'])
calculatedRes = int(totalXP) / XPDenom * multishotCoefficient
maxMIFound = int(selectedRow['maxMI'])
splitGold = int(calculatedRes) / numPlayers
splitXP = int(totalXP) / numPlayers
sheetlink = '[Crafting Sheet](https://docs.google.com/spreadsheets/d/1kXkZqB6xPjzv8p4J_afmmr6qiZZD_w6xL9XYRuQlRjs/edit?usp=sharing)'
flavor = flavorTextRows[random.randint(0, 6)][
'flavortext']
msgOut = """
{flavor}
```md
{gameType} rewards for a session with total of {totalXP} experience points across {numPlayers} players:
The lowest player character was level {minpc}, resulting in a modifier of {XPdenominator}.
Total Residuum Budget: {calculatedRes} for the group.
Maximum Single Magic Item Cost: {maxMIFound}
Maximum Total Gold: {calculatedRes}
Maximum Gold Per Player: {splitGold}
Experience Per Player: {splitXP}
```
{sheetlink}
"""
msgOut = msgOut.format(flavor=str(flavor), gameType=str(gameType), minpc=str(minpc),
calculatedRes=str(round(calculatedRes)), XPdenominator=str(XPDenom),
totalXP=str(totalXP), maxMIFound=str(maxMIFound), splitGold=str(round(splitGold)),
splitXP=str(round(splitXP)), numPlayers=str(numPlayers), sheetlink=str(sheetlink))
embed = discord.Embed(
title="Session Rewards",
description=msgOut,
colour=commandHelpers.getRandomHexColor()
)
await ctx.send(ctx.message.channel, embed=embed)
@commands.command(description='This is a command that calculates Pathfinder session rewards')
async def pfxp(self, ctx, pcLevel, hoursPlayed, difficultyCoefficient=1.0):
hoursPlayed = commandHelpers.round_nearest_half(float(hoursPlayed))
print("difficultyCoefficient = " + str(difficultyCoefficient))
selectedRow = (pfxpRows[int(pcLevel)])
calculatedXP = int(selectedRow['xpHr']) * hoursPlayed * difficultyCoefficient
calculatedXP = int(calculatedXP)
calculatedGP = int(selectedRow['gpHr']) * hoursPlayed * difficultyCoefficient
calculatedGP = int(calculatedGP)
msgOut = """
```md
Pathfinder one shot rewards for a level {pcLevel} character, adjusted to {hoursPlayed} hours played.
Difficulty Modifier: {difficultyCoefficient}
XP: {calculatedXP}
Gold: {calculatedGP}```"""
msgOut = msgOut.format(difficultyCoefficient=str(difficultyCoefficient), pcLevel=str(pcLevel),
hoursPlayed=str(hoursPlayed), calculatedXP=str(calculatedXP),
calculatedGP=str(calculatedGP))
await ctx.send(msgOut)
def setup(client):
client.add_cog(UngroupedCommands(client))
| true | true |
1c31f5eda1f264abdaca5c1a26c381a3dde81e5f | 6,829 | py | Python | scripts/rpc/lvol.py | michalwy/spdk | 2389caa4f51583425efd993d7066021b17e97ff3 | [
"BSD-3-Clause"
] | 2,107 | 2015-09-23T01:53:51.000Z | 2022-03-29T09:55:13.000Z | scripts/rpc/lvol.py | michalwy/spdk | 2389caa4f51583425efd993d7066021b17e97ff3 | [
"BSD-3-Clause"
] | 2,382 | 2015-09-24T02:36:59.000Z | 2022-03-31T22:53:45.000Z | scripts/rpc/lvol.py | michalwy/spdk | 2389caa4f51583425efd993d7066021b17e97ff3 | [
"BSD-3-Clause"
] | 916 | 2015-09-23T03:04:41.000Z | 2022-03-31T05:45:04.000Z | from .helpers import deprecated_alias
@deprecated_alias('construct_lvol_store')
def bdev_lvol_create_lvstore(client, bdev_name, lvs_name, cluster_sz=None, clear_method=None):
    """Construct a logical volume store.

    Args:
        bdev_name: bdev on which to construct logical volume store
        lvs_name: name of the logical volume store to create
        cluster_sz: cluster size of the logical volume store in bytes (optional)
        clear_method: clear method for the data region: none, unmap, write_zeroes (optional)

    Returns:
        UUID of created logical volume store.
    """
    optional = (('cluster_sz', cluster_sz), ('clear_method', clear_method))
    params = {'bdev_name': bdev_name, 'lvs_name': lvs_name}
    # Only forward the optional fields that were actually provided.
    params.update({key: value for key, value in optional if value})
    return client.call('bdev_lvol_create_lvstore', params)
@deprecated_alias('rename_lvol_store')
def bdev_lvol_rename_lvstore(client, old_name, new_name):
"""Rename a logical volume store.
Args:
old_name: existing logical volume store name
new_name: new logical volume store name
"""
params = {
'old_name': old_name,
'new_name': new_name
}
return client.call('bdev_lvol_rename_lvstore', params)
@deprecated_alias('construct_lvol_bdev')
def bdev_lvol_create(client, lvol_name, size, thin_provision=False, uuid=None, lvs_name=None, clear_method=None):
"""Create a logical volume on a logical volume store.
Args:
lvol_name: name of logical volume to create
size: desired size of logical volume in bytes (will be rounded up to a multiple of cluster size)
thin_provision: True to enable thin provisioning
uuid: UUID of logical volume store to create logical volume on (optional)
lvs_name: name of logical volume store to create logical volume on (optional)
Either uuid or lvs_name must be specified, but not both.
Returns:
Name of created logical volume block device.
"""
if (uuid and lvs_name) or (not uuid and not lvs_name):
raise ValueError("Either uuid or lvs_name must be specified, but not both")
params = {'lvol_name': lvol_name, 'size': size}
if thin_provision:
params['thin_provision'] = thin_provision
if uuid:
params['uuid'] = uuid
if lvs_name:
params['lvs_name'] = lvs_name
if clear_method:
params['clear_method'] = clear_method
return client.call('bdev_lvol_create', params)
@deprecated_alias('snapshot_lvol_bdev')
def bdev_lvol_snapshot(client, lvol_name, snapshot_name):
    """Take a snapshot of the current state of a logical volume.

    Args:
        lvol_name: logical volume to snapshot
        snapshot_name: name for the new snapshot

    Returns:
        Name of the created logical volume snapshot.
    """
    return client.call('bdev_lvol_snapshot',
                       {'lvol_name': lvol_name, 'snapshot_name': snapshot_name})


@deprecated_alias('clone_lvol_bdev')
def bdev_lvol_clone(client, snapshot_name, clone_name):
    """Clone a snapshot into a new logical volume.

    Args:
        snapshot_name: snapshot to clone from
        clone_name: name for the new logical volume

    Returns:
        Name of the created logical volume clone.
    """
    return client.call('bdev_lvol_clone',
                       {'snapshot_name': snapshot_name, 'clone_name': clone_name})


@deprecated_alias('rename_lvol_bdev')
def bdev_lvol_rename(client, old_name, new_name):
    """Rename a logical volume.

    Args:
        old_name: current logical volume name
        new_name: desired logical volume name
    """
    return client.call('bdev_lvol_rename',
                       {'old_name': old_name, 'new_name': new_name})
@deprecated_alias('resize_lvol_bdev')
def bdev_lvol_resize(client, name, size):
    """Resize a logical volume.

    Args:
        name: logical volume to resize
        size: new size in bytes (rounded up to a multiple of the cluster size)
    """
    return client.call('bdev_lvol_resize', {'name': name, 'size': size})


@deprecated_alias('set_read_only_lvol_bdev')
def bdev_lvol_set_read_only(client, name):
    """Mark a logical volume as read-only.

    Args:
        name: logical volume to mark read-only
    """
    return client.call('bdev_lvol_set_read_only', {'name': name})


@deprecated_alias('destroy_lvol_bdev')
def bdev_lvol_delete(client, name):
    """Destroy a logical volume.

    Args:
        name: logical volume to destroy
    """
    return client.call('bdev_lvol_delete', {'name': name})


@deprecated_alias('inflate_lvol_bdev')
def bdev_lvol_inflate(client, name):
    """Inflate a logical volume.

    Args:
        name: logical volume to inflate
    """
    return client.call('bdev_lvol_inflate', {'name': name})


@deprecated_alias('decouple_parent_lvol_bdev')
def bdev_lvol_decouple_parent(client, name):
    """Decouple the parent of a logical volume.

    Args:
        name: logical volume whose parent should be decoupled
    """
    return client.call('bdev_lvol_decouple_parent', {'name': name})
@deprecated_alias('destroy_lvol_store')
def bdev_lvol_delete_lvstore(client, uuid=None, lvs_name=None):
    """Destroy a logical volume store.

    Exactly one of uuid / lvs_name selects the store.

    Args:
        uuid: UUID of the logical volume store to destroy (optional)
        lvs_name: name of the logical volume store to destroy (optional)

    Raises:
        ValueError: if neither or both of uuid and lvs_name are given.
    """
    if bool(uuid) == bool(lvs_name):
        raise ValueError("Exactly one of uuid or lvs_name must be specified")
    params = {'uuid': uuid} if uuid else {'lvs_name': lvs_name}
    return client.call('bdev_lvol_delete_lvstore', params)


@deprecated_alias('get_lvol_stores')
def bdev_lvol_get_lvstores(client, uuid=None, lvs_name=None):
    """List logical volume stores.

    At most one of uuid / lvs_name may be given; with neither, all
    logical volume stores are returned.

    Args:
        uuid: UUID of the store to look up (optional)
        lvs_name: name of the store to look up (optional)

    Raises:
        ValueError: if both uuid and lvs_name are given.
    """
    if uuid and lvs_name:
        raise ValueError("Exactly one of uuid or lvs_name may be specified")
    params = {key: value for key, value in (('uuid', uuid), ('lvs_name', lvs_name)) if value}
    return client.call('bdev_lvol_get_lvstores', params)
| 29.820961 | 113 | 0.679602 | from .helpers import deprecated_alias
@deprecated_alias('construct_lvol_store')
def bdev_lvol_create_lvstore(client, bdev_name, lvs_name, cluster_sz=None, clear_method=None):
    """Construct a logical volume store on bdev_name; returns its UUID.

    cluster_sz (bytes) and clear_method (none/unmap/write_zeroes) are optional.
    """
    params = {'bdev_name': bdev_name, 'lvs_name': lvs_name}
    if cluster_sz:
        params['cluster_sz'] = cluster_sz
    if clear_method:
        params['clear_method'] = clear_method
    return client.call('bdev_lvol_create_lvstore', params)


@deprecated_alias('rename_lvol_store')
def bdev_lvol_rename_lvstore(client, old_name, new_name):
    """Rename a logical volume store from old_name to new_name."""
    params = {
        'old_name': old_name,
        'new_name': new_name
    }
    return client.call('bdev_lvol_rename_lvstore', params)


@deprecated_alias('construct_lvol_bdev')
def bdev_lvol_create(client, lvol_name, size, thin_provision=False, uuid=None, lvs_name=None, clear_method=None):
    """Create a logical volume of `size` bytes; returns its bdev name.

    Exactly one of uuid / lvs_name selects the target store; raises
    ValueError otherwise.
    """
    if (uuid and lvs_name) or (not uuid and not lvs_name):
        raise ValueError("Either uuid or lvs_name must be specified, but not both")
    params = {'lvol_name': lvol_name, 'size': size}
    if thin_provision:
        params['thin_provision'] = thin_provision
    if uuid:
        params['uuid'] = uuid
    if lvs_name:
        params['lvs_name'] = lvs_name
    if clear_method:
        params['clear_method'] = clear_method
    return client.call('bdev_lvol_create', params)


@deprecated_alias('snapshot_lvol_bdev')
def bdev_lvol_snapshot(client, lvol_name, snapshot_name):
    """Snapshot lvol_name as snapshot_name; returns the snapshot's name."""
    params = {
        'lvol_name': lvol_name,
        'snapshot_name': snapshot_name
    }
    return client.call('bdev_lvol_snapshot', params)


@deprecated_alias('clone_lvol_bdev')
def bdev_lvol_clone(client, snapshot_name, clone_name):
    """Clone snapshot_name into a new volume clone_name; returns its name."""
    params = {
        'snapshot_name': snapshot_name,
        'clone_name': clone_name
    }
    return client.call('bdev_lvol_clone', params)


@deprecated_alias('rename_lvol_bdev')
def bdev_lvol_rename(client, old_name, new_name):
    """Rename a logical volume from old_name to new_name."""
    params = {
        'old_name': old_name,
        'new_name': new_name
    }
    return client.call('bdev_lvol_rename', params)


@deprecated_alias('resize_lvol_bdev')
def bdev_lvol_resize(client, name, size):
    """Resize logical volume `name` to `size` bytes."""
    params = {
        'name': name,
        'size': size,
    }
    return client.call('bdev_lvol_resize', params)


@deprecated_alias('set_read_only_lvol_bdev')
def bdev_lvol_set_read_only(client, name):
    """Mark logical volume `name` as read-only."""
    params = {
        'name': name,
    }
    return client.call('bdev_lvol_set_read_only', params)


@deprecated_alias('destroy_lvol_bdev')
def bdev_lvol_delete(client, name):
    """Destroy logical volume `name`."""
    params = {
        'name': name,
    }
    return client.call('bdev_lvol_delete', params)


@deprecated_alias('inflate_lvol_bdev')
def bdev_lvol_inflate(client, name):
    """Inflate logical volume `name`."""
    params = {
        'name': name,
    }
    return client.call('bdev_lvol_inflate', params)


@deprecated_alias('decouple_parent_lvol_bdev')
def bdev_lvol_decouple_parent(client, name):
    """Decouple the parent of logical volume `name`."""
    params = {
        'name': name,
    }
    return client.call('bdev_lvol_decouple_parent', params)


@deprecated_alias('destroy_lvol_store')
def bdev_lvol_delete_lvstore(client, uuid=None, lvs_name=None):
    """Destroy a logical volume store.

    Exactly one of uuid / lvs_name must be given; raises ValueError otherwise.
    """
    if (uuid and lvs_name) or (not uuid and not lvs_name):
        raise ValueError("Exactly one of uuid or lvs_name must be specified")
    params = {}
    if uuid:
        params['uuid'] = uuid
    if lvs_name:
        params['lvs_name'] = lvs_name
    return client.call('bdev_lvol_delete_lvstore', params)


@deprecated_alias('get_lvol_stores')
def bdev_lvol_get_lvstores(client, uuid=None, lvs_name=None):
    """List logical volume stores.

    At most one of uuid / lvs_name may be given (ValueError if both);
    with neither, all stores are returned.
    """
    if (uuid and lvs_name):
        raise ValueError("Exactly one of uuid or lvs_name may be specified")
    params = {}
    if uuid:
        params['uuid'] = uuid
    if lvs_name:
        params['lvs_name'] = lvs_name
    return client.call('bdev_lvol_get_lvstores', params)
| true | true |
1c31f67fc9203c9333792670dfa9525e9a6a035e | 3,050 | py | Python | numpyro/distributions/__init__.py | hessammehr/numpyro | d0f9a46e81d4dae79a49cb4f5d18354a6587c961 | [
"Apache-2.0"
] | null | null | null | numpyro/distributions/__init__.py | hessammehr/numpyro | d0f9a46e81d4dae79a49cb4f5d18354a6587c961 | [
"Apache-2.0"
] | null | null | null | numpyro/distributions/__init__.py | hessammehr/numpyro | d0f9a46e81d4dae79a49cb4f5d18354a6587c961 | [
"Apache-2.0"
] | null | null | null | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from numpyro.distributions.conjugate import (
BetaBinomial,
DirichletMultinomial,
GammaPoisson,
)
from numpyro.distributions.continuous import (
LKJ,
Beta,
Cauchy,
Chi2,
Dirichlet,
Exponential,
Gamma,
GaussianRandomWalk,
Gumbel,
HalfCauchy,
HalfNormal,
InverseGamma,
Laplace,
LKJCholesky,
Logistic,
LogNormal,
LowRankMultivariateNormal,
MultivariateNormal,
Normal,
Pareto,
SoftLaplace,
StudentT,
Uniform,
)
from numpyro.distributions.directional import ProjectedNormal, VonMises
from numpyro.distributions.discrete import (
Bernoulli,
BernoulliLogits,
BernoulliProbs,
Binomial,
BinomialLogits,
BinomialProbs,
Categorical,
CategoricalLogits,
CategoricalProbs,
Geometric,
GeometricLogits,
GeometricProbs,
Multinomial,
MultinomialLogits,
MultinomialProbs,
OrderedLogistic,
Poisson,
PRNGIdentity,
ZeroInflatedPoisson,
)
from numpyro.distributions.distribution import (
Delta,
Distribution,
ExpandedDistribution,
ImproperUniform,
Independent,
MaskedDistribution,
TransformedDistribution,
Unit,
)
from numpyro.distributions.kl import kl_divergence
from numpyro.distributions.transforms import biject_to
from numpyro.distributions.truncated import (
LeftTruncatedDistribution,
RightTruncatedDistribution,
TruncatedCauchy,
TruncatedDistribution,
TruncatedNormal,
TruncatedPolyaGamma,
TwoSidedTruncatedDistribution,
)
from . import constraints, transforms
# Public API of numpyro.distributions: every name re-exported from the
# submodules imported above, in (roughly) alphabetical order.
__all__ = [
    "biject_to",
    "constraints",
    "kl_divergence",
    "transforms",
    "Bernoulli",
    "BernoulliLogits",
    "BernoulliProbs",
    "Beta",
    "BetaBinomial",
    "Binomial",
    "BinomialLogits",
    "BinomialProbs",
    "Categorical",
    "CategoricalLogits",
    "CategoricalProbs",
    "Cauchy",
    "Chi2",
    "Delta",
    "Dirichlet",
    "DirichletMultinomial",
    "Distribution",
    "Exponential",
    "ExpandedDistribution",
    "Gamma",
    "GammaPoisson",
    "GaussianRandomWalk",
    "Geometric",
    "GeometricLogits",
    "GeometricProbs",
    "Gumbel",
    "HalfCauchy",
    "HalfNormal",
    "ImproperUniform",
    "Independent",
    "InverseGamma",
    "LKJ",
    "LKJCholesky",
    "Laplace",
    "LeftTruncatedDistribution",
    "Logistic",
    "LogNormal",
    "MaskedDistribution",
    "Multinomial",
    "MultinomialLogits",
    "MultinomialProbs",
    "MultivariateNormal",
    "LowRankMultivariateNormal",
    "Normal",
    "OrderedLogistic",
    "Pareto",
    "Poisson",
    "ProjectedNormal",
    "PRNGIdentity",
    "RightTruncatedDistribution",
    "SoftLaplace",
    "StudentT",
    "TransformedDistribution",
    "TruncatedCauchy",
    "TruncatedDistribution",
    "TruncatedNormal",
    "TruncatedPolyaGamma",
    "TwoSidedTruncatedDistribution",
    "Uniform",
    "Unit",
    "VonMises",
    "ZeroInflatedPoisson",
]
| 20.608108 | 71 | 0.676066 |
from numpyro.distributions.conjugate import (
BetaBinomial,
DirichletMultinomial,
GammaPoisson,
)
from numpyro.distributions.continuous import (
LKJ,
Beta,
Cauchy,
Chi2,
Dirichlet,
Exponential,
Gamma,
GaussianRandomWalk,
Gumbel,
HalfCauchy,
HalfNormal,
InverseGamma,
Laplace,
LKJCholesky,
Logistic,
LogNormal,
LowRankMultivariateNormal,
MultivariateNormal,
Normal,
Pareto,
SoftLaplace,
StudentT,
Uniform,
)
from numpyro.distributions.directional import ProjectedNormal, VonMises
from numpyro.distributions.discrete import (
Bernoulli,
BernoulliLogits,
BernoulliProbs,
Binomial,
BinomialLogits,
BinomialProbs,
Categorical,
CategoricalLogits,
CategoricalProbs,
Geometric,
GeometricLogits,
GeometricProbs,
Multinomial,
MultinomialLogits,
MultinomialProbs,
OrderedLogistic,
Poisson,
PRNGIdentity,
ZeroInflatedPoisson,
)
from numpyro.distributions.distribution import (
Delta,
Distribution,
ExpandedDistribution,
ImproperUniform,
Independent,
MaskedDistribution,
TransformedDistribution,
Unit,
)
from numpyro.distributions.kl import kl_divergence
from numpyro.distributions.transforms import biject_to
from numpyro.distributions.truncated import (
LeftTruncatedDistribution,
RightTruncatedDistribution,
TruncatedCauchy,
TruncatedDistribution,
TruncatedNormal,
TruncatedPolyaGamma,
TwoSidedTruncatedDistribution,
)
from . import constraints, transforms
# Public API of numpyro.distributions: every name re-exported from the
# submodules imported above, in (roughly) alphabetical order.
__all__ = [
    "biject_to",
    "constraints",
    "kl_divergence",
    "transforms",
    "Bernoulli",
    "BernoulliLogits",
    "BernoulliProbs",
    "Beta",
    "BetaBinomial",
    "Binomial",
    "BinomialLogits",
    "BinomialProbs",
    "Categorical",
    "CategoricalLogits",
    "CategoricalProbs",
    "Cauchy",
    "Chi2",
    "Delta",
    "Dirichlet",
    "DirichletMultinomial",
    "Distribution",
    "Exponential",
    "ExpandedDistribution",
    "Gamma",
    "GammaPoisson",
    "GaussianRandomWalk",
    "Geometric",
    "GeometricLogits",
    "GeometricProbs",
    "Gumbel",
    "HalfCauchy",
    "HalfNormal",
    "ImproperUniform",
    "Independent",
    "InverseGamma",
    "LKJ",
    "LKJCholesky",
    "Laplace",
    "LeftTruncatedDistribution",
    "Logistic",
    "LogNormal",
    "MaskedDistribution",
    "Multinomial",
    "MultinomialLogits",
    "MultinomialProbs",
    "MultivariateNormal",
    "LowRankMultivariateNormal",
    "Normal",
    "OrderedLogistic",
    "Pareto",
    "Poisson",
    "ProjectedNormal",
    "PRNGIdentity",
    "RightTruncatedDistribution",
    "SoftLaplace",
    "StudentT",
    "TransformedDistribution",
    "TruncatedCauchy",
    "TruncatedDistribution",
    "TruncatedNormal",
    "TruncatedPolyaGamma",
    "TwoSidedTruncatedDistribution",
    "Uniform",
    "Unit",
    "VonMises",
    "ZeroInflatedPoisson",
]
| true | true |
1c31f688fb9919f97f9783ddc962d1788f56d98c | 1,222 | py | Python | tools/project-creator/Python2.6.6/Lib/test/test_undocumented_details.py | gohopo/nineck.ca | 9601f5ae4c20f8a3ea27b06551556fa5e1eecce3 | [
"MIT"
] | 81 | 2017-03-13T08:24:01.000Z | 2021-04-02T09:48:38.000Z | tools/project-creator/Python2.6.6/Lib/test/test_undocumented_details.py | gohopo/nineck.ca | 9601f5ae4c20f8a3ea27b06551556fa5e1eecce3 | [
"MIT"
] | 6 | 2017-04-30T08:36:55.000Z | 2017-09-22T01:37:28.000Z | tools/project-creator/Python2.6.6/Lib/test/test_undocumented_details.py | gohopo/nineck.ca | 9601f5ae4c20f8a3ea27b06551556fa5e1eecce3 | [
"MIT"
] | 41 | 2017-03-18T14:11:58.000Z | 2021-04-14T05:06:09.000Z | from test.test_support import run_unittest, _check_py3k_warnings
import unittest
import sys
class TestImplementationComparisons(unittest.TestCase):
    """Exercise undocumented CPython 2 comparison details.

    NOTE: this is Python-2-only code (`cmp` builtin, `func_closure`).
    """

    def test_type_comparisons(self):
        # In Python 2, objects of different types have an arbitrary but
        # consistent total ordering, so type objects themselves can be
        # compared with <, >, <=, >= and cmp().
        self.assertTrue(str < int or str > int)
        self.assertTrue(int <= str or int >= str)
        self.assertTrue(cmp(int, str) != 0)
        self.assertTrue(int is int)
        self.assertTrue(str == str)
        self.assertTrue(int != str)

    def test_cell_comparisons(self):
        # f returns two closures over different cells: g closes over x
        # (always bound), h closes over y (unbound when x is falsy).
        def f(x):
            if x:
                y = 1
            def g():
                return x
            def h():
                return y
            return g, h
        g, h = f(0)
        g_cell, = g.func_closure
        h_cell, = h.func_closure
        # With f(0), h's cell is empty (y was never assigned); the asserts
        # below pin how an empty cell orders against a filled one.
        self.assertTrue(h_cell < g_cell)
        self.assertTrue(g_cell >= h_cell)
        self.assertEqual(cmp(g_cell, h_cell), 1)
        self.assertTrue(g_cell is g_cell)
        self.assertTrue(g_cell == g_cell)
        self.assertTrue(h_cell == h_cell)
        self.assertTrue(g_cell != h_cell)

def test_main():
    # Cross-type comparisons warn under -3 mode; suppress those warnings.
    with _check_py3k_warnings():
        run_unittest(TestImplementationComparisons)

if __name__ == '__main__':
    test_main()
| 29.804878 | 65 | 0.576105 | from test.test_support import run_unittest, _check_py3k_warnings
import unittest
import sys
class TestImplementationComparisons(unittest.TestCase):
    """Exercise undocumented CPython 2 comparison details.

    NOTE: this is Python-2-only code (`cmp` builtin, `func_closure`).
    """

    def test_type_comparisons(self):
        # In Python 2, objects of different types have an arbitrary but
        # consistent total ordering, so type objects themselves compare.
        self.assertTrue(str < int or str > int)
        self.assertTrue(int <= str or int >= str)
        self.assertTrue(cmp(int, str) != 0)
        self.assertTrue(int is int)
        self.assertTrue(str == str)
        self.assertTrue(int != str)

    def test_cell_comparisons(self):
        # f returns two closures over different cells: g closes over x
        # (always bound), h closes over y (unbound when x is falsy).
        def f(x):
            if x:
                y = 1
            def g():
                return x
            def h():
                return y
            return g, h
        g, h = f(0)
        g_cell, = g.func_closure
        h_cell, = h.func_closure
        # With f(0), h's cell is empty (y was never assigned); the asserts
        # below pin how an empty cell orders against a filled one.
        self.assertTrue(h_cell < g_cell)
        self.assertTrue(g_cell >= h_cell)
        self.assertEqual(cmp(g_cell, h_cell), 1)
        self.assertTrue(g_cell is g_cell)
        self.assertTrue(g_cell == g_cell)
        self.assertTrue(h_cell == h_cell)
        self.assertTrue(g_cell != h_cell)

def test_main():
    # Cross-type comparisons warn under -3 mode; suppress those warnings.
    with _check_py3k_warnings():
        run_unittest(TestImplementationComparisons)

if __name__ == '__main__':
    test_main()
| true | true |
1c31f6ea35b6973d6c217223464a76192228dec4 | 2,983 | py | Python | blaze/thirdparty/onnx/onnx-1.2.2/onnx/backend/test/case/node/pool_op_common.py | Ru-Xiang/x-deeplearning | 04cc0497150920c64b06bb8c314ef89977a3427a | [
"Apache-2.0"
] | 4,071 | 2018-12-13T04:17:38.000Z | 2022-03-30T03:29:35.000Z | blaze/thirdparty/onnx/onnx-1.2.2/onnx/backend/test/case/node/pool_op_common.py | laozhuang727/x-deeplearning | 781545783a4e2bbbda48fc64318fb2c6d8bbb3cc | [
"Apache-2.0"
] | 359 | 2018-12-21T01:14:57.000Z | 2022-02-15T07:18:02.000Z | blaze/thirdparty/onnx/onnx-1.2.2/onnx/backend/test/case/node/pool_op_common.py | laozhuang727/x-deeplearning | 781545783a4e2bbbda48fc64318fb2c6d8bbb3cc | [
"Apache-2.0"
] | 1,054 | 2018-12-20T09:57:42.000Z | 2022-03-29T07:16:53.000Z | import numpy as np # type: ignore
import itertools
from typing import Text, Sequence
def get_pad_shape(auto_pad,
                  input_spatial_shape,
                  kernel_spatial_shape,
                  strides_spatial,
                  output_spatial_shape
                  ):
    """Return the total padding needed per spatial axis.

    For SAME_UPPER/SAME_LOWER the pad is whatever makes the given output
    shape reachable; VALID (and any other mode) pads nothing.
    """
    if auto_pad in ('SAME_UPPER', 'SAME_LOWER'):
        return [(out - 1) * stride + kernel - inp
                for inp, kernel, stride, out in zip(input_spatial_shape,
                                                    kernel_spatial_shape,
                                                    strides_spatial,
                                                    output_spatial_shape)]
    return [0] * len(input_spatial_shape)
def get_output_shape(auto_pad,
                     input_spatial_shape,
                     kernel_spatial_shape,
                     strides_spatial
                     ):
    """Compute the spatial output shape of a pooling op for auto_pad mode.

    SAME_* keeps ceil(input/stride); VALID shrinks by the kernel extent.
    Any other mode yields all zeros.
    """
    if auto_pad in ('SAME_UPPER', 'SAME_LOWER'):
        return [int(np.ceil(float(inp) / float(stride)))
                for inp, stride in zip(input_spatial_shape, strides_spatial)]
    if auto_pad == 'VALID':
        return [int(np.ceil(float(inp - (kernel - 1)) / float(stride)))
                for inp, kernel, stride in zip(input_spatial_shape,
                                               kernel_spatial_shape,
                                               strides_spatial)]
    return [0] * len(input_spatial_shape)
def pool(padded,
         x_shape,
         kernel_shape,
         strides_shape,
         out_shape,
         pad_shape,
         pooling_type
         ):
    """Reference (pure-Python) N-D pooling over a pre-padded input.

    Args:
        padded: padded input array laid out as (N, C, *spatial).
        x_shape: shape of the *unpadded* input (N, C, *spatial).
        kernel_shape, strides_shape, out_shape, pad_shape: per-spatial-axis
            kernel sizes, strides, output sizes and total paddings.
        pooling_type: 'AVG' or 'MAX'.

    Returns:
        float32 array of shape (N, C, *out_shape).

    Raises:
        NotImplementedError: for any pooling_type other than 'AVG'/'MAX'.
    """
    # Fix: validate pooling_type once up front. The original re-tested it
    # inside every window and silently returned zeros for an invalid type
    # whenever the iteration space was empty.
    if pooling_type == 'AVG':
        f = np.average
    elif pooling_type == 'MAX':
        f = np.max
    else:
        raise NotImplementedError('Pooling type {} does not support. Should be AVG, MAX'.format(pooling_type))
    spatial_size = len(x_shape) - 2
    y = np.zeros([x_shape[0], x_shape[1]] + list(out_shape))
    # Output positions per spatial axis (hoisted out of the product call).
    out_ranges = [range(int((x_shape[i + 2] + pad_shape[i] - kernel_shape[i]) / strides_shape[i] + 1))
                  for i in range(spatial_size)]
    for shape in itertools.product(range(x_shape[0]), range(x_shape[1]), *out_ranges):
        window = padded[shape[0], shape[1]]
        # Collect this window's values from the padded input.
        window_vals = np.array([window[i] for i in itertools.product(
            *[range(strides_shape[i] * shape[i + 2], strides_shape[i] * shape[i + 2] + kernel_shape[i])
              for i in range(spatial_size)])])
        # NaN entries mark padded positions and are excluded from the reduction.
        y[shape] = f(window_vals[~np.isnan(window_vals)])
    return y.astype(np.float32)
| 43.867647 | 117 | 0.555816 | import numpy as np
import itertools
from typing import Text, Sequence
def get_pad_shape(auto_pad,
                  input_spatial_shape,
                  kernel_spatial_shape,
                  strides_spatial,
                  output_spatial_shape
                  ):
    """Total padding per spatial axis for the given auto_pad mode.

    SAME_UPPER/SAME_LOWER: pad enough to reach output_spatial_shape.
    VALID (or any other mode): no padding.
    """
    pad_shape = [0] * len(input_spatial_shape)
    if auto_pad in ('SAME_UPPER', 'SAME_LOWER'):
        for i in range(len(input_spatial_shape)):
            pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial[i] + kernel_spatial_shape[i] - \
                input_spatial_shape[i]
    elif auto_pad == 'VALID':
        pass
    return pad_shape


def get_output_shape(auto_pad,
                     input_spatial_shape,
                     kernel_spatial_shape,
                     strides_spatial
                     ):
    """Spatial output shape of a pooling op for the given auto_pad mode.

    SAME_*: ceil(input / stride). VALID: ceil((input - kernel + 1) / stride).
    Any other mode returns all zeros.
    """
    out_shape = [0] * len(input_spatial_shape)
    if auto_pad in ('SAME_UPPER', 'SAME_LOWER'):
        for i in range(len(input_spatial_shape)):
            out_shape[i] = int(np.ceil(float(input_spatial_shape[i]) / float(strides_spatial[i])))
    elif auto_pad == 'VALID':
        for i in range(len(input_spatial_shape)):
            out_shape[i] = int(
                np.ceil(float(input_spatial_shape[i] - (kernel_spatial_shape[i] - 1)) / float(strides_spatial[i])))
    return out_shape


def pool(padded,
         x_shape,
         kernel_shape,
         strides_shape,
         out_shape,
         pad_shape,
         pooling_type
         ):
    """Reference N-D pooling over a pre-padded (N, C, *spatial) input.

    pooling_type is 'AVG' or 'MAX'; NaN entries (padded positions) are
    excluded from each window's reduction. Returns a float32 array of
    shape (N, C, *out_shape). Raises NotImplementedError for other types.
    """
    spatial_size = len(x_shape) - 2
    y = np.zeros([x_shape[0], x_shape[1]] + list(out_shape))
    # Iterate every (batch, channel, *output-position) tuple.
    for shape in itertools.product(range(x_shape[0]),
                                   range(x_shape[1]),
                                   *[range(
                                       int((x_shape[i + 2] + pad_shape[i] - kernel_shape[i]) / strides_shape[i] + 1))
                                     for i in range(spatial_size)]):
        window = padded[shape[0], shape[1]]
        # Gather the window's values for this output position.
        window_vals = np.array([window[i] for i in list(
            itertools.product(
                *[range(strides_shape[i] * shape[i + 2], strides_shape[i] * shape[i + 2] + kernel_shape[i]) for i in
                  range(spatial_size)])
        )])
        if pooling_type == 'AVG':
            f = np.average
        elif pooling_type == 'MAX':
            f = np.max
        else:
            raise NotImplementedError('Pooling type {} does not support. Should be AVG, MAX'.format(pooling_type))
        y[shape] = f(window_vals[np.where(~np.isnan(window_vals))])
    return y.astype(np.float32)
| true | true |
1c31f80878529767fd649ae96736f4dc633dc1cd | 678 | py | Python | Exercicios/exercicio23.py | Juanszf/CursoEmVideo | 9ea6e7ef24b89c921cf6eb6b647e3ef5f467385e | [
"MIT"
] | null | null | null | Exercicios/exercicio23.py | Juanszf/CursoEmVideo | 9ea6e7ef24b89c921cf6eb6b647e3ef5f467385e | [
"MIT"
] | null | null | null | Exercicios/exercicio23.py | Juanszf/CursoEmVideo | 9ea6e7ef24b89c921cf6eb6b647e3ef5f467385e | [
"MIT"
] | null | null | null | '''Faça um programa que leia do número 0 ao 9999 e mostre cada número na tela separado:
Unidade:
Dezena:
Centana:
Milhar: '''
# Read the number as text so the individual digit characters can be indexed.
num = (input('Digite um número de 0 até 9999\n'))
# Bug fix: isdecimal() (not isnumeric()) guarantees int() will succeed --
# isnumeric() also accepts characters such as '²' that int() rejects.
if num.isdecimal():
    if 0 <= int(num) <= 9999:
        # Walk the digits from least to most significant and label each one;
        # zip stops at four labels, matching the 0-9999 range.
        labels = ('\nUnidade', 'Dezena', 'Centena', 'Milhar')
        for label, digit in zip(labels, reversed(num)):
            print(f'{label}: {digit}')
    else:
        print('Você digitou um número inválido')
else:
    # Fixed grammar in the message: "um texto" (was "uma texto").
    print('Você digitou um texto não numérico')
| 27.12 | 87 | 0.501475 |
# Read the number as text so individual digit characters can be indexed.
num = (input('Digite um número de 0 até 9999\n'))
# y walks negative indices: -1 = units, -2 = tens, -3 = hundreds, -4 = thousands.
y=-1
if num.isnumeric():
    if (int(num)>= 0 and int(num) <= 9999):
        # One iteration per digit; the value of x itself is unused.
        for x in num:
            if y == -1:
                print(f'\nUnidade: {num[y]}')
            elif y == -2:
                print(f'Dezena: {num[y]}')
            elif y == -3:
                print(f'Centena: {num[y]}')
            elif y == -4:
                print(f'Milhar: {num[y]}')
            y=y-1
    else:
        print('Você digitou um número inválido')
else:
    print('Você digitou uma texto não numérico')
| true | true |
1c31f85d6f23045413e37773754560d0392717bf | 1,513 | py | Python | applications/physics/ICF/train_jag_wae.py | vishalbelsare/lbann | c41421b177d8cdd4a0a780d7bb4a35a5a73a2ca2 | [
"Apache-2.0"
] | null | null | null | applications/physics/ICF/train_jag_wae.py | vishalbelsare/lbann | c41421b177d8cdd4a0a780d7bb4a35a5a73a2ca2 | [
"Apache-2.0"
] | null | null | null | applications/physics/ICF/train_jag_wae.py | vishalbelsare/lbann | c41421b177d8cdd4a0a780d7bb4a35a5a73a2ca2 | [
"Apache-2.0"
] | null | null | null | import jag_models
from os.path import abspath, dirname, join
import google.protobuf.text_format as txtf
# ==============================================
# Setup and launch experiment
# ==============================================
# Default data reader
# Resolve the default JAG data-reader prototext relative to this script.
model_zoo_dir = dirname(dirname(abspath(__file__)))
data_reader_prototext = join(model_zoo_dir,
                             'data',
                             'jag_100Kdata.prototext')

if __name__ == '__main__':
    import lbann

    y_dim = 16399  # image+scalar shape
    z_dim = 20  # Latent space dim
    num_epochs = 100
    mini_batch_size = 128
    trainer = lbann.Trainer(mini_batch_size=mini_batch_size,
                            serialize_io=True)
    # Build the JAG Wasserstein autoencoder model.
    model = jag_models.construct_jag_wae_model(y_dim=y_dim,
                                               z_dim=z_dim,
                                               num_epochs=num_epochs)
    # Setup optimizer
    opt = lbann.Adam(learn_rate=0.0001,beta1=0.9,beta2=0.99,eps=1e-8)
    # Load data reader from prototext
    data_reader_proto = lbann.lbann_pb2.LbannPB()
    with open(data_reader_prototext, 'r') as f:
        txtf.Merge(f.read(), data_reader_proto)
    data_reader_proto = data_reader_proto.data_reader
    # Launch the experiment through the slurm scheduler.
    status = lbann.run(trainer,model, data_reader_proto, opt,
                       scheduler='slurm',
                       nodes=1,
                       procs_per_node=1,
                       time_limit=360,
                       job_name='jag_wae')
    print(status)
| 36.02381 | 69 | 0.554527 | import jag_models
from os.path import abspath, dirname, join
import google.protobuf.text_format as txtf
model_zoo_dir = dirname(dirname(abspath(__file__)))
data_reader_prototext = join(model_zoo_dir,
'data',
'jag_100Kdata.prototext')
if __name__ == '__main__':
import lbann
y_dim = 16399
z_dim = 20
num_epochs = 100
mini_batch_size = 128
trainer = lbann.Trainer(mini_batch_size=mini_batch_size,
serialize_io=True)
model = jag_models.construct_jag_wae_model(y_dim=y_dim,
z_dim=z_dim,
num_epochs=num_epochs)
opt = lbann.Adam(learn_rate=0.0001,beta1=0.9,beta2=0.99,eps=1e-8)
data_reader_proto = lbann.lbann_pb2.LbannPB()
with open(data_reader_prototext, 'r') as f:
txtf.Merge(f.read(), data_reader_proto)
data_reader_proto = data_reader_proto.data_reader
status = lbann.run(trainer,model, data_reader_proto, opt,
scheduler='slurm',
nodes=1,
procs_per_node=1,
time_limit=360,
job_name='jag_wae')
print(status)
| true | true |
1c31f9649f50f6ffae3c29c127cb8e14883bb8fd | 258 | py | Python | bayes_race/models/__init__.py | DaniMarts/bayesrace | 3d0d2b26dac2e33ad7e38513304cfb259abe351c | [
"MIT"
] | 23 | 2020-03-27T03:28:04.000Z | 2022-02-24T11:21:18.000Z | bayes_race/models/__init__.py | DaniMarts/bayesrace | 3d0d2b26dac2e33ad7e38513304cfb259abe351c | [
"MIT"
] | 1 | 2021-07-08T22:02:15.000Z | 2021-07-08T22:02:15.000Z | bayes_race/models/__init__.py | DaniMarts/bayesrace | 3d0d2b26dac2e33ad7e38513304cfb259abe351c | [
"MIT"
] | 17 | 2020-10-27T06:09:34.000Z | 2022-03-23T05:28:23.000Z | from bayes_race.models.kinematic import Kinematic
from bayes_race.models.kinematic6 import Kinematic6
from bayes_race.models.dynamic import Dynamic
from bayes_race.models.dynamicst import DynamicsST
from bayes_race.models.frictioncircle import FrictionCircle | 51.6 | 59 | 0.887597 | from bayes_race.models.kinematic import Kinematic
from bayes_race.models.kinematic6 import Kinematic6
from bayes_race.models.dynamic import Dynamic
from bayes_race.models.dynamicst import DynamicsST
from bayes_race.models.frictioncircle import FrictionCircle | true | true |
1c31fa312978d713a42ff2753694165e3abc29be | 5,186 | py | Python | scdiff2/prerun.py | haochenucr/scdiff2 | ebc4149851399b2f15ed5b5874d44764b5f130fb | [
"MIT"
] | 6 | 2020-08-02T23:13:43.000Z | 2021-12-12T03:53:57.000Z | scdiff2/prerun.py | haochenucr/scdiff2 | ebc4149851399b2f15ed5b5874d44764b5f130fb | [
"MIT"
] | 8 | 2020-07-11T12:24:45.000Z | 2021-07-31T04:25:35.000Z | scdiff2/prerun.py | haochenucr/scdiff2 | ebc4149851399b2f15ed5b5874d44764b5f130fb | [
"MIT"
] | 2 | 2020-10-07T22:39:00.000Z | 2022-01-17T20:07:53.000Z | #!/usr/bin/env python
# coding: utf-8
# Author: Jun Ding
# Email: junding (at) cs (dot) cmu (dot) edu
# Date: June. 29th, 2020
#
# This scdiff software suite is desinged to infer the clusters, trajectories, and regulatory
# networks underlying dynamic biological process (e.g., cell differntiation, disease progression)
# based on given time-series single-cell expression input data. Please use "scdiff -h" for the detailed usage.
#
# This scdiff prerun program use scanpy package to learn the initial clusters/trajectories, which will be used as the input
# to the scdiff2 main program to learn the detailed underlying regulatory networks and refined trajectories.
#
# This software is freely avaible for academic uses.
# For any commerical usage, please contact me at the email address above.
# All rights reserved.
# Please don NOT modify the above statement.
# In[1]:
import pdb,sys,os
import anndata
import scanpy as sc
from File import *
import pandas as pd
import argparse
import matplotlib
matplotlib.use('Agg')
def prerun(exFn, outdir, iformat, mindisp, cluRes, skipGeneFilter):
    """First pass of scdiff2: cluster cells and build an initial trajectory.

    Converts a tab-delimited expression table to AnnData, runs the scanpy
    QC / normalization / clustering / PAGA pipeline, and writes the
    annotated result to <outdir>/<input basename>.h5ad.

    Args:
        exFn: path to the tab-delimited single-cell expression file
        outdir: output directory (created if missing)
        iformat: 'raw' for read counts (enables mito QC + normalization),
            anything else is treated as already-normalized input
        mindisp: dispersion cutoff for highly-variable-gene filtering
        cluRes: resolution parameter for leiden clustering
        skipGeneFilter: 'Yes' (any case) skips the gene filtering step

    Returns:
        The processed AnnData object.
    """
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    # Bug fix: the original mixed the full input path and its basename when
    # building file names under outdir, which broke whenever exFn contained
    # directory components. Use the basename consistently.
    base = exFn.split("/")[-1]
    TabFile(exFn).toH5("\t", "%s/%s" % (outdir, base), ['index', 'time', 'label'])
    H5File("%s/%s.h5" % (outdir, base)).toSparseAnnData("%s/%s.h5ad" % (outdir, base), BLOCK=5000)
    # Load the sparse AnnData written above.
    d1 = anndata.read_h5ad("%s/%s.h5ad" % (outdir, base))
    sc.settings.figdir = '%s/figures' % (outdir)
    # --- QC / pre-processing ---
    print("pre-processing...")
    sc.pp.filter_cells(d1, min_genes=200)
    sc.pp.filter_genes(d1, min_cells=3)
    if iformat == 'raw':
        # Mitochondrial genes may be prefixed "MT-" or "MT."; flag either.
        MTFlag1 = d1.var_names.str.upper().str.startswith('MT-')
        MTFlag2 = d1.var_names.str.upper().str.startswith('MT.')
        MTFlag = [bool(a + b) for a, b in zip(MTFlag1, MTFlag2)]
        d1.var['mt'] = MTFlag
        sc.pp.calculate_qc_metrics(d1, qc_vars=['mt'], percent_top=None, log1p=False, inplace=True)
        sc.pl.scatter(d1, x='total_counts', y='pct_counts_mt', show=False, save="_mt.pdf")
        sc.pl.scatter(d1, x='total_counts', y='n_genes_by_counts', show=False, save="_n_genes.pdf")
        # Drop cells with >= 40% mitochondrial reads.
        d1 = d1[d1.obs.pct_counts_mt < 40, :]
        # NOTE(review): normalization/log1p applied to raw counts only;
        # 'norm' input is assumed already normalized -- confirm upstream.
        sc.pp.normalize_total(d1, target_sum=1e4)
        sc.pp.log1p(d1)
    # Optionally keep only highly variable genes (dispersion >= mindisp).
    # Comparison is case-insensitive (the original only accepted Yes/YES).
    if str(skipGeneFilter).upper() != 'YES':
        sc.pp.highly_variable_genes(d1, min_mean=0.0125, max_mean=5, min_disp=mindisp)
        sc.pl.highly_variable_genes(d1, show=False, save=".pdf")
        d1 = d1[:, d1.var.highly_variable]
    # --- dimensionality reduction, neighborhood graph, trajectory ---
    sc.tl.pca(d1, svd_solver='arpack')
    sc.pp.neighbors(d1, n_neighbors=15, n_pcs=50)
    sc.tl.diffmap(d1)
    # Clustering and PAGA trajectory, then a PAGA-initialized UMAP.
    sc.tl.leiden(d1, resolution=cluRes)
    sc.tl.paga(d1)
    sc.pl.paga(d1, show=False, save="_Traj.pdf")
    sc.tl.umap(d1, init_pos='paga')
    sc.pl.umap(d1, color=['leiden', 'time'], legend_loc='on data', show=False, save="_clustering.pdf")
    # Per-cluster differentially expressed genes.
    sc.tl.rank_genes_groups(d1, 'leiden', method='wilcoxon')
    sc.pl.rank_genes_groups(d1, n_genes=25, sharey=False, show=False, save="_global_DE_genes.pdf")
    d1.write_h5ad("%s/%s.h5ad" % (outdir, base), compression=9)
    print("\n\n>>>>------------------------------------------------<<<<")
    print("prerun completed! please run scdiff2 for the second pass")
    return d1
def main():
    """Command-line entry point: parse arguments and launch the pre-run step."""
    parser = argparse.ArgumentParser(description="scdiff2 pre-run")
    # Pop one of argparse's default groups so only the custom
    # required/optional groups are shown in --help output.
    parser._action_groups.pop()
    required = parser.add_argument_group('required arguments')
    optional = parser.add_argument_group('optional arguments')
    required.add_argument('-i', '--input', required=True, help='input single cell RNA-seq expression data')
    required.add_argument('-o', '--output', required=True, help='output directory')
    optional.add_argument('-f', '--format', required=False, default='raw', help='the format of input expression, either raw/norm (raw: raw read counts, norm: normalized expression')
    optional.add_argument('--mindisp', required=False, default=0.15, help='the dispersion cutoff to filter genes (genes with dipsersion < this cutoff will be filtered')
    optional.add_argument('--cluRes', required=False, default=1, help="The resolution parameter for the leiden clustering method")
    optional.add_argument('--skipGeneFilter', required=False, default=None, help="whether to skip the gene filtering (Yes to skip)")
    args = parser.parse_args()
    # Coerce the numeric options once and hand everything to the pipeline.
    prerun(args.input,
           args.output,
           args.format,
           float(args.mindisp),
           float(args.cluRes),
           args.skipGeneFilter)
# Script entry point: run the argument parser / pre-run pipeline when
# executed directly (and not when imported as a module).
if __name__=="__main__":
    main()
| 42.162602 | 179 | 0.688199 |
import pdb,sys,os
import anndata
import scanpy as sc
from File import *
import pandas as pd
import argparse
import matplotlib
matplotlib.use('Agg')
def prerun(exFn,outdir,iformat,mindisp,cluRes,skipGeneFilter):
os.mkdir(outdir)
TabFile(exFn).toH5("\t","%s/%s"%(outdir,exFn.split("/")[-1]),['index','time','label'])
H5File("%s/%s.h5"%(outdir,exFn)).toSparseAnnData("%s/%s.h5ad"%(outdir,exFn),BLOCK=5000)
dir,exFn))
sc.settings.figdir = '%s/figures'%(outdir)
essing...")
sc.pp.filter_cells(d1,min_genes=200)
sc.pp.filter_genes(d1,min_cells=3)
if iformat=='raw':
MTFlag1=d1.var_names.str.upper().str.startswith('MT-')
MTFlag2=d1.var_names.str.upper().str.startswith('MT.')
MTFlag=[bool(a+b) for a,b in zip(MTFlag1,MTFlag2)]
d1.var['mt'] = MTFlag
vars=['mt'], percent_top=None, log1p=False, inplace=True)
sc.pl.scatter(d1, x='total_counts', y='pct_counts_mt',show=False, save="_mt.pdf")
sc.pl.scatter(d1, x='total_counts', y='n_genes_by_counts',show=False, save="_n_genes.pdf")
d1 = d1[d1.obs.pct_counts_mt < 40, :]
sc.pp.normalize_total(d1, target_sum=1e4)
sc.pp.log1p(d1)
skipGeneFilter!='YES'):
sc.pp.highly_variable_genes(d1, min_mean=0.0125, max_mean=5, min_disp=mindisp)
sc.pl.highly_variable_genes(d1,show=False, save=".pdf")
d1 = d1[:, d1.var.highly_variable]
='arpack')
rs=15, n_pcs=50)
sc.tl.diffmap(d1)
n(d1,resolution=cluRes)
sc.tl.paga(d1)
sc.pl.paga(d1,show=False,save="_Traj.pdf")
sc.tl.umap(d1,init_pos='paga')
sc.pl.umap(d1,color=['leiden','time'],legend_loc='on data',show=False,save="_clustering.pdf")
n', method='wilcoxon')
sc.pl.rank_genes_groups(d1, n_genes=25, sharey=False,show=False, save="_global_DE_genes.pdf")
d1.write_h5ad("%s/%s.h5ad"%(outdir,exFn),compression=9)
print("\n\n>>>>------------------------------------------------<<<<")
print("prerun completed! please run scdiff2 for the second pass")
return d1
def main():
parser=argparse.ArgumentParser(description="scdiff2 pre-run")
parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
optional = parser.add_argument_group('optional arguments')
required.add_argument('-i','--input',required=True,help='input single cell RNA-seq expression data')
required.add_argument('-o','--output',required=True,help='output directory')
optional.add_argument('-f','--format',required=False, default='raw', help='the format of input expression, either raw/norm (raw: raw read counts, norm: normalized expression')
optional.add_argument('--mindisp',required=False,default=0.15,help='the dispersion cutoff to filter genes (genes with dipsersion < this cutoff will be filtered')
optional.add_argument('--cluRes',required=False, default=1, help="The resolution parameter for the leiden clustering method")
optional.add_argument('--skipGeneFilter', required=False, default=None, help="whether to skip the gene filtering (Yes to skip)")
args = parser.parse_args()
exFn=args.input
outdir=args.output
iformat=args.format
mindisp=float(args.mindisp)
cluRes=float(args.cluRes)
skipGeneFilter=args.skipGeneFilter
prerun(exFn,outdir,iformat,mindisp,cluRes,skipGeneFilter)
if __name__=="__main__":
main()
| true | true |
1c31fa3d8622c8b1cdd04a8dc4e6ff913cb0eb43 | 16,761 | py | Python | CompuRacer_Core/src/batch_sender_async.py | computestdev/CompuRacer | c212c4b582ae0b6316a73ecd6868b6b69de224a6 | [
"MIT"
] | 1 | 2021-12-16T16:22:28.000Z | 2021-12-16T16:22:28.000Z | CompuRacer_Core/src/batch_sender_async.py | computestdev/CompuRacer | c212c4b582ae0b6316a73ecd6868b6b69de224a6 | [
"MIT"
] | null | null | null | CompuRacer_Core/src/batch_sender_async.py | computestdev/CompuRacer | c212c4b582ae0b6316a73ecd6868b6b69de224a6 | [
"MIT"
] | 2 | 2019-05-23T09:06:25.000Z | 2021-07-07T10:33:58.000Z | #!/usr/bin/env python3
"""
The batch async sender file contains functions for sending a Batch asynchronously and very quickly.
It will encode the requests that are send and also read and decode the results.
"""
# --- All imports --- #
import asyncio
import base64
import binascii
import copy
import datetime
import json
import pprint
import random
import sys
import time
import urllib
from collections import defaultdict
from async_timeout import timeout as async_timeout
import src.aiohttp as aiohttp
import chardet
import uvloop
from src.aiohttp import ClientSession
from aiohttp_socks import SocksConnector
import src.utils as utils
from tqdm import tqdm
from .batch import Batch
uvloop.install()
progress_bar_width = 100
# todo move to utils
def get_time_ns():
    """Return the current wall-clock time in nanoseconds since the epoch.

    Uses :func:`time.time_ns` when available for full nanosecond resolution
    and falls back to scaling :func:`time.time` on older interpreters.
    """
    # Bug fix: time.time_ns() was introduced in Python 3.7, not 3.6, so a
    # version check of >= (3, 6) raised AttributeError on 3.6.
    # Feature-detect instead of version-checking.
    if hasattr(time, "time_ns"):
        return time.time_ns()
    return time.time() * 1e9
def __decode_response(response):
    """Decode the raw byte payloads of a captured response in place.

    Replaces the temporary ``headers_temp``/``body_temp`` entries with
    decoded ``headers`` and ``body`` fields and returns the same dict.
    Duplicate header names get a numeric suffix ("-2", "-3", ...).
    """
    raw_headers = response.pop('headers_temp')
    decoded_headers = {}
    if raw_headers and len(raw_headers) > 0:
        # Guess a single encoding from the first header name and reuse it
        # for every header of this response.
        header_encoding = chardet.detect(raw_headers[0][0])['encoding']
        seen = {}
        for raw_name, raw_value in sorted(raw_headers, key=lambda h: (h[0], h[1])):
            name = raw_name.decode(header_encoding)
            value = raw_value.decode(header_encoding)
            if name in seen:
                # Duplicate header name: keep it by appending a counter.
                seen[name] += 1
                name = f"{name}-{seen[name]}"
            else:
                seen[name] = 1
            decoded_headers[name] = value
    response['headers'] = decoded_headers

    raw_body = response.pop('body_temp')
    decoded_body = {}
    if raw_body:
        body_encoding = chardet.detect(raw_body)['encoding']
        if body_encoding is None:
            # Encoding could not be guessed: keep the raw bytes as-is.
            decoded_body = raw_body
        else:
            try:
                decoded_body = raw_body.decode(body_encoding)
            except UnicodeDecodeError as _:
                # chardet's guess can still fail to decode (e.g. 'charmap'
                # codec errors); fall back to the raw bytes.
                decoded_body = raw_body
    response['body'] = decoded_body
    return response
async def __read_response(response, send_time, response_time):
    """Capture status, raw headers and raw body of a received response.

    Header and body bytes are kept undecoded under ``*_temp`` keys so the
    (slower) decoding can happen after the time-critical receive phase.
    """
    return {
        'send_time': send_time,
        'response_time': response_time,
        'status_code': response.status,
        # Keep the raw header pairs so duplicate headers (e.g. multiple
        # Set-Cookie entries) are preserved.
        'headers_temp': list(response.raw_headers),
        # read(-1) consumes the body to EOF.
        'body_temp': await response.content.read(-1),
    }
async def __my_own_sleep(wait_until):
    """Sleep until ``wait_until`` (epoch milliseconds) with high accuracy.

    Sleeps cooperatively until roughly 20 ms before the deadline, then
    busy-waits the remainder synchronously, because asyncio.sleep alone is
    not precise enough for tightly-timed race sending.
    """
    coarse_ms = wait_until - get_time_ns() / 1e6 - 20
    if coarse_ms > 0:
        # Longest part: yield to the event loop.
        await asyncio.sleep(coarse_ms / 1000)
    # Spin for the final <= 20 ms for a more accurate release time.
    while get_time_ns() / 1e6 < wait_until:
        pass
async def __a_sup_request(request_id, a_prepared_request, wait_time, wait_until, duplication, timeout, session):
    # Waits until `wait_until` (epoch ms), then sends `a_prepared_request`
    # `duplication` times in sequence over `session`.
    # Returns [(request_id, wait_time), responses] where responses is a list
    # of raw (not yet decoded) response dicts, or
    # [(request_id, wait_time), exception] as soon as any send fails.
    # NOTE(review): the ClientConnectorError / TimeoutError clauses are
    # shadowed in effect by the generic `except Exception` below — all three
    # return the same shape.
    responses = []
    await __my_own_sleep(wait_until)
    for dup in range(duplication):
        # run dups sequentially
        try:
            async with async_timeout(timeout) as cm:
                send_time = str(datetime.datetime.now())
                async with session.request(**a_prepared_request) as response:
                    responses.append(await __read_response(response, send_time, str(datetime.datetime.now())))
            if cm.expired:
                raise Exception(f"Timeout of {timeout} seconds reached!")
        except aiohttp.client_exceptions.ClientConnectorError as e:
            return [(request_id, wait_time), e]
        except asyncio.TimeoutError as e:
            return [(request_id, wait_time), e]
        except Exception as e:
            return [(request_id, wait_time), e]
    return [(request_id, wait_time), responses]  # are not decoded yet
def __prepare_request(the_request, allow_redirects, final_byte_time=None):
    # Converts a stored request dict into keyword arguments for
    # aiohttp ClientSession.request(): rewrites localhost to 127.0.0.1,
    # url-decodes the Cookie header, rebuilds the body depending on
    # Content-Type (JSON, multipart/form-data from the Burp or Chrome
    # plugin, or url-encoded), recomputes Content-Length and optionally
    # attaches `final_byte_time` for last-byte-sync sending.
    # The input dict is deep-copied and never mutated.
    a_request = copy.deepcopy(the_request)
    request_content = {'method': a_request['method'],
                       'url': a_request['url'].replace("http://localhost", "http://127.0.0.1"),
                       'headers': a_request['headers'],
                       'allow_redirects': allow_redirects
                       }
    # decode cookie header if necessary
    if 'Cookie' in a_request['headers']:
        request_content['headers']['Cookie'] = urllib.parse.unquote(a_request['headers']['Cookie'])
    # decode and restore content if necessary
    if 'Content-Type' in a_request['headers']:
        if "json" in a_request['headers']['Content-Type'].lower() \
                and type(a_request['body']) is str \
                and a_request['body']:
            request_content['json'] = utils.read_json(a_request['body'])
        else:
            # A dict body is flattened into "key=value&..." form first.
            if type(a_request['body']) is dict:
                new_body = ""
                for key in a_request['body'].keys():
                    new_body += f"{key}={a_request['body'][key]}&"
                a_request['body'] = new_body
            if a_request['headers']['Content-Type'].startswith("multipart/form-data"):
                if a_request['body'].startswith("BASE64="):
                    # it came from the Burp plugin (base 64 encoded)
                    try:
                        body = base64.b64decode(str(a_request['body'].replace("BASE64=", "")))
                    except binascii.Error:
                        # conversion failed, is probably just string data
                        body = a_request['body']
                else:
                    # it came from the Chrome plugin (url encoded):
                    # rebuild a multipart body using the boundary taken from
                    # the Content-Type header.
                    parts = [item.split("=") for item in a_request['body'].split("&")][:-1]
                    separator = "--" + a_request['headers']['Content-Type'].split("=")[1]
                    body = ""
                    for part in parts:
                        body += separator + "\r\n"
                        body += f"Content-Disposition: form-data; name=\"{urllib.parse.unquote(part[0])}\"\r\n\r\n"
                        body += urllib.parse.unquote(part[1]) + "\r\n"
                    body += separator + "--" + "\r\n"
                    body = str.encode(body)
            elif a_request['headers']['Content-Type'].startswith("application/x-www-form-urlencoded"):
                # body = urllib.parse.unquote(a_request['body'])
                body = a_request['body']
                pass
            else:
                body = a_request['body']
            request_content['data'] = body
    # re-calculate content length
    # NOTE(review): len() on a str body counts characters, not bytes — this
    # assumes effectively single-byte payloads; TODO confirm for non-ASCII.
    if 'Content-Length' in request_content['headers']:
        len_data = 0
        if 'data' in request_content:
            len_data = len(request_content['data'])
        elif 'json' in request_content:
            len_data = len(json.dumps(request_content['json']))
        if len_data != int(request_content['headers']['Content-Length']):
            request_content['headers']['Content-Length'] = str(len_data)
    # add final byte time
    if final_byte_time is not None:
        request_content['final_byte_time'] = final_byte_time
    return request_content
# Shuffled so that duplicates of one request do not all go out before the
# duplicates of another. # todo does this work well enough?
def prepare_sending_order(items):
    """Build a shuffled send order for a batch.

    ``items`` maps ``(request_id, wait_time)`` keys to value tuples whose
    first element is the number of parallel sends; every key is repeated
    that many times and the resulting list is shuffled in place so the
    duplicates of different requests interleave.
    """
    order = [key for key, values in items.items() for _ in range(values[0])]
    random.shuffle(order)
    return order
async def run(batch, requests, proxy):
    """Send all requests of `batch` asynchronously and collect the responses.

    Prepares one request dict per request id, schedules one task per
    (request_id, wait_time) entry of ``batch.items``, awaits them all inside
    a single ClientSession, then decodes the responses and groups them per
    request id (sorted by send time). Returns a dict with 'start_time',
    'end_time' and 'contents'. `proxy`, when not None, is used as a SOCKS
    proxy URL.
    """
    # Create client session that will ensure we don't open a new connection per each request.
    # todo It is synced on the whole second part of the wall clock time to make testing in Wireshark easier.
    # todo This results in at most 1.5 seconds delay and can be removed later on
    wait_always = 1000 # msec, to ensure all async tasks (also with wait_time = 0) are able to make this deadline
    wait_final_byte = 5000 # this is how long we wait until the final byte is sent
    ns = get_time_ns()
    start_time = round(ns / 1e9) * 1e3 + wait_always
    start_time_str = str(datetime.datetime.fromtimestamp(start_time / 1000))
    print(f"Start sending time: {start_time_str}", end="")
    # prepare requests
    prepared_requests = {}
    req_ids = batch.get_reqs()
    for req_id in req_ids:
        if batch.sync_last_byte:
            last_byte_time = start_time + wait_final_byte
            print("\tlast byte time: " + str(datetime.datetime.fromtimestamp(last_byte_time / 1000)))
        else:
            last_byte_time = None
            print()
        prepared_requests[req_id] = __prepare_request(requests[req_id], batch.allow_redirects, last_byte_time)
    tasks = []
    if proxy is not None:
        connector = SocksConnector.from_url(proxy, verify_ssl=False)
    else:
        connector = aiohttp.TCPConnector(verify_ssl=False)
    async with ClientSession(connector=connector) as session:
        send_order = prepare_sending_order(batch.items)
        for key in send_order:
            wait_time = key[1]
            wait_until = start_time + wait_time
            values = batch.items[key]
            a_prepared_request = copy.deepcopy(prepared_requests[key[0]])
            # add wait_time to final_byte_time
            if 'final_byte_time' in a_prepared_request:
                a_prepared_request['final_byte_time'] += wait_time
            # resolve url to ip
            # todo a_request['url'] = await resolve_all_to_ip(loop, [f"{a_request['url'].split('//')[0]}//{a_request['url'].split('//')[1].split('/')[0]}"])
            # send request
            # print(f"Sending ({values[1]}x): {utils.get_req_string(requests[key[0]], True, ['timestamp'])}")
            tasks.append(asyncio.ensure_future(__a_sup_request(key[0], a_prepared_request, wait_time,
                                                               wait_until, values[1], batch.get_send_timeout(), session)))
        # results = await asyncio.gather(*tasks)
        results = [await f for f in tqdm(asyncio.as_completed(tasks),
                                         total=len(tasks),
                                         desc="Receiving ",
                                         ncols=progress_bar_width)]
    # decode all responses
    responses_decoded = {'start_time': start_time_str,
                         'end_time': str(datetime.datetime.fromtimestamp(round(get_time_ns() / 1e9))),
                         'contents': defaultdict(list)}
    errors = ""
    for i, result in enumerate(tqdm(results,
                                    desc="Processing",
                                    ncols=progress_bar_width)):
        # Failed sends carry the exception in result[1]; collect and report
        # them after the progress bar finishes.
        if isinstance(result[1], Exception):
            errors += f"Error in sending request {i} :\n{utils.tabbed_pprint_string(result, 1)}\n"
            continue
        for j, response in enumerate(result[1]):
            response_decoded = __decode_response(response)
            response_decoded['wait_time'] = result[0][1]
            response_decoded['send_index'] = j
            responses_decoded['contents'][result[0][0]].append(copy.deepcopy(response_decoded))
    time.sleep(0.1)
    print(errors)
    # sort lists to send_time
    for request_id in responses_decoded['contents'].keys():
        responses_decoded['contents'][request_id] = sorted(responses_decoded['contents'][request_id],
                                                           key=lambda x: x['send_time'])
    return responses_decoded
# todo move to utils
def get_loop(my_loop=None):
    """Return ``(loop, created)`` — reuse ``my_loop`` when given, else create one.

    A freshly created loop is also installed as the current event loop;
    ``created`` tells the caller whether it owns (and must later stop) it.
    """
    if my_loop:
        return my_loop, False
    fresh = asyncio.new_event_loop()
    asyncio.set_event_loop(fresh)
    return fresh, True
# todo move to utils
def stop_loop(my_loop):
    """Stop and then close the given asyncio event loop."""
    my_loop.stop()
    my_loop.close()
def send_batch(batch, the_requests, proxy=None, my_loop=None):
    """Send one batch synchronously and return the parsed response dict.

    Drives the async ``run`` coroutine on ``my_loop``; when no loop is
    given, a new one is created for the call and stopped afterwards.
    """
    loop, owns_loop = get_loop(my_loop)
    result = loop.run_until_complete(asyncio.ensure_future(run(batch, the_requests, proxy)))
    if owns_loop:
        stop_loop(loop)
    return result
def send_batches(batches, the_requests, proxy=None, my_loop=None):
    """Send several batches sequentially, sharing a single event loop."""
    loop, owns_loop = get_loop(my_loop)
    results = [send_batch(one_batch, the_requests, proxy, loop) for one_batch in batches]
    if owns_loop:
        stop_loop(loop)
    return results
# todo move to dedicated attack class?
def attack_session_puzzling(create_account_req, login_req):
    """Run a session-puzzling race test using two freshly generated accounts.

    ``create_account_req`` and ``login_req`` are request-template dicts whose
    'body' contains positional ``{}`` format placeholders (username,
    password[, password confirmation]). Two accounts are registered 100 ms
    apart, then both logins are raced (10 parallel sends each).
    Returns the list of per-batch result dicts from ``send_batches``.
    """
    # Bug fix: message typo "stated" -> "started".
    print("sessions puzzling attack started..")
    # define two random accounts
    creds = utils.random_user_credentials(2, 10)
    # create requests
    requests = dict({"c1": None, "c2": None, "l1": None, "l2": None})
    requests['c1'] = copy.deepcopy(create_account_req)
    requests['c2'] = copy.deepcopy(create_account_req)
    requests['c1']['body'] = create_account_req['body'].format(creds[0]['username'], creds[0]['password'],
                                                               creds[0]['password'])
    requests['c2']['body'] = create_account_req['body'].format(creds[1]['username'], creds[1]['password'],
                                                               creds[1]['password'])
    requests['l1'] = copy.deepcopy(login_req)
    requests['l2'] = copy.deepcopy(login_req)
    requests['l1']['body'] = login_req['body'].format(creds[0]['username'], creds[0]['password'])
    requests['l2']['body'] = login_req['body'].format(creds[1]['username'], creds[1]['password'])
    # create batches
    batches = list()
    batches.append(Batch("create_accounts"))
    batches[-1].add('c1', 0, 1, 1)
    batches[-1].add('c2', 100, 1, 1)
    batches.append(Batch("login_and_check", allow_redirects=True))
    batches[-1].add('l1', 0, 10, 1)
    batches[-1].add('l2', 0, 10, 1)
    # start attack
    # Bug fix: the status message was built with pprint.pformat() but its
    # return value was discarded, so nothing was ever shown — print it.
    print("Sending attack payload..")
    results = send_batches(batches, requests)
    # show results
    print(pprint.pformat(f"Results:\n{results}"))
    return results
if __name__ == "__main__":
    # Manual smoke-test data: two requests captured against a local WebGoat
    # instance — "1" is a login POST, "2" is an account-registration POST.
    # Their bodies contain {} placeholders for credentials, matching the
    # format expected by attack_session_puzzling() above.
    my_requests = {
        "1": {
            "body": "username={}&password={}&",
            "headers": {
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
                "Accept-Encoding": "gzip, deflate, br",
                "Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8,nl;q=0.7",
                "Content-Type": "application/x-www-form-urlencoded",
                "Cookie": "JSESSIONID=6731A59A338A1A6104DEF9E879296BF1",
                "Origin": "http://127.0.0.1:8090",
                "Referer": "http://127.0.0.1:8090/WebGoat/login",
                "Upgrade-Insecure-Requests": "1",
                "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36"
            },
            "method": "POST",
            "timestamp": 1543315415.7996092,
            "url": "http://127.0.0.1:8090/WebGoat/login",
            "id": 2
        },
        "2": {
            "body": "agree=agree&username={}&password={}&matchingPassword={}&",
            "headers": {
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
                "Accept-Encoding": "gzip, deflate, br",
                "Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8,nl;q=0.7",
                "Content-Type": "application/x-www-form-urlencoded",
                "Cookie": "JSESSIONID=2639A17BBAF4BAA4DE0258F80C0F82E4",
                "Origin": "http://127.0.0.1:8090",
                "Referer": "http://127.0.0.1:8090/WebGoat/registration",
                "Upgrade-Insecure-Requests": "1",
                "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36"
            },
            "method": "POST",
            "timestamp": 1543314627.112285,
            "url": "http://127.0.0.1:8090/WebGoat/register.mvc",
            "id": 1
        }
    }
    # Example invocations kept for reference (disabled by default):
    # add a single request
    # batch = Batch("lol")
    # batch.add("2", 0, 20, 1)
    # send_batch(batch, my_requests)
    # results = attack_session_puzzling(my_requests["2"], my_requests["1"])
| 42.325758 | 156 | 0.601873 |
import asyncio
import base64
import binascii
import copy
import datetime
import json
import pprint
import random
import sys
import time
import urllib
from collections import defaultdict
from async_timeout import timeout as async_timeout
import src.aiohttp as aiohttp
import chardet
import uvloop
from src.aiohttp import ClientSession
from aiohttp_socks import SocksConnector
import src.utils as utils
from tqdm import tqdm
from .batch import Batch
uvloop.install()
progress_bar_width = 100
def get_time_ns():
if sys.version_info >= (3, 6):
return time.time_ns()
else:
return time.time() * 1e9
def __decode_response(response):
response['headers'] = {}
if response['headers_temp'] and len(response['headers_temp']) > 0:
encoding = chardet.detect(response['headers_temp'][0][0])['encoding']
dups = {}
for header_item in sorted(response['headers_temp'], key=lambda x: (x[0], x[1])):
header_item_decoded = [header_item[0].decode(encoding), header_item[1].decode(encoding)]
if header_item_decoded[0] in dups:
dups[header_item_decoded[0]] += 1
header_item_decoded[0] += f"-{dups[header_item_decoded[0]]}"
else:
dups[header_item_decoded[0]] = 1
response['headers'][header_item_decoded[0]] = header_item_decoded[1]
del response['headers_temp']
response['body'] = {}
if response['body_temp']:
encoding = chardet.detect(response['body_temp'])['encoding']
if encoding is None:
response['body'] = response['body_temp']
else:
try:
response['body'] = response['body_temp'].decode(encoding)
except UnicodeDecodeError as _:
# 0x9d in position 966: character maps to <undefined>"
response['body'] = response['body_temp']
del response['body_temp']
return response
async def __read_response(response, send_time, response_time):
result = dict({'send_time': send_time, 'response_time': response_time})
result['status_code'] = response.status
# read headers (fixed bug, allowed only one set-cookie header)
result['headers_temp'] = list(response.raw_headers)
# read body
result['body_temp'] = await response.content.read(-1)
return result
async def __my_own_sleep(wait_until):
# get sleep time minus 20 ms
sleep_time = wait_until - get_time_ns() / 1e6 - 20
# wait longest part async
if sleep_time > 0:
await asyncio.sleep(sleep_time / 1000)
# wait last 20 ms or less synchronously for more accuracy
while wait_until - get_time_ns() / 1e6 > 0:
pass
async def __a_sup_request(request_id, a_prepared_request, wait_time, wait_until, duplication, timeout, session):
responses = []
await __my_own_sleep(wait_until)
for dup in range(duplication):
# run dups sequentially
try:
async with async_timeout(timeout) as cm:
send_time = str(datetime.datetime.now())
async with session.request(**a_prepared_request) as response:
responses.append(await __read_response(response, send_time, str(datetime.datetime.now())))
if cm.expired:
raise Exception(f"Timeout of {timeout} seconds reached!")
except aiohttp.client_exceptions.ClientConnectorError as e:
return [(request_id, wait_time), e]
except asyncio.TimeoutError as e:
return [(request_id, wait_time), e]
except Exception as e:
return [(request_id, wait_time), e]
return [(request_id, wait_time), responses] # are not decoded yet
def __prepare_request(the_request, allow_redirects, final_byte_time=None):
a_request = copy.deepcopy(the_request)
request_content = {'method': a_request['method'],
'url': a_request['url'].replace("http://localhost", "http://127.0.0.1"),
'headers': a_request['headers'],
'allow_redirects': allow_redirects
}
# decode cookie header if necessary
if 'Cookie' in a_request['headers']:
request_content['headers']['Cookie'] = urllib.parse.unquote(a_request['headers']['Cookie'])
# decode and restore content if necessary
if 'Content-Type' in a_request['headers']:
if "json" in a_request['headers']['Content-Type'].lower() \
and type(a_request['body']) is str \
and a_request['body']:
request_content['json'] = utils.read_json(a_request['body'])
else:
if type(a_request['body']) is dict:
new_body = ""
for key in a_request['body'].keys():
new_body += f"{key}={a_request['body'][key]}&"
a_request['body'] = new_body
if a_request['headers']['Content-Type'].startswith("multipart/form-data"):
if a_request['body'].startswith("BASE64="):
# it came from the Burp plugin (base 64 encoded)
try:
body = base64.b64decode(str(a_request['body'].replace("BASE64=", "")))
except binascii.Error:
# conversion failed, is probably just string data
body = a_request['body']
else:
# it came from the Chrome plugin (url encoded)
parts = [item.split("=") for item in a_request['body'].split("&")][:-1]
separator = "--" + a_request['headers']['Content-Type'].split("=")[1]
body = ""
for part in parts:
body += separator + "\r\n"
body += f"Content-Disposition: form-data; name=\"{urllib.parse.unquote(part[0])}\"\r\n\r\n"
body += urllib.parse.unquote(part[1]) + "\r\n"
body += separator + "--" + "\r\n"
body = str.encode(body)
elif a_request['headers']['Content-Type'].startswith("application/x-www-form-urlencoded"):
# body = urllib.parse.unquote(a_request['body'])
body = a_request['body']
pass
else:
body = a_request['body']
request_content['data'] = body
# re-calculate content length
if 'Content-Length' in request_content['headers']:
len_data = 0
if 'data' in request_content:
len_data = len(request_content['data'])
elif 'json' in request_content:
len_data = len(json.dumps(request_content['json']))
if len_data != int(request_content['headers']['Content-Length']):
request_content['headers']['Content-Length'] = str(len_data)
# add final byte time
if final_byte_time is not None:
request_content['final_byte_time'] = final_byte_time
return request_content
# added shuffle to avoid sending all dups of one request before the other
# todo does this work well enough?
def prepare_sending_order(items):
send_order = list(items.keys())
full_send_order = []
for key in send_order:
for i in range(items[key][0]):
full_send_order.append(key)
# randomly shuffle the list
random.shuffle(full_send_order)
return full_send_order
async def run(batch, requests, proxy):
# Create client session that will ensure we don't open a new connection per each request.
wait_always = 1000
wait_final_byte = 5000
ns = get_time_ns()
start_time = round(ns / 1e9) * 1e3 + wait_always
start_time_str = str(datetime.datetime.fromtimestamp(start_time / 1000))
print(f"Start sending time: {start_time_str}", end="")
prepared_requests = {}
req_ids = batch.get_reqs()
for req_id in req_ids:
if batch.sync_last_byte:
last_byte_time = start_time + wait_final_byte
print("\tlast byte time: " + str(datetime.datetime.fromtimestamp(last_byte_time / 1000)))
else:
last_byte_time = None
print()
prepared_requests[req_id] = __prepare_request(requests[req_id], batch.allow_redirects, last_byte_time)
tasks = []
if proxy is not None:
connector = SocksConnector.from_url(proxy, verify_ssl=False)
else:
connector = aiohttp.TCPConnector(verify_ssl=False)
async with ClientSession(connector=connector) as session:
send_order = prepare_sending_order(batch.items)
for key in send_order:
wait_time = key[1]
wait_until = start_time + wait_time
values = batch.items[key]
a_prepared_request = copy.deepcopy(prepared_requests[key[0]])
if 'final_byte_time' in a_prepared_request:
a_prepared_request['final_byte_time'] += wait_time
tasks.append(asyncio.ensure_future(__a_sup_request(key[0], a_prepared_request, wait_time,
wait_until, values[1], batch.get_send_timeout(), session)))
results = [await f for f in tqdm(asyncio.as_completed(tasks),
total=len(tasks),
desc="Receiving ",
ncols=progress_bar_width)]
responses_decoded = {'start_time': start_time_str,
'end_time': str(datetime.datetime.fromtimestamp(round(get_time_ns() / 1e9))),
'contents': defaultdict(list)}
errors = ""
for i, result in enumerate(tqdm(results,
desc="Processing",
ncols=progress_bar_width)):
if isinstance(result[1], Exception):
errors += f"Error in sending request {i} :\n{utils.tabbed_pprint_string(result, 1)}\n"
continue
for j, response in enumerate(result[1]):
response_decoded = __decode_response(response)
response_decoded['wait_time'] = result[0][1]
response_decoded['send_index'] = j
responses_decoded['contents'][result[0][0]].append(copy.deepcopy(response_decoded))
time.sleep(0.1)
print(errors)
for request_id in responses_decoded['contents'].keys():
responses_decoded['contents'][request_id] = sorted(responses_decoded['contents'][request_id],
key=lambda x: x['send_time'])
return responses_decoded
def get_loop(my_loop=None):
new_loop = not my_loop
if not my_loop:
my_loop = asyncio.new_event_loop()
asyncio.set_event_loop(my_loop)
return my_loop, new_loop
def stop_loop(my_loop):
my_loop.stop()
my_loop.close()
def send_batch(batch, the_requests, proxy=None, my_loop=None):
my_loop, new_loop = get_loop(my_loop)
future = asyncio.ensure_future(run(batch, the_requests, proxy))
res_parsed = my_loop.run_until_complete(future)
if new_loop:
stop_loop(my_loop)
return res_parsed
def send_batches(batches, the_requests, proxy=None, my_loop=None):
my_loop, new_loop = get_loop(my_loop)
results = []
for batch in batches:
results.append(send_batch(batch, the_requests, proxy, my_loop))
if new_loop:
stop_loop(my_loop)
return results
def attack_session_puzzling(create_account_req, login_req):
print("sessions puzzling attack stated..")
creds = utils.random_user_credentials(2, 10)
requests = dict({"c1": None, "c2": None, "l1": None, "l2": None})
requests['c1'] = copy.deepcopy(create_account_req)
requests['c2'] = copy.deepcopy(create_account_req)
requests['c1']['body'] = create_account_req['body'].format(creds[0]['username'], creds[0]['password'],
creds[0]['password'])
requests['c2']['body'] = create_account_req['body'].format(creds[1]['username'], creds[1]['password'],
creds[1]['password'])
requests['l1'] = copy.deepcopy(login_req)
requests['l2'] = copy.deepcopy(login_req)
requests['l1']['body'] = login_req['body'].format(creds[0]['username'], creds[0]['password'])
requests['l2']['body'] = login_req['body'].format(creds[1]['username'], creds[1]['password'])
batches = list()
batches.append(Batch("create_accounts"))
batches[-1].add('c1', 0, 1, 1)
batches[-1].add('c2', 100, 1, 1)
batches.append(Batch("login_and_check", allow_redirects=True))
batches[-1].add('l1', 0, 10, 1)
batches[-1].add('l2', 0, 10, 1)
pprint.pformat(f"Sending attack payload..")
results = send_batches(batches, requests)
print(pprint.pformat(f"Results:\n{results}"), )
return results
if __name__ == "__main__":
my_requests = {
"1": {
"body": "username={}&password={}&",
"headers": {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8,nl;q=0.7",
"Content-Type": "application/x-www-form-urlencoded",
"Cookie": "JSESSIONID=6731A59A338A1A6104DEF9E879296BF1",
"Origin": "http://127.0.0.1:8090",
"Referer": "http://127.0.0.1:8090/WebGoat/login",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36"
},
"method": "POST",
"timestamp": 1543315415.7996092,
"url": "http://127.0.0.1:8090/WebGoat/login",
"id": 2
},
"2": {
"body": "agree=agree&username={}&password={}&matchingPassword={}&",
"headers": {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8,nl;q=0.7",
"Content-Type": "application/x-www-form-urlencoded",
"Cookie": "JSESSIONID=2639A17BBAF4BAA4DE0258F80C0F82E4",
"Origin": "http://127.0.0.1:8090",
"Referer": "http://127.0.0.1:8090/WebGoat/registration",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36"
},
"method": "POST",
"timestamp": 1543314627.112285,
"url": "http://127.0.0.1:8090/WebGoat/register.mvc",
"id": 1
}
}
| true | true |
1c31fb315f7247d4966153dbfaa07683628c0828 | 8,247 | py | Python | e2cnn/nn/modules/nonlinearities/norm.py | ziatdinovmax/e2cnn | e486a0d2cec71f2bde2d61f2f1315922f2883cee | [
"BSD-3-Clause"
] | null | null | null | e2cnn/nn/modules/nonlinearities/norm.py | ziatdinovmax/e2cnn | e486a0d2cec71f2bde2d61f2f1315922f2883cee | [
"BSD-3-Clause"
] | null | null | null | e2cnn/nn/modules/nonlinearities/norm.py | ziatdinovmax/e2cnn | e486a0d2cec71f2bde2d61f2f1315922f2883cee | [
"BSD-3-Clause"
] | null | null | null | from collections import defaultdict
from torch.nn import Parameter
from e2cnn.gspaces import *
from e2cnn.nn import FieldType
from e2cnn.nn import GeometricTensor
from ..equivariant_module import EquivariantModule
import torch
from typing import List, Tuple, Any
import numpy as np
__all__ = ["NormNonLinearity"]
class NormNonLinearity(EquivariantModule):

    def __init__(self, in_type, function = "n_relu", bias = True):
        r"""
        Norm non-linearities.

        This module applies a bias and an activation function over the norm of
        each field; only the magnitude of a field is transformed, so the input
        representation of the fields is preserved by this operation.

        .. note ::
            If the 'squash' non-linearity (``function``) is chosen, no bias is
            allowed.

        Args:
            in_type (FieldType): the input field type
            function (str, optional): the identifier of the non-linearity
                (``'n_relu'``, ``'n_sigmoid'`` or ``'squash'``). It is used to
                specify which function to apply. By default (``'n_relu'``),
                ReLU is used.
            bias (bool, optional): add bias to norm of fields before computing
                the non-linearity. Default: ``True``
        """
        assert isinstance(in_type.gspace, GeneralOnR2)
        super(NormNonLinearity, self).__init__()
        # every input representation must explicitly support "norm" non-linearities
        for r in in_type.representations:
            assert "norm" in r.supported_nonlinearities, (
                'Error! Representation "{}" does not support "norm" non-linearity'
                .format(r.name)
            )
        self.space = in_type.gspace
        self.in_type = in_type
        # norm non-linearities preserve the input representation
        self.out_type = in_type
        self._nfields = None
        self.log_bias = None
        if function == "n_relu":
            self._function = torch.relu
        elif function == "n_sigmoid":
            self._function = torch.sigmoid
        elif function == "squash":
            # squash maps a norm t >= 0 into [0, 1)
            self._function = lambda t: t / (1.0 + t)
            assert (
                bias is False
            ), "Error! When using squash non-linearity, norm bias is not allowed"
        else:
            raise ValueError('Function "{}" not recognized!'.format(function))
        # group fields by their size and
        # - check if fields of the same size are contiguous
        # - retrieve the indices of the fields
        # number of fields of each size
        self._nfields = defaultdict(int)
        # indices of the channels corresponding to fields belonging to each group
        _indices = defaultdict(lambda: [])
        # whether each group of fields is contiguous or not
        self._contiguous = {}
        position = 0
        last_size = None
        for i, r in enumerate(self.in_type.representations):
            if r.size != last_size:
                # a size that reappears after an interruption is not contiguous
                if not r.size in self._contiguous:
                    self._contiguous[r.size] = True
                else:
                    self._contiguous[r.size] = False
            last_size = r.size
            _indices[r.size] += list(range(position, position + r.size))
            self._nfields[r.size] += 1
            position += r.size
        for s, contiguous in self._contiguous.items():
            if contiguous:
                # for contiguous fields, only the first and last indices are kept
                _indices[s] = torch.LongTensor([min(_indices[s]), max(_indices[s]) + 1])
            else:
                # otherwise, transform the list of indices into a tensor
                _indices[s] = torch.LongTensor(_indices[s])
            # register the indices tensors as buffers of this module
            # (non-trainable state that moves with the module across devices)
            self.register_buffer("indices_{}".format(s), _indices[s])
        if bias:
            # build a bias for each field; stored as a log so the effective bias
            # exp(log_bias) applied in forward() is always positive
            self.log_bias = Parameter(
                torch.zeros(1, len(self.in_type), 1, 1, dtype=torch.float),
                requires_grad=True,
            )
        else:
            self.log_bias = None
        # build a sorted list of the fields groups, such that every time they are iterated through in the same order
        self._order = sorted(self._contiguous.keys())
        # small constant used to avoid division by zero when rescaling by the norms
        self.eps = Parameter(torch.tensor(1e-10), requires_grad=False)

    def forward(self, input):
        r"""
        Apply norm non-linearities to the input feature map.

        Args:
            input (GeometricTensor): the input feature map

        Returns:
            the resulting feature map
        """
        assert input.type == self.in_type
        input = input.tensor
        # scalar multipliers needed to turn the old norms into the newly computed ones
        multipliers = torch.empty_like(input)
        b, c, h, w = input.shape
        next_bias = 0
        if self.log_bias is not None:
            # build the (always positive) bias
            # biases = torch.nn.functional.elu(self.log_bias)
            biases = torch.exp(self.log_bias)
            # biases = torch.nn.functional.elu(self.log_bias) + 1
        else:
            biases = None
        # iterate through all field sizes
        for s in self._order:
            # retrieve the corresponding fiber indices
            indices = getattr(self, f"indices_{s}")
            if self._contiguous[s]:
                # if the fields were contiguous, we can use slicing
                # retrieve the fields
                fm = input[:, indices[0] : indices[1], :, :]
            else:
                # otherwise we have to use indexing
                # retrieve the fields
                fm = input[:, indices, :, :]
            # compute the norm of each field
            norms = fm.view(b, -1, s, h, w).norm(dim=2, keepdim=True)
            # compute the new norms
            if biases is not None:
                # retrieve the bias elements corresponding to the current fields
                bias = biases[:, next_bias : next_bias + self._nfields[s], ...].view(
                    1, -1, 1, 1, 1
                )
                new_norms = self._function(norms - bias)
            else:
                new_norms = self._function(norms)
            # compute the scalar multipliers needed to turn the old norms into the newly computed ones;
            # clamping the denominator by eps avoids division by zero
            m = new_norms / torch.max(norms, self.eps)
            # fields with (numerically) zero norm are mapped to zero
            m[norms <= self.eps] = 0.0
            if self._contiguous[s]:
                # expand the multipliers tensor to all channels for each field
                multipliers[:, indices[0] : indices[1], :, :] = m.expand(
                    b, -1, s, h, w
                ).reshape(b, -1, h, w)
            else:
                # expand the multipliers tensor to all channels for each field
                multipliers[:, indices, :, :] = m.expand(b, -1, s, h, w).reshape(
                    b, -1, h, w
                )
            # shift the position on the bias tensor
            next_bias += self._nfields[s]
        # multiply the input by the multipliers computed and wrap the result in a GeometricTensor
        return GeometricTensor(input * multipliers, self.out_type)

    def evaluate_output_shape(self, input_shape):
        # the output feature map has exactly the same shape as the input one
        assert len(input_shape) == 4
        assert input_shape[1] == self.in_type.size
        b, c, hi, wi = input_shape
        return b, self.out_type.size, hi, wi

    def check_equivariance(self, atol = 1e-6, rtol = 1e-5):
        # empirically verify equivariance on a random input for all testing elements:
        # acting on the fibers before or after the module must agree
        c = self.in_type.size
        x = torch.randn(3, c, 10, 10)
        x = GeometricTensor(x, self.in_type)
        errors = []
        for el in self.space.testing_elements:
            out1 = self(x).transform_fibers(el)
            out2 = self(x.transform_fibers(el))
            errs = (out1.tensor - out2.tensor).detach().numpy()
            errs = np.abs(errs).reshape(-1)
            print(el, errs.max(), errs.mean(), errs.var())
            assert torch.allclose(out1.tensor, out2.tensor, atol=atol, rtol=rtol), (
                'The error found during equivariance check with element "{}" is too'
                " high: max = {}, mean = {} var ={}".format(
                    el, errs.max(), errs.mean(), errs.var()
                )
            )
            errors.append((el, errs.mean()))
        return errors
| 33.79918 | 116 | 0.56178 | from collections import defaultdict
from torch.nn import Parameter
from e2cnn.gspaces import *
from e2cnn.nn import FieldType
from e2cnn.nn import GeometricTensor
from ..equivariant_module import EquivariantModule
import torch
from typing import List, Tuple, Any
import numpy as np
__all__ = ["NormNonLinearity"]
class NormNonLinearity(EquivariantModule):
def __init__(self, in_type, function = "n_relu", bias = True):
assert isinstance(in_type.gspace, GeneralOnR2)
super(NormNonLinearity, self).__init__()
for r in in_type.representations:
assert "norm" in r.supported_nonlinearities, (
'Error! Representation "{}" does not support "norm" non-linearity'
.format(r.name)
)
self.space = in_type.gspace
self.in_type = in_type
self.out_type = in_type
self._nfields = None
self.log_bias = None
if function == "n_relu":
self._function = torch.relu
elif function == "n_sigmoid":
self._function = torch.sigmoid
elif function == "squash":
self._function = lambda t: t / (1.0 + t)
assert (
bias is False
), "Error! When using squash non-linearity, norm bias is not allowed"
else:
raise ValueError('Function "{}" not recognized!'.format(function))
self._nfields = defaultdict(int)
_indices = defaultdict(lambda: [])
self._contiguous = {}
position = 0
last_size = None
for i, r in enumerate(self.in_type.representations):
if r.size != last_size:
if not r.size in self._contiguous:
self._contiguous[r.size] = True
else:
self._contiguous[r.size] = False
last_size = r.size
_indices[r.size] += list(range(position, position + r.size))
self._nfields[r.size] += 1
position += r.size
for s, contiguous in self._contiguous.items():
if contiguous:
_indices[s] = torch.LongTensor([min(_indices[s]), max(_indices[s]) + 1])
else:
_indices[s] = torch.LongTensor(_indices[s])
self.register_buffer("indices_{}".format(s), _indices[s])
if bias:
self.log_bias = Parameter(
torch.zeros(1, len(self.in_type), 1, 1, dtype=torch.float),
requires_grad=True,
)
else:
self.log_bias = None
self._order = sorted(self._contiguous.keys())
self.eps = Parameter(torch.tensor(1e-10), requires_grad=False)
def forward(self, input):
assert input.type == self.in_type
input = input.tensor
multipliers = torch.empty_like(input)
b, c, h, w = input.shape
next_bias = 0
if self.log_bias is not None:
biases = torch.exp(self.log_bias)
else:
biases = None
for s in self._order:
indices = getattr(self, f"indices_{s}")
if self._contiguous[s]:
fm = input[:, indices[0] : indices[1], :, :]
else:
fm = input[:, indices, :, :]
norms = fm.view(b, -1, s, h, w).norm(dim=2, keepdim=True)
if biases is not None:
bias = biases[:, next_bias : next_bias + self._nfields[s], ...].view(
1, -1, 1, 1, 1
)
new_norms = self._function(norms - bias)
else:
new_norms = self._function(norms)
m = new_norms / torch.max(norms, self.eps)
m[norms <= self.eps] = 0.0
if self._contiguous[s]:
multipliers[:, indices[0] : indices[1], :, :] = m.expand(
b, -1, s, h, w
).reshape(b, -1, h, w)
else:
multipliers[:, indices, :, :] = m.expand(b, -1, s, h, w).reshape(
b, -1, h, w
)
next_bias += self._nfields[s]
return GeometricTensor(input * multipliers, self.out_type)
def evaluate_output_shape(self, input_shape):
assert len(input_shape) == 4
assert input_shape[1] == self.in_type.size
b, c, hi, wi = input_shape
return b, self.out_type.size, hi, wi
def check_equivariance(self, atol = 1e-6, rtol = 1e-5):
c = self.in_type.size
x = torch.randn(3, c, 10, 10)
x = GeometricTensor(x, self.in_type)
errors = []
for el in self.space.testing_elements:
out1 = self(x).transform_fibers(el)
out2 = self(x.transform_fibers(el))
errs = (out1.tensor - out2.tensor).detach().numpy()
errs = np.abs(errs).reshape(-1)
print(el, errs.max(), errs.mean(), errs.var())
assert torch.allclose(out1.tensor, out2.tensor, atol=atol, rtol=rtol), (
'The error found during equivariance check with element "{}" is too'
" high: max = {}, mean = {} var ={}".format(
el, errs.max(), errs.mean(), errs.var()
)
)
errors.append((el, errs.mean()))
return errors
| true | true |
1c31fd9617e834df542ea98eca33b0edac8531de | 2,550 | py | Python | data_cleaner.py | PhilippMaxx/semeval2019_task3 | 0093fbffeb0dc0500b9c59ab7517ed89fa8edd8e | [
"Apache-2.0"
] | 2 | 2020-05-07T08:33:43.000Z | 2021-05-24T14:35:26.000Z | data_cleaner.py | PhilippMaxx/semeval2019_task3 | 0093fbffeb0dc0500b9c59ab7517ed89fa8edd8e | [
"Apache-2.0"
] | 1 | 2021-09-28T00:23:41.000Z | 2021-09-28T00:23:41.000Z | data_cleaner.py | PhilippMaxx/semeval2019_task3 | 0093fbffeb0dc0500b9c59ab7517ed89fa8edd8e | [
"Apache-2.0"
] | 1 | 2021-02-04T12:39:29.000Z | 2021-02-04T12:39:29.000Z | # coding=utf-8
""" Cleaning pipeline and data loader for SemEval 2019 task 3."""
from typing import List
import csv
from ekphrasis.classes.preprocessor import TextPreProcessor
from ekphrasis.classes.tokenizer import SocialTokenizer
from ekphrasis.dicts.emoticons import emoticons
import emoji
from emot import EMOTICONS
from utils import *
# Map every emoticon expression (e.g. ":)") to a normalized token via
# ``emo_transf`` so both lookup tables share the same output vocabulary.
EMOTICONS = {expr: emo_transf(emo) for expr, emo in EMOTICONS.items()}
EMOTICONS_EKPHRASIS = {expr: emo_transf(emo) for expr, emo in emoticons.items()}

# Shared ekphrasis pre-processor used by ``process_pipeline``.
TEXT_PROCESSOR = TextPreProcessor(
    # terms that will be normalized
    # optional: numbers, percent, money, time, date
    # user -- potential problem when twitter user
    # url, email, phone -- no relevant information for emotion
    # keep text as simple as possible
    normalize=['url', 'email', 'phone', 'user'],
    # terms that will be annotated - not in original data - test w/ and w/o
    annotate={"repeated", "emphasis", "elongated"},
    # fix HTML tokens
    fix_html=True,
    # corpus from which the word statistics are going to be used
    # for word segmentation
    segmenter="twitter",
    # corpus from which the word statistics are going to be used
    # for spell correction
    corrector="twitter",
    unpack_hashtags=True,  # perform word segmentation on hashtags
    unpack_contractions=True,  # Unpack contractions (can't -> can not)
    spell_correct_elong=True,  # spell correction for elongated words
    # select a tokenizer. You can use SocialTokenizer, or pass your own
    # the tokenizer, should take as input a string and return a list of tokens
    tokenizer=SocialTokenizer(lowercase=True).tokenize,
    # list of dictionaries, for replacing tokens extracted from the text,
    # with other expressions. You can pass more than one dictionaries.
    dicts=[EMOTICONS_EKPHRASIS, EMOTICONS]
)
def process_pipeline(text: str) -> str:
    """Run the full cleaning pipeline over a raw tweet.

    The steps are applied strictly in order; the final text is
    lower-cased and stripped of surrounding whitespace.
    """
    steps = (
        all_caps,
        lambda s: ' '.join(TEXT_PROCESSOR.pre_process_doc(s)),
        word_reps,
        lambda s: emoji.demojize(s, delimiters=(' ', ' ')),
        emoji_clean,  # handle - in underscore reps of emojis
        emoji_reps,
        emoji_remove_underscope,
    )
    for step in steps:
        text = step(text)
    return text.lower().strip()
def data_load(file: str) -> List:
    """Load a tab-separated training/testing file.

    Returns a list of rows, each row being the list of its
    tab-separated fields.
    """
    with open(file, 'r', encoding='utf-8') as handle:
        return list(csv.reader(handle, delimiter="\t"))
| 31.875 | 80 | 0.702353 |
from typing import List
import csv
from ekphrasis.classes.preprocessor import TextPreProcessor
from ekphrasis.classes.tokenizer import SocialTokenizer
from ekphrasis.dicts.emoticons import emoticons
import emoji
from emot import EMOTICONS
from utils import *
EMOTICONS = {expr: emo_transf(emo) for expr, emo in EMOTICONS.items()}
EMOTICONS_EKPHRASIS = {expr: emo_transf(emo) for expr, emo in emoticons.items()}
TEXT_PROCESSOR = TextPreProcessor(
normalize=['url', 'email', 'phone', 'user'],
annotate={"repeated", "emphasis", "elongated"},
fix_html=True,
segmenter="twitter",
corrector="twitter",
unpack_hashtags=True,
unpack_contractions=True,
spell_correct_elong=True, # spell correction for elongated words
# select a tokenizer. You can use SocialTokenizer, or pass your own
# the tokenizer, should take as input a string and return a list of tokens
tokenizer=SocialTokenizer(lowercase=True).tokenize,
# list of dictionaries, for replacing tokens extracted from the text,
# with other expressions. You can pass more than one dictionaries.
dicts=[EMOTICONS_EKPHRASIS, EMOTICONS]
)
def process_pipeline(text: str) -> str:
text = all_caps(text)
text = ' '.join(TEXT_PROCESSOR.pre_process_doc(text))
text = word_reps(text)
text = emoji.demojize(text, delimiters=(' ', ' '))
text = emoji_clean(text) # handle - in underscore reps of emojis
text = emoji_reps(text)
text = emoji_remove_underscope(text)
return text.lower().strip()
def data_load(file: str) -> List:
with open(file, 'r', encoding='utf-8') as f:
reader = csv.reader(f, delimiter="\t")
lines = []
for line in reader:
lines.append(line)
return lines
| true | true |
1c31fe3e5c862bed78d4be14380cc1753d44d6b6 | 1,254 | py | Python | tests/testapp/test_test_utils.py | Incopro/django-mysql | 60df164ab21cd7c08ab3c734111bedda8efc113a | [
"BSD-3-Clause"
] | null | null | null | tests/testapp/test_test_utils.py | Incopro/django-mysql | 60df164ab21cd7c08ab3c734111bedda8efc113a | [
"BSD-3-Clause"
] | null | null | null | tests/testapp/test_test_utils.py | Incopro/django-mysql | 60df164ab21cd7c08ab3c734111bedda8efc113a | [
"BSD-3-Clause"
] | 1 | 2020-06-14T01:01:51.000Z | 2020-06-14T01:01:51.000Z | import django
import pytest
from django.db import connections
from django.test import TestCase
from django_mysql.test.utils import override_mysql_variables
class OverrideVarsMethodTest(TestCase):
    @override_mysql_variables(SQL_MODE="MSSQL")
    def test_method_sets_mssql(self):
        # the decorator applies only for the duration of this test method
        self.check_sql_mode("MSSQL")

    def check_sql_mode(self, expected, using="default"):
        """Assert that ``expected`` is among the active SQL modes on ``using``."""
        with connections[using].cursor() as cursor:
            cursor.execute("SELECT @@SQL_MODE")
            active_modes = cursor.fetchone()[0].split(",")
        assert expected in active_modes
@override_mysql_variables(SQL_MODE="ANSI")
class OverrideVarsClassTest(OverrideVarsMethodTest):
    # Django 2.2 replaced the ``multi_db`` flag with the ``databases`` attribute.
    if django.VERSION >= (2, 2):
        databases = ["default", "other"]
    else:
        multi_db = True

    def test_class_sets_ansi(self):
        # the class-level decorator applies to every test in the class
        self.check_sql_mode("ANSI")

    @override_mysql_variables(using="other", SQL_MODE="MSSQL")
    def test_other_connection(self):
        # the method-level override targets only the "other" connection;
        # the class-level ANSI mode stays active on "default"
        self.check_sql_mode("ANSI")
        self.check_sql_mode("MSSQL", using="other")

    def test_it_fails_on_non_test_classes(self):
        # decorating anything other than a test class must raise
        with pytest.raises(Exception):

            @override_mysql_variables(SQL_MODE="ANSI")
            class MyClass(object):
                pass
| 27.866667 | 62 | 0.681021 | import django
import pytest
from django.db import connections
from django.test import TestCase
from django_mysql.test.utils import override_mysql_variables
class OverrideVarsMethodTest(TestCase):
@override_mysql_variables(SQL_MODE="MSSQL")
def test_method_sets_mssql(self):
self.check_sql_mode("MSSQL")
def check_sql_mode(self, expected, using="default"):
with connections[using].cursor() as cursor:
cursor.execute("SELECT @@SQL_MODE")
mode = cursor.fetchone()[0]
mode = mode.split(",")
assert expected in mode
@override_mysql_variables(SQL_MODE="ANSI")
class OverrideVarsClassTest(OverrideVarsMethodTest):
if django.VERSION >= (2, 2):
databases = ["default", "other"]
else:
multi_db = True
def test_class_sets_ansi(self):
self.check_sql_mode("ANSI")
@override_mysql_variables(using="other", SQL_MODE="MSSQL")
def test_other_connection(self):
self.check_sql_mode("ANSI")
self.check_sql_mode("MSSQL", using="other")
def test_it_fails_on_non_test_classes(self):
with pytest.raises(Exception):
@override_mysql_variables(SQL_MODE="ANSI")
class MyClass(object):
pass
| true | true |
1c31fe536e02d32fab4c8f36834d18288af37cc2 | 2,563 | py | Python | google/cloud/spanner_admin_database_v1/types/__init__.py | asthamohta/python-spanner | 321bc7faf364ad423da08ae4e2c0d6f76834dc09 | [
"Apache-2.0"
] | 49 | 2020-02-06T17:36:32.000Z | 2022-03-31T05:32:29.000Z | google/cloud/spanner_admin_database_v1/types/__init__.py | asthamohta/python-spanner | 321bc7faf364ad423da08ae4e2c0d6f76834dc09 | [
"Apache-2.0"
] | 417 | 2020-01-31T23:12:28.000Z | 2022-03-30T22:42:11.000Z | google/cloud/spanner_admin_database_v1/types/__init__.py | asthamohta/python-spanner | 321bc7faf364ad423da08ae4e2c0d6f76834dc09 | [
"Apache-2.0"
] | 46 | 2020-01-31T22:54:25.000Z | 2022-03-29T12:04:55.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .backup import (
Backup,
BackupInfo,
CreateBackupEncryptionConfig,
CreateBackupMetadata,
CreateBackupRequest,
DeleteBackupRequest,
GetBackupRequest,
ListBackupOperationsRequest,
ListBackupOperationsResponse,
ListBackupsRequest,
ListBackupsResponse,
UpdateBackupRequest,
)
from .common import (
EncryptionConfig,
EncryptionInfo,
OperationProgress,
)
from .spanner_database_admin import (
CreateDatabaseMetadata,
CreateDatabaseRequest,
Database,
DropDatabaseRequest,
GetDatabaseDdlRequest,
GetDatabaseDdlResponse,
GetDatabaseRequest,
ListDatabaseOperationsRequest,
ListDatabaseOperationsResponse,
ListDatabasesRequest,
ListDatabasesResponse,
OptimizeRestoredDatabaseMetadata,
RestoreDatabaseEncryptionConfig,
RestoreDatabaseMetadata,
RestoreDatabaseRequest,
RestoreInfo,
UpdateDatabaseDdlMetadata,
UpdateDatabaseDdlRequest,
RestoreSourceType,
)
__all__ = (
"Backup",
"BackupInfo",
"CreateBackupEncryptionConfig",
"CreateBackupMetadata",
"CreateBackupRequest",
"DeleteBackupRequest",
"GetBackupRequest",
"ListBackupOperationsRequest",
"ListBackupOperationsResponse",
"ListBackupsRequest",
"ListBackupsResponse",
"UpdateBackupRequest",
"EncryptionConfig",
"EncryptionInfo",
"OperationProgress",
"CreateDatabaseMetadata",
"CreateDatabaseRequest",
"Database",
"DropDatabaseRequest",
"GetDatabaseDdlRequest",
"GetDatabaseDdlResponse",
"GetDatabaseRequest",
"ListDatabaseOperationsRequest",
"ListDatabaseOperationsResponse",
"ListDatabasesRequest",
"ListDatabasesResponse",
"OptimizeRestoredDatabaseMetadata",
"RestoreDatabaseEncryptionConfig",
"RestoreDatabaseMetadata",
"RestoreDatabaseRequest",
"RestoreInfo",
"UpdateDatabaseDdlMetadata",
"UpdateDatabaseDdlRequest",
"RestoreSourceType",
)
| 27.55914 | 74 | 0.742489 |
from .backup import (
Backup,
BackupInfo,
CreateBackupEncryptionConfig,
CreateBackupMetadata,
CreateBackupRequest,
DeleteBackupRequest,
GetBackupRequest,
ListBackupOperationsRequest,
ListBackupOperationsResponse,
ListBackupsRequest,
ListBackupsResponse,
UpdateBackupRequest,
)
from .common import (
EncryptionConfig,
EncryptionInfo,
OperationProgress,
)
from .spanner_database_admin import (
CreateDatabaseMetadata,
CreateDatabaseRequest,
Database,
DropDatabaseRequest,
GetDatabaseDdlRequest,
GetDatabaseDdlResponse,
GetDatabaseRequest,
ListDatabaseOperationsRequest,
ListDatabaseOperationsResponse,
ListDatabasesRequest,
ListDatabasesResponse,
OptimizeRestoredDatabaseMetadata,
RestoreDatabaseEncryptionConfig,
RestoreDatabaseMetadata,
RestoreDatabaseRequest,
RestoreInfo,
UpdateDatabaseDdlMetadata,
UpdateDatabaseDdlRequest,
RestoreSourceType,
)
__all__ = (
"Backup",
"BackupInfo",
"CreateBackupEncryptionConfig",
"CreateBackupMetadata",
"CreateBackupRequest",
"DeleteBackupRequest",
"GetBackupRequest",
"ListBackupOperationsRequest",
"ListBackupOperationsResponse",
"ListBackupsRequest",
"ListBackupsResponse",
"UpdateBackupRequest",
"EncryptionConfig",
"EncryptionInfo",
"OperationProgress",
"CreateDatabaseMetadata",
"CreateDatabaseRequest",
"Database",
"DropDatabaseRequest",
"GetDatabaseDdlRequest",
"GetDatabaseDdlResponse",
"GetDatabaseRequest",
"ListDatabaseOperationsRequest",
"ListDatabaseOperationsResponse",
"ListDatabasesRequest",
"ListDatabasesResponse",
"OptimizeRestoredDatabaseMetadata",
"RestoreDatabaseEncryptionConfig",
"RestoreDatabaseMetadata",
"RestoreDatabaseRequest",
"RestoreInfo",
"UpdateDatabaseDdlMetadata",
"UpdateDatabaseDdlRequest",
"RestoreSourceType",
)
| true | true |
1c31feb3a0beed2b13b1024885dee420f9a90e4f | 15,152 | py | Python | src/sage/combinat/rigged_configurations/rc_crystal.py | Findstat/sage | d661c2c2bd18676014c151e9eec1e81ed12db9f6 | [
"BSL-1.0"
] | null | null | null | src/sage/combinat/rigged_configurations/rc_crystal.py | Findstat/sage | d661c2c2bd18676014c151e9eec1e81ed12db9f6 | [
"BSL-1.0"
] | null | null | null | src/sage/combinat/rigged_configurations/rc_crystal.py | Findstat/sage | d661c2c2bd18676014c151e9eec1e81ed12db9f6 | [
"BSL-1.0"
] | null | null | null | r"""
Crystal of Rigged Configurations
AUTHORS:
- Travis Scrimshaw (2010-09-26): Initial version
We only consider the highest weight crystal structure, not the
Kirillov-Reshetikhin structure, and we extend this to symmetrizable types.
"""
#*****************************************************************************
# Copyright (C) 2013 Travis Scrimshaw <tscrim at ucdavis.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.misc.cachefunc import cached_method
from sage.misc.lazy_attribute import lazy_attribute
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.parent import Parent
from sage.categories.highest_weight_crystals import HighestWeightCrystals
from sage.categories.regular_crystals import RegularCrystals
from sage.categories.classical_crystals import ClassicalCrystals
from sage.categories.infinite_enumerated_sets import InfiniteEnumeratedSets
from sage.combinat.root_system.cartan_type import CartanType
from sage.combinat.rigged_configurations.rigged_configurations import RiggedConfigurationOptions
from sage.combinat.rigged_configurations.rigged_configuration_element import (
RiggedConfigurationElement, RCHighestWeightElement, RCHWNonSimplyLacedElement)
from sage.combinat.rigged_configurations.rigged_partition import RiggedPartition
# Note on implementation, this class is used for simply-laced types only
class CrystalOfRiggedConfigurations(UniqueRepresentation, Parent):
    r"""
    A highest weight crystal of rigged configurations.

    The crystal structure for finite simply-laced types is given
    in [CrysStructSchilling06]_. These were then shown to be the crystal
    operators in all finite types in [SchScr]_ and all simply-laced and
    a large class of foldings of simply-laced types in [SalScr]_.

    INPUT:

    - ``cartan_type`` -- (optional) a Cartan type

    - ``wt`` -- the highest weight vector in the weight lattice

    EXAMPLES:

    For simplicity, we display the rigged configurations horizontally::

        sage: RiggedConfigurations.global_options(display='horizontal')

    We start with a simply-laced finite type::

        sage: La = RootSystem(['A', 2]).weight_lattice().fundamental_weights()
        sage: RC = crystals.RiggedConfigurations(La[1] + La[2])
        sage: mg = RC.highest_weight_vector()
        sage: mg.f_string([1,2])
        0[ ]0 0[ ]-1
        sage: mg.f_string([1,2,2])
        0[ ]0 -2[ ][ ]-2
        sage: mg.f_string([1,2,2,2])
        sage: mg.f_string([2,1,1,2])
        -1[ ][ ]-1 -1[ ][ ]-1
        sage: RC.cardinality()
        8
        sage: T = crystals.Tableaux(['A', 2], shape=[2,1])
        sage: RC.digraph().is_isomorphic(T.digraph(), edge_labels=True)
        True

    We reset the global options::

        sage: RiggedConfigurations.global_options.reset()

    REFERENCES:

    .. [SchScr] Anne Schilling and Travis Scrimshaw.
       *Crystal structure on rigged configurations and the filling map*.
       :arxiv:`1409.2920`.

    .. [SalScr] Ben Salisbury and Travis Scrimshaw.
       *A rigged configuration model for* `B(\infty)`. :arxiv:`1404.6539`.
    """
    @staticmethod
    def __classcall_private__(cls, cartan_type, wt=None, WLR=None):
        r"""
        Normalize the input arguments to ensure unique representation.

        EXAMPLES::

            sage: La = RootSystem(['A', 2]).weight_lattice().fundamental_weights()
            sage: RC = crystals.RiggedConfigurations(La[1])
            sage: RC2 = crystals.RiggedConfigurations(['A', 2], La[1])
            sage: RC3 = crystals.RiggedConfigurations(['A', 2], La[1], La[1].parent())
            sage: RC is RC2 and RC2 is RC3
            True

            sage: La = RootSystem(['A',2,1]).weight_lattice().fundamental_weights()
            sage: LaE = RootSystem(['A',2,1]).weight_lattice(extended=True).fundamental_weights()
            sage: RC = crystals.RiggedConfigurations(La[1])
            sage: RCE = crystals.RiggedConfigurations(LaE[1])
            sage: RC is RCE
            False
        """
        if wt is None:
            # single-argument form: the weight was passed as ``cartan_type``
            wt = cartan_type
            cartan_type = wt.parent().cartan_type()
        else:
            cartan_type = CartanType(cartan_type)
        if WLR is None:
            WLR = wt.parent()
        else:
            wt = WLR(wt)
        if not cartan_type.is_simply_laced():
            # non-simply-laced types are realized as virtual crystals
            # inside the crystal of the folded (simply-laced) type
            vct = cartan_type.as_folding()
            return CrystalOfNonSimplyLacedRC(vct, wt, WLR)
        return super(CrystalOfRiggedConfigurations, cls).__classcall__(cls, wt, WLR=WLR)

    def __init__(self, wt, WLR):
        r"""
        Initialize ``self``.

        EXAMPLES::

            sage: La = RootSystem(['A', 2]).weight_lattice().fundamental_weights()
            sage: RC = crystals.RiggedConfigurations(La[1] + La[2])
            sage: TestSuite(RC).run()

            sage: La = RootSystem(['A', 2, 1]).weight_lattice().fundamental_weights()
            sage: RC = crystals.RiggedConfigurations(La[0])
            sage: TestSuite(RC).run() # long time
        """
        self._cartan_type = WLR.cartan_type()
        self._wt = wt
        self._rc_index = self._cartan_type.index_set()
        # We store the cartan matrix for the vacancy number calculations for speed
        self._cartan_matrix = self._cartan_type.cartan_matrix()
        if self._cartan_type.is_finite():
            category = ClassicalCrystals()
        else:
            # infinite (e.g. affine) types still form a highest weight crystal
            category = (RegularCrystals(), HighestWeightCrystals(), InfiniteEnumeratedSets())
        Parent.__init__(self, category=category)
        n = self._cartan_type.rank()  # == len(self._cartan_type.index_set())
        # the unique highest weight element: the empty rigged configuration
        self.module_generators = (self.element_class( self, partition_list=[[] for i in range(n)] ),)

    global_options = RiggedConfigurationOptions

    def _repr_(self):
        """
        Return a string representation of ``self``.

        EXAMPLES::

            sage: La = RootSystem(['A', 3]).weight_lattice().fundamental_weights()
            sage: crystals.RiggedConfigurations(La[1])
            Crystal of rigged configurations of type ['A', 3] and weight Lambda[1]
        """
        return "Crystal of rigged configurations of type {0} and weight {1}".format(
            self._cartan_type, self._wt)

    def _element_constructor_(self, *lst, **options):
        """
        Construct a ``RiggedConfigurationElement``.

        Typically the user should not call this method since it does not check
        if it is an actual configuration in the crystal. Instead the user
        should use the iterator.

        EXAMPLES::

            sage: La = RootSystem(['A', 2]).weight_lattice().fundamental_weights()
            sage: RC = crystals.RiggedConfigurations(La[1] + La[2])
            sage: RC(partition_list=[[1],[1]], rigging_list=[[0],[-1]])
            <BLANKLINE>
            0[ ]0
            <BLANKLINE>
            0[ ]-1
            <BLANKLINE>
            sage: RC(partition_list=[[1],[2]])
            <BLANKLINE>
            0[ ]0
            <BLANKLINE>
            -2[ ][ ]-2
            <BLANKLINE>

        TESTS:

        Check that :trac:`17054` is fixed::

            sage: La = RootSystem(['A', 2]).weight_lattice().fundamental_weights()
            sage: RC = crystals.RiggedConfigurations(4*La[1] + 4*La[2])
            sage: B = crystals.infinity.RiggedConfigurations(['A',2])
            sage: x = B.an_element().f_string([2,2,1,1,2,1,2,1])
            sage: ascii_art(x)
            -4[ ][ ][ ][ ]-4 -4[ ][ ][ ][ ]0
            sage: ascii_art(RC(x.nu()))
            0[ ][ ][ ][ ]-4 0[ ][ ][ ][ ]0
            sage: x == B.an_element().f_string([2,2,1,1,2,1,2,1])
            True
        """
        if isinstance(lst[0], (list, tuple)):
            lst = lst[0]
        if isinstance(lst[0], RiggedPartition):
            lst = [p._clone() for p in lst]  # Make a deep copy
        elif isinstance(lst[0], RiggedConfigurationElement):
            lst = [p._clone() for p in lst[0]]  # Make a deep copy
        return self.element_class(self, list(lst), **options)

    def _calc_vacancy_number(self, partitions, a, i, **options):
        r"""
        Calculate the vacancy number `p_i^{(a)}(\nu)` in ``self``.

        This assumes that `\gamma_a = 1` for all `a` and
        `(\alpha_a | \alpha_b ) = A_{ab}`.

        INPUT:

        - ``partitions`` -- the list of rigged partitions we are using

        - ``a`` -- the rigged partition index

        - ``i`` -- the row length

        TESTS::

            sage: La = RootSystem(['A', 2]).weight_lattice().fundamental_weights()
            sage: RC = crystals.RiggedConfigurations(La[1] + La[2])
            sage: elt = RC(partition_list=[[1],[2]])
            sage: RC._calc_vacancy_number(elt.nu(), 1, 2)
            -2
        """
        vac_num = self._wt[self.index_set()[a]]
        # subtract A_{ab} * min(i, |row|) summed over the rows of nu^(b)
        for b, value in enumerate(self._cartan_matrix.row(a)):
            vac_num -= value * partitions[b].get_num_cells_to_column(i)
        return vac_num

    def weight_lattice_realization(self):
        """
        Return the weight lattice realization used to express the weights
        of elements in ``self``.

        EXAMPLES::

            sage: La = RootSystem(['A', 2, 1]).weight_lattice(extended=True).fundamental_weights()
            sage: RC = crystals.RiggedConfigurations(La[0])
            sage: RC.weight_lattice_realization()
            Extended weight lattice of the Root system of type ['A', 2, 1]
        """
        return self._wt.parent()

    Element = RCHighestWeightElement
class CrystalOfNonSimplyLacedRC(CrystalOfRiggedConfigurations):
    """
    Highest weight crystal of rigged configurations in non-simply-laced type.
    """
    def __init__(self, vct, wt, WLR):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: La = RootSystem(['C', 3]).weight_lattice().fundamental_weights()
            sage: RC = crystals.RiggedConfigurations(La[1])
            sage: TestSuite(RC).run()
        """
        # virtual (folded) Cartan type used to embed into a simply-laced crystal
        self._folded_ct = vct
        CrystalOfRiggedConfigurations.__init__(self, wt, WLR)

    @lazy_attribute
    def virtual(self):
        """
        Return the corresponding virtual crystal.

        EXAMPLES::

            sage: La = RootSystem(['C', 2, 1]).weight_lattice().fundamental_weights()
            sage: RC = crystals.RiggedConfigurations(La[0])
            sage: RC
            Crystal of rigged configurations of type ['C', 2, 1] and weight Lambda[0]
            sage: RC.virtual
            Crystal of rigged configurations of type ['A', 3, 1] and weight 2*Lambda[0]
        """
        P = self._folded_ct._folding.root_system().weight_lattice()
        gamma = self._folded_ct.scaling_factors()
        sigma = self._folded_ct.folding_orbit()
        # weight of the virtual crystal: node ``a`` of the original diagram
        # contributes ``gamma[a] * c`` to every node ``b`` in its folding orbit
        vwt = P.sum_of_terms((b, gamma[a]*c) for a,c in self._wt for b in sigma[a])
        return CrystalOfRiggedConfigurations(vwt)

    def _calc_vacancy_number(self, partitions, a, i, **options):
        r"""
        Calculate the vacancy number `p_i^{(a)}(\nu)` in ``self``.

        INPUT:

        - ``partitions`` -- the list of rigged partitions we are using
        - ``a`` -- the rigged partition index
        - ``i`` -- the row length

        TESTS::

            sage: La = RootSystem(['C', 3]).weight_lattice().fundamental_weights()
            sage: RC = crystals.RiggedConfigurations(La[2])
            sage: elt = RC(partition_list=[[], [1], [1]])
            sage: RC._calc_vacancy_number(elt.nu(), 1, 1)
            0
            sage: RC._calc_vacancy_number(elt.nu(), 2, 1)
            -1
        """
        I = self.index_set()
        ia = I[a]
        vac_num = self._wt[ia]

        gamma = self._folded_ct.scaling_factors()
        # vacancy numbers are computed in the virtual crystal, rescaled back
        # by the scaling factors of the folding
        for b, value in enumerate(self._cartan_matrix.row(a)):
            ib = I[b]
            q = partitions[b].get_num_cells_to_column(gamma[ia]*i, gamma[ib])
            vac_num -= value * q / gamma[ib]

        return vac_num

    def to_virtual(self, rc):
        """
        Convert ``rc`` into a rigged configuration in the virtual crystal.

        INPUT:

        - ``rc`` -- a rigged configuration element

        EXAMPLES::

            sage: La = RootSystem(['C', 3]).weight_lattice().fundamental_weights()
            sage: RC = crystals.RiggedConfigurations(La[2])
            sage: elt = RC(partition_list=[[], [1], [1]]); elt
            <BLANKLINE>
            (/)
            <BLANKLINE>
            0[ ]0
            <BLANKLINE>
            -1[ ]-1
            <BLANKLINE>
            sage: RC.to_virtual(elt)
            <BLANKLINE>
            (/)
            <BLANKLINE>
            0[ ]0
            <BLANKLINE>
            -2[ ][ ]-2
            <BLANKLINE>
            0[ ]0
            <BLANKLINE>
            (/)
            <BLANKLINE>
        """
        gamma = [int(_) for _ in self._folded_ct.scaling_factors()]
        sigma = self._folded_ct._orbit
        n = self._folded_ct._folding.rank()
        vindex = self._folded_ct._folding.index_set()
        partitions = [None] * n
        riggings = [None] * n
        # each original partition is rescaled by gamma[a] and copied to every
        # node of its folding orbit
        for a, rp in enumerate(rc):
            for i in sigma[a]:
                k = vindex.index(i)
                partitions[k] = [row_len*gamma[a] for row_len in rp._list]
                riggings[k] = [rig_val*gamma[a] for rig_val in rp.rigging]
        return self.virtual.element_class(self.virtual, partition_list=partitions,
                                          rigging_list=riggings)

    def from_virtual(self, vrc):
        """
        Convert ``vrc`` in the virtual crystal into a rigged configuration of
        the original Cartan type.

        INPUT:

        - ``vrc`` -- a virtual rigged configuration

        EXAMPLES::

            sage: La = RootSystem(['C', 3]).weight_lattice().fundamental_weights()
            sage: RC = crystals.RiggedConfigurations(La[2])
            sage: elt = RC(partition_list=[[0], [1], [1]])
            sage: elt == RC.from_virtual(RC.to_virtual(elt))
            True
        """
        gamma = list(self._folded_ct.scaling_factors())
        sigma = self._folded_ct._orbit
        n = self._cartan_type.rank()
        partitions = [None] * n
        riggings = [None] * n
        vindex = self._folded_ct._folding.index_set()
        # invert ``to_virtual``: read one representative per folding orbit and
        # divide out the scaling factors
        for a in range(n):
            index = vindex.index(sigma[a][0])
            partitions[a] = [row_len // gamma[a] for row_len in vrc[index]._list]
            riggings[a] = [rig_val / gamma[a] for rig_val in vrc[index].rigging]
        return self.element_class(self, partition_list=partitions, rigging_list=riggings)

    Element = RCHWNonSimplyLacedElement
| 36.07619 | 101 | 0.590285 |
from sage.misc.cachefunc import cached_method
from sage.misc.lazy_attribute import lazy_attribute
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.parent import Parent
from sage.categories.highest_weight_crystals import HighestWeightCrystals
from sage.categories.regular_crystals import RegularCrystals
from sage.categories.classical_crystals import ClassicalCrystals
from sage.categories.infinite_enumerated_sets import InfiniteEnumeratedSets
from sage.combinat.root_system.cartan_type import CartanType
from sage.combinat.rigged_configurations.rigged_configurations import RiggedConfigurationOptions
from sage.combinat.rigged_configurations.rigged_configuration_element import (
RiggedConfigurationElement, RCHighestWeightElement, RCHWNonSimplyLacedElement)
from sage.combinat.rigged_configurations.rigged_partition import RiggedPartition
class CrystalOfRiggedConfigurations(UniqueRepresentation, Parent):
@staticmethod
def __classcall_private__(cls, cartan_type, wt=None, WLR=None):
if wt is None:
wt = cartan_type
cartan_type = wt.parent().cartan_type()
else:
cartan_type = CartanType(cartan_type)
if WLR is None:
WLR = wt.parent()
else:
wt = WLR(wt)
if not cartan_type.is_simply_laced():
vct = cartan_type.as_folding()
return CrystalOfNonSimplyLacedRC(vct, wt, WLR)
return super(CrystalOfRiggedConfigurations, cls).__classcall__(cls, wt, WLR=WLR)
def __init__(self, wt, WLR):
self._cartan_type = WLR.cartan_type()
self._wt = wt
self._rc_index = self._cartan_type.index_set()
self._cartan_matrix = self._cartan_type.cartan_matrix()
if self._cartan_type.is_finite():
category = ClassicalCrystals()
else:
category = (RegularCrystals(), HighestWeightCrystals(), InfiniteEnumeratedSets())
Parent.__init__(self, category=category)
n = self._cartan_type.rank()
self.module_generators = (self.element_class( self, partition_list=[[] for i in range(n)] ),)
global_options = RiggedConfigurationOptions
def _repr_(self):
return "Crystal of rigged configurations of type {0} and weight {1}".format(
self._cartan_type, self._wt)
def _element_constructor_(self, *lst, **options):
if isinstance(lst[0], (list, tuple)):
lst = lst[0]
if isinstance(lst[0], RiggedPartition):
lst = [p._clone() for p in lst]
elif isinstance(lst[0], RiggedConfigurationElement):
lst = [p._clone() for p in lst[0]]
return self.element_class(self, list(lst), **options)
def _calc_vacancy_number(self, partitions, a, i, **options):
vac_num = self._wt[self.index_set()[a]]
for b, value in enumerate(self._cartan_matrix.row(a)):
vac_num -= value * partitions[b].get_num_cells_to_column(i)
return vac_num
def weight_lattice_realization(self):
return self._wt.parent()
Element = RCHighestWeightElement
class CrystalOfNonSimplyLacedRC(CrystalOfRiggedConfigurations):
def __init__(self, vct, wt, WLR):
self._folded_ct = vct
CrystalOfRiggedConfigurations.__init__(self, wt, WLR)
@lazy_attribute
def virtual(self):
P = self._folded_ct._folding.root_system().weight_lattice()
gamma = self._folded_ct.scaling_factors()
sigma = self._folded_ct.folding_orbit()
vwt = P.sum_of_terms((b, gamma[a]*c) for a,c in self._wt for b in sigma[a])
return CrystalOfRiggedConfigurations(vwt)
def _calc_vacancy_number(self, partitions, a, i, **options):
I = self.index_set()
ia = I[a]
vac_num = self._wt[ia]
gamma = self._folded_ct.scaling_factors()
for b, value in enumerate(self._cartan_matrix.row(a)):
ib = I[b]
q = partitions[b].get_num_cells_to_column(gamma[ia]*i, gamma[ib])
vac_num -= value * q / gamma[ib]
return vac_num
def to_virtual(self, rc):
gamma = [int(_) for _ in self._folded_ct.scaling_factors()]
sigma = self._folded_ct._orbit
n = self._folded_ct._folding.rank()
vindex = self._folded_ct._folding.index_set()
partitions = [None] * n
riggings = [None] * n
for a, rp in enumerate(rc):
for i in sigma[a]:
k = vindex.index(i)
partitions[k] = [row_len*gamma[a] for row_len in rp._list]
riggings[k] = [rig_val*gamma[a] for rig_val in rp.rigging]
return self.virtual.element_class(self.virtual, partition_list=partitions,
rigging_list=riggings)
def from_virtual(self, vrc):
gamma = list(self._folded_ct.scaling_factors())
sigma = self._folded_ct._orbit
n = self._cartan_type.rank()
partitions = [None] * n
riggings = [None] * n
vac_nums = [None] * n
vindex = self._folded_ct._folding.index_set()
for a in range(n):
index = vindex.index(sigma[a][0])
partitions[a] = [row_len // gamma[a] for row_len in vrc[index]._list]
riggings[a] = [rig_val / gamma[a] for rig_val in vrc[index].rigging]
return self.element_class(self, partition_list=partitions, rigging_list=riggings)
Element = RCHWNonSimplyLacedElement
| true | true |
1c31fee605abafb3e22c85cd2383cccfa60a89f8 | 2,597 | py | Python | runtime/hetdesrun/runtime/logging.py | JulianGrote1904/hetida-designer | 05350810eb3e0548c9d8a2a5a6afbf455635b5fd | [
"MIT"
] | null | null | null | runtime/hetdesrun/runtime/logging.py | JulianGrote1904/hetida-designer | 05350810eb3e0548c9d8a2a5a6afbf455635b5fd | [
"MIT"
] | null | null | null | runtime/hetdesrun/runtime/logging.py | JulianGrote1904/hetida-designer | 05350810eb3e0548c9d8a2a5a6afbf455635b5fd | [
"MIT"
] | null | null | null | from typing import Any, Literal
import contextvars
import datetime
import json
from uuid import UUID
import logging
import numpy as np
_WF_EXEC_LOGGING_CONTEXT_VAR: contextvars.ContextVar[dict] = contextvars.ContextVar(
"workflow_execution_logging_context"
)
class MinimallyMoreCapableJsonEncoder(json.JSONEncoder):
    """JSON encoder that additionally serializes UUIDs, datetimes and numpy arrays.

    Usage:
        json.dumps(object_to_serialize, cls=MinimallyMoreCapableJsonEncoder)
    """

    def default(self, obj: Any) -> Any:  # pylint: disable=arguments-renamed
        # Map each extra supported type to its JSON-friendly conversion;
        # first match wins, anything else falls through to the base class.
        conversions = (
            (UUID, lambda value: value.hex),
            (datetime.datetime, datetime.datetime.isoformat),
            (np.ndarray, np.ndarray.tolist),
        )
        for supported_type, convert in conversions:
            if isinstance(obj, supported_type):
                return convert(obj)
        return super().default(obj)
def _get_context() -> dict:
    """Return the current workflow-execution logging context dict.

    Lazily initializes the context variable with an empty dict the first
    time it is accessed in a fresh context.
    """
    try:
        return _WF_EXEC_LOGGING_CONTEXT_VAR.get()
    except LookupError:
        fresh_context: dict = {}
        _WF_EXEC_LOGGING_CONTEXT_VAR.set(fresh_context)
        return fresh_context
class ExecutionContextFilter(logging.Filter):
    """Filter that enriches log records with execution environment information.

    Values stored via :meth:`bind_context` are copied onto every log record
    passing through this filter; records are never actually filtered out.
    """

    # context keys mirrored onto each log record as attributes
    _RECORD_ATTRIBUTES = (
        "currently_executed_instance_id",
        "currently_executed_component_id",
        "currently_executed_component_node_name",
        "job_id",
    )

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # NOTE(review): these two attributes are never read by this class;
        # presumably kept for external consumers -- confirm before removing.
        self.currently_executed_node_instance = None
        self.currently_executed_component = None
        super().__init__(*args, **kwargs)

    def bind_context(self, **kwargs: Any) -> None:  # pylint: disable=no-self-use
        """Merge ``kwargs`` into the execution logging context."""
        _get_context().update(kwargs)

    def unbind_context(self, *args: str) -> None:  # pylint: disable=no-self-use
        """Remove entries with provided keys from context (missing keys ignored)."""
        context = _get_context()
        for name in args:
            context.pop(name, None)

    def clear_context(self) -> None:  # pylint: disable=no-self-use
        """Reset the execution logging context to an empty dict."""
        _WF_EXEC_LOGGING_CONTEXT_VAR.set({})

    def filter(self, record: logging.LogRecord) -> Literal[True]:
        """Copy context values onto ``record``; always lets the record pass."""
        context = _get_context()
        for attribute in self._RECORD_ATTRIBUTES:
            setattr(record, attribute, context.get(attribute, None))
        return True
execution_context_filter = ExecutionContextFilter()
| 31.670732 | 89 | 0.683481 | from typing import Any, Literal
import contextvars
import datetime
import json
from uuid import UUID
import logging
import numpy as np
_WF_EXEC_LOGGING_CONTEXT_VAR: contextvars.ContextVar[dict] = contextvars.ContextVar(
"workflow_execution_logging_context"
)
class MinimallyMoreCapableJsonEncoder(json.JSONEncoder):
def default(self, obj: Any) -> Any:
if isinstance(obj, UUID):
return obj.hex
if isinstance(obj, datetime.datetime):
return obj.isoformat()
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
def _get_context() -> dict:
try:
return _WF_EXEC_LOGGING_CONTEXT_VAR.get()
except LookupError:
_WF_EXEC_LOGGING_CONTEXT_VAR.set({})
return _WF_EXEC_LOGGING_CONTEXT_VAR.get()
class ExecutionContextFilter(logging.Filter):
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.currently_executed_node_instance = None
self.currently_executed_component = None
super().__init__(*args, **kwargs)
def bind_context(self, **kwargs: Any) -> None:
_get_context().update(kwargs)
def unbind_context(self, *args: str) -> None:
ctx_dict = _get_context()
for key in args:
ctx_dict.pop(key, None)
def clear_context(self) -> None:
_WF_EXEC_LOGGING_CONTEXT_VAR.set({})
def filter(self, record: logging.LogRecord) -> Literal[True]:
context_dict = _get_context()
record.currently_executed_instance_id = context_dict.get(
"currently_executed_instance_id", None
)
record.currently_executed_component_id = context_dict.get(
"currently_executed_component_id", None
)
record.currently_executed_component_node_name = context_dict.get(
"currently_executed_component_node_name", None
)
record.job_id = context_dict.get("job_id", None)
return True
execution_context_filter = ExecutionContextFilter()
| true | true |
1c31ffc0e49197fad9fcd6de0fe0caeed253e8ea | 15,436 | py | Python | galpopfm/dust_infer.py | IQcollaboratory/galpopFM | 1b30abc1cc2fd1119d0f34a237b0c1112d7afc9d | [
"MIT"
] | 1 | 2020-02-08T17:36:06.000Z | 2020-02-08T17:36:06.000Z | galpopfm/dust_infer.py | IQcollaboratory/galpopFM | 1b30abc1cc2fd1119d0f34a237b0c1112d7afc9d | [
"MIT"
] | 35 | 2020-02-07T19:02:27.000Z | 2021-02-04T14:28:05.000Z | galpopfm/dust_infer.py | IQcollaboratory/galpopFM | 1b30abc1cc2fd1119d0f34a237b0c1112d7afc9d | [
"MIT"
] | null | null | null | '''
'''
import os
import sys
import h5py
import numpy as np
from scipy.stats import chi2
np.seterr(divide='ignore', invalid='ignore')
# -- abcpmc --
import abcpmc
from abcpmc import mpi_util
# -- galpopfm --
from . import dustfm as dustFM
from . import measure_obs as measureObs
dat_dir = os.environ['GALPOPFM_DIR']
def distance_metric(x_obs, x_model, method='chi2', x_err=None):
    ''' distance metric between forward model m(theta) and observations

    :param x_obs:
        list of observed summary-statistic arrays
    :param x_model:
        list of model summary-statistic arrays, same shapes as ``x_obs``
    :param method:
        'chi2' (error-weighted squared difference), 'L2' (squared
        difference), or 'L1' (absolute difference)
    :param x_err:
        list of uncertainties on ``x_obs``; only used by 'chi2'.
        Defaults to 1 for every statistic.

    :return:
        list with one distance value per summary statistic
    '''
    if x_err is None:
        x_err = [1. for _x in x_obs]

    if method == 'chi2':  # error-weighted chi-squared
        return [np.sum((_obs - _mod)**2/_err**2)
                for _obs, _mod, _err in zip(x_obs, x_model, x_err)]
    if method == 'L2':  # unweighted squared (L2) distance
        return [np.sum((_obs - _mod)**2)
                for _obs, _mod in zip(x_obs, x_model)]
    if method == 'L1':  # absolute (L1) distance
        return [np.sum(np.abs(_obs - _mod))
                for _obs, _mod in zip(x_obs, x_model)]
    raise NotImplementedError("unsupported distance method: %s" % method)
def sumstat_obs(statistic='2d', return_bins=False):
    ''' summary statistics for SDSS observations: the histogram of
    [M_r, G-R, FUV-NUV] marginalized according to ``statistic``.

    :param statistic:
        '1d', '2d' or '3d'; dimensionality of the histograms returned
    :param return_bins:
        if True, also return the bin edges

    notes
    -----
    * 09/22/2020: observation summary statistics updated to Jeremy's SDSS
      catalog (centrals *and* satellites) with NSA absolute magnitudes
    * see `nb/observables.ipynb` to see exactly how the summary statistic is
      calculated.
    '''
    if statistic == '1d':
        r_edges, gr_edges, fn_edges, x_gr, x_fn, _, _ = np.load(os.path.join(dat_dir, 'obs',
            'tinker.Mr_20.Mr.GR.FUVNUV.npy'),
            allow_pickle=True)
        dgr = gr_edges[1] - gr_edges[0]
        nbar = dgr * np.sum(x_gr)
        x_obs = [nbar, x_gr, x_fn]
    elif statistic == '2d':
        r_edges, gr_edges, fn_edges, x_gr, x_fn, _, _ = np.load(os.path.join(dat_dir, 'obs',
            'tinker.Mr_20.Mr_GR.Mr_FUVNUV.npy'),
            allow_pickle=True)
        dr = r_edges[1] - r_edges[0]
        dgr = gr_edges[1] - gr_edges[0]
        # bug fix: a stray trailing comma previously made ``nbar`` a 1-tuple
        # here (inconsistent with the '1d'/'3d' branches), which breaks the
        # arithmetic in ``distance_metric``
        nbar = dr * dgr * np.sum(x_gr)
        x_obs = [nbar, x_gr, x_fn]
    elif statistic == '3d':
        r_edges, gr_edges, fn_edges, _x_obs, _ = np.load(os.path.join(dat_dir, 'obs',
            'tinker.Mr_20.Mr_GR_FUVNUV.npy'),
            allow_pickle=True)
        dr = r_edges[1] - r_edges[0]
        dgr = gr_edges[1] - gr_edges[0]
        dfn = fn_edges[1] - fn_edges[0]
        nbar = dr * dgr * dfn * np.sum(_x_obs)
        x_obs = [nbar, _x_obs]

    if return_bins:
        return r_edges, gr_edges, fn_edges, x_obs
    return x_obs
def sumstat_model(theta, sed=None, dem='slab_calzetti', f_downsample=1.,
        statistic='2d', noise=True, seed=None, return_datavector=False,
        sfr0_prescription='adhoc'):
    ''' calculate summary statistics for forward model m(theta)

    :param theta:
        array of input parameters
    :param sed:
        dictionary with SEDs of **central** galaxies
    :param dem:
        string specifying the dust empirical model
    :param f_downsample:
        if f_downsample > 1., then the SED dictionary is downsampled.
    :param statistic:
        '1d', '2d' or '3d'; dimensionality of the returned histograms
    :param noise:
        if True, perturb the observables with the empirical noise model
    :param seed:
        random seed for the noise model
    :param return_datavector:
        if True, return the raw [M_r, G-R, FUV-NUV] data vector and the
        UV-cut mask instead of the histograms
    :param sfr0_prescription:
        prescription for dealing with SFR=0 galaxies

    notes
    -----
    * 09/22/2020: simple noise model implemented
    * 4/22/2020: extra_data kwarg added. This is to pass pre-sampled
      observables for SFR = 0 galaxies
    '''
    # don't touch these values! they are set to agree with the binning of
    # the observed summary statistics (see ``sumstat_obs``)
    nbins = [8, 400, 200]
    ranges = [(20, 24), (-5., 20.), (-5, 45.)]
    dRmag = 0.5
    dGR = 0.0625
    dfuvnuv = 0.25

    # SFR=0 galaxies are flagged with the sentinel -999 (see ``_read_sed``)
    sfr0 = (sed['logsfr.inst'] == -999)
    if sfr0_prescription == 'adhoc':
        # NOTE(review): the 'adhoc' prescription is currently disabled; the
        # later 'adhoc' branch below is unreachable because of this raise.
        raise ValueError
        #R_mag_sfr0, G_R_sfr0, FUV_NUV_sfr0 = _observable_zeroSFR(
        #        sed['wave'],
        #        sed['sed_noneb'][sfr0,:])
    elif sfr0_prescription == 'sfrmin':
        # assign SFR=0 galaxies the minimum SFR of the rest of the sample
        logsfr_min = sed['logsfr.inst'][~sfr0].min() # minimum SFR
        print(logsfr_min)
        sed['logsfr.inst'][sfr0] = logsfr_min
    else:
        raise NotImplementedError

    # apply the dust attenuation model m(theta) to the SEDs
    sed_dusty = dustFM.Attenuate(
            theta,
            sed['wave'],
            sed['sed_noneb'],
            sed['sed_onlyneb'],
            sed['logmstar'],
            sed['logsfr.inst'],
            dem=dem)

    # observational measurements: absolute magnitudes in the 4 bands
    F_mag = measureObs.AbsMag_sed(sed['wave'], sed_dusty, band='galex_fuv')
    N_mag = measureObs.AbsMag_sed(sed['wave'], sed_dusty, band='galex_nuv')
    G_mag = measureObs.AbsMag_sed(sed['wave'], sed_dusty, band='g_sdss')
    R_mag = measureObs.AbsMag_sed(sed['wave'], sed_dusty, band='r_sdss')

    # apply FUV and NUV cut
    uv_cut = (F_mag < -13.5) & (N_mag < -14)
    F_mag = F_mag[uv_cut]
    N_mag = N_mag[uv_cut]
    G_mag = G_mag[uv_cut]
    R_mag = R_mag[uv_cut]

    # calculate color
    FUV_NUV = F_mag - N_mag
    G_R = G_mag - R_mag

    if sfr0_prescription == 'adhoc':
        # append sampled SFR=0 observables to data vector
        # (unreachable at present; see the raise above)
        R_mag = np.concatenate([R_mag, R_mag_sfr0])
        G_R = np.concatenate([G_R, G_R_sfr0])
        FUV_NUV = np.concatenate([FUV_NUV, FUV_NUV_sfr0])

    n_gal = len(R_mag)

    if noise:
        if seed is not None:
            np.random.seed(seed)
        # noise model (simplest model): chi2-distributed scatter amplitudes
        # (magnitude-dependent for G-R), then Gaussian perturbations
        sig_R = chi2.rvs(3, loc=0.02, scale=0.00003, size=n_gal)
        sig_FN = chi2.rvs(2, loc=0.05, scale=0.05, size=n_gal)
        sig_GR = chi2.rvs(3, size=n_gal) * (0.00001 * (R_mag + 20.1) + 0.00005)\
                + (0.000025 * (R_mag + 20.1) + 0.02835)

        R_mag += np.random.normal(size=n_gal) * sig_R
        FUV_NUV += np.random.normal(size=n_gal) * sig_FN
        G_R += np.random.normal(size=n_gal) * sig_GR

    # data vector is [-M_r, G-R, FUV-NUV]; the sign flip on R_mag matches the
    # positive (20, 24) histogram range above
    data_vector = np.array([-1.*R_mag, G_R, FUV_NUV]).T

    if return_datavector:
        return data_vector.T, uv_cut

    Nbins, _ = np.histogramdd(data_vector, bins=nbins, range=ranges)

    # convert the histogram to a number density: divide by the simulation box
    # volume, the bin widths, and the downsampling factor
    vol = {'simba': 100.**3, 'tng': 75.**3, 'eagle': 67.77**3}[sed['sim']]
    x_model = Nbins.astype(float) / vol / dRmag / dGR / dfuvnuv / f_downsample
    nbar = dRmag * dGR * dfuvnuv * np.sum(x_model)

    if statistic == '3d':
        return [nbar, x_model]
    elif statistic == '2d':
        # marginalize over FUV-NUV and over G-R respectively
        x_r_gr = dfuvnuv * np.sum(x_model, axis=2)
        x_r_fn = dGR * np.sum(x_model, axis=1)
        return [nbar, x_r_gr, x_r_fn]
    elif statistic == '1d':
        # marginalize down to the G-R and FUV-NUV 1D distributions
        x_gr = dRmag * np.sum(dfuvnuv * np.sum(x_model, axis=2), axis=0)
        x_fn = dRmag * np.sum(dGR * np.sum(x_model, axis=1), axis=0)
        return [nbar, x_gr, x_fn]
def _observable_zeroSFR(wave, sed):
    ''' for SFR = 0 galaxies, sample G-R and FUV-NUV color directly from G-R
    and FUV-NUV distributions of quiescent SDSS galaxies. This is to remove
    these galaxies from consideration in the inference.

    See `nb/sdss_quiescent_sumstat.ipynb` for details.

    :param wave:
        wavelength grid of the SEDs
    :param sed:
        (n_gal, n_wave) array of SEDs of the SFR = 0 galaxies
    :return:
        list [R_mag, G_R, FUV_NUV] of measured/sampled observables

    notes
    -----
    * 09/22/2020: updated the quiescent distributions since the observational
      dataset has been updated.
    * in principle, the G-R and FUV-NUV sampling can done for R bins, but at
      the moment it does not.
    * this only runs once so its not optimized in any way
    '''
    ngal = sed.shape[0]

    # read in G-R and FUV-NUV distributions of SDSS quiescent galaxies
    gr_edges, gr_nbins = np.load(os.path.join(dat_dir, 'obs',
        'tinker.Mr_20.quiescent.G_R_dist.npy'), allow_pickle=True)
    fn_edges, fn_nbins = np.load(os.path.join(dat_dir, 'obs',
        'tinker.Mr_20.quiescent.FUV_NUV_dist.npy'), allow_pickle=True)

    # calculate Mr from SEDs
    R_mag = measureObs.AbsMag_sed(wave, sed, band='r_sdss')

    # now sample from SDSS distribution using inverse transform sampling;
    # the inverse CDF is approximated by the bin whose CDF value is nearest
    gr_cdf = np.cumsum(gr_nbins)/np.sum(gr_nbins) # calculate CDFs for both distributions
    fn_cdf = np.cumsum(fn_nbins)/np.sum(fn_nbins)

    us = np.random.rand(ngal)
    G_R = np.empty(ngal)
    FUV_NUV = np.empty(ngal)
    # NOTE: the same uniform draw ``u`` is used for both CDFs, so each
    # galaxy's sampled G-R and FUV-NUV are rank-correlated
    for i, u in enumerate(us):
        G_R[i] = 0.5*(gr_edges[:-1] + gr_edges[1:])[np.abs(u - gr_cdf).argmin()]
        FUV_NUV[i] = 0.5*(fn_edges[:-1] + fn_edges[1:])[np.abs(u - fn_cdf).argmin()]
    return [R_mag, G_R, FUV_NUV]
def median_alongr(rmag, values, rmin=-20., rmax=-24., nbins=16):
    ''' median of ``values`` in bins of r-band magnitude, going from the
    faint limit ``rmin`` toward the bright limit ``rmax``.

    :return:
        tuple (bin midpoints, per-bin medians); both arrays have length
        ``nbins - 1``.  NOTE(review): only the first ``nbins - 1`` bins are
        evaluated, so the brightest bin is dropped -- confirm whether that
        off-by-one is intentional.
    '''
    width = (rmin - rmax) / float(nbins)
    # upper (i.e. faint) edge of each evaluated bin
    upper_edges = [rmin - width * i for i in range(nbins - 1)]
    finite = np.isfinite(values)
    medians = [
        np.median(values[(rmag < hi) & (rmag >= hi - width) & finite])
        for hi in upper_edges
    ]
    rmid = rmin - width * (np.arange(nbins - 1).astype(int) + 0.5)
    return rmid, np.array(medians)
def _read_sed(name, seed=0):
    ''' read in the forward-modeled SED file of a given simulation

    :param name:
        simulation name; one of 'simba', 'tng', 'eagle'
    :param seed:
        random seed (kept for backwards compatibility; currently unused --
        it was only used by a since-removed SFR-resolution resampling step)
    :return:
        dict with wavelengths, (no-)nebular SEDs, stellar masses, SFRs and
        central/satellite flags; non-finite log SFRs are flagged with -999.
    '''
    if name not in ['simba', 'tng', 'eagle']: raise NotImplementedError

    fhdf5 = os.path.join(dat_dir, 'sed', '%s.hdf5' % name)

    sed = {}
    with h5py.File(fhdf5, 'r') as f:
        # bug fix: the key check must happen while the file is open -- the
        # original called ``f.keys()`` again *after* ``f.close()``, which
        # fails on a closed h5py File
        has_sfr100 = 'logsfr.100' in f.keys()

        sed['wave'] = f['wave'][...]
        sed['sed_neb'] = f['sed_neb'][...]
        sed['sed_noneb'] = f['sed_noneb'][...]
        sed['sed_onlyneb'] = sed['sed_neb'] - sed['sed_noneb'] # only nebular emissions
        sed['logmstar'] = f['logmstar'][...]
        if has_sfr100:
            sed['logsfr.100'] = f['logsfr.100'][...]
        sed['logsfr.inst'] = f['logsfr.inst'][...]
        sed['censat'] = f['censat'][...]

    # flag non-finite log SFRs (SFR = 0 galaxies) with the sentinel -999.
    if has_sfr100:
        isnan = (~np.isfinite(sed['logsfr.100']))
        sed['logsfr.100'][isnan] = -999.

    isnan = (~np.isfinite(sed['logsfr.inst']))
    sed['logsfr.inst'][isnan] = -999.
    return sed
def writeABC(type, pool, prior=None, abc_dir=None):
    ''' Given abcpmc pool object, write out the specified ABC pool property.

    :param type:
        one of 'init', 'eps', 'theta', 'w', 'rho'
    :param pool:
        abcpmc pool object; may be None for type='eps' to create/overwrite
        an empty threshold file
    :param prior:
        abcpmc prior object with ``min``/``max`` arrays; only used by 'init'
    :param abc_dir:
        output directory (defaults to $GALPOPFM_DIR/abc)
    '''
    if abc_dir is None:
        abc_dir = os.path.join(dat_dir, 'abc')

    if type == 'init': # initialize
        if not os.path.exists(abc_dir):
            try:
                os.makedirs(abc_dir)
            except OSError:
                pass
        # write specific info of the run
        f = open(os.path.join(abc_dir, 'info.md'), 'w')
        f.write('# ABC run specs \n')  # bug fix: referenced undefined ``run``
        f.write('N_particles = %i \n' % pool.N)
        f.write('Distance function = %s \n' % pool.dist.__name__)
        # prior ranges (bug fix: referenced undefined ``prior_obj`` instead
        # of the ``prior`` argument)
        f.write('Top Hat Priors \n')
        f.write('Prior Min = [%s] \n' % ','.join([str(_m) for _m in prior.min]))
        f.write('Prior Max = [%s] \n' % ','.join([str(_m) for _m in prior.max]))
        f.close()
    elif type == 'eps': # threshold writeout
        if pool is None: # create or overwrite an empty threshold file
            f = open(os.path.join(abc_dir, 'epsilon.dat'), "w")
        else: # append current threshold and acceptance ratio
            # bug fix: ``pool.eps`` was previously written unconditionally,
            # crashing with AttributeError when pool is None
            f = open(os.path.join(abc_dir, 'epsilon.dat'), "a")
            f.write(str(pool.eps)+'\t'+str(pool.ratio)+'\n')
        f.close()
    elif type == 'theta': # particle thetas
        np.savetxt(os.path.join(abc_dir, 'theta.t%i.dat' % (pool.t)), pool.thetas)
    elif type == 'w': # particle weights
        np.savetxt(os.path.join(abc_dir, 'w.t%i.dat' % (pool.t)), pool.ws)
    elif type == 'rho': # distance
        np.savetxt(os.path.join(abc_dir, 'rho.t%i.dat' % (pool.t)), pool.dists)
    else:
        raise ValueError
    return None
def plotABC(pool, prior=None, dem='slab_calzetti', abc_dir=None):
    ''' Given abcpmc pool object plot the particles in a corner plot and save
    the figure to ``abc_dir``.

    :param pool:
        abcpmc pool object with particle positions ``thetas``, weights ``ws``
        and iteration number ``t``
    :param prior:
        prior object whose ``min``/``max`` arrays set the plot ranges
    :param dem:
        string specifying the dust empirical model; selects the axis labels
    :param abc_dir:
        output directory (defaults to $GALPOPFM_DIR/abc)
    '''
    import corner as DFM
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    try:
        # sometimes this formatting fails
        mpl.rcParams['text.usetex'] = True
        mpl.rcParams['font.family'] = 'serif'
        mpl.rcParams['axes.linewidth'] = 1.5
        mpl.rcParams['axes.xmargin'] = 1
        mpl.rcParams['xtick.labelsize'] = 'x-large'
        mpl.rcParams['xtick.major.size'] = 5
        mpl.rcParams['xtick.major.width'] = 1.5
        mpl.rcParams['ytick.labelsize'] = 'x-large'
        mpl.rcParams['ytick.major.size'] = 5
        mpl.rcParams['ytick.major.width'] = 1.5
        mpl.rcParams['legend.frameon'] = False
    except:
        pass

    # prior range
    prior_range = [(_min, _max) for _min, _max in zip(prior.min, prior.max)]

    # theta labels: one LaTeX label per free parameter of the chosen DEM
    if dem == 'slab_calzetti':
        lbls = [r'$m_{\tau}$', r'$c_{\tau}$', r'$f_{\rm neb}$']
    elif dem == 'slab_noll_simple':
        lbls = [r'$c_{\tau}$', r'$c_{\delta}$']
    elif dem == 'slab_noll_m':
        lbls = [r'$m_{\tau}$', r'$c_{\tau}$', r'$m_\delta$', r'$c_\delta$',
                r'$m_E$', r'$c_E$', r'$f_{\rm neb}$']
    elif dem == 'slab_noll_msfr':
        lbls = [r'$m_{\tau,1}$', r'$m_{\tau,2}$', r'$c_{\tau}$',
                r'$m_{\delta,1}$', r'$m_{\delta,2}$', r'$c_\delta$',
                r'$m_E$', r'$c_E$', r'$f_{\rm neb}$']
    elif dem == 'tnorm_noll_msfr':
        lbls = [r'$m_{\mu,1}$', r'$m_{\mu,2}$', r'$c_{\mu}$',
                r'$m_{\sigma,1}$', r'$m_{\sigma,2}$', r'$c_{\sigma}$',
                r'$m_{\delta,1}$', r'$m_{\delta,2}$', r'$c_\delta$',
                r'$m_E$', r'$c_E$', r'$f_{\rm neb}$']
    elif dem == 'slab_noll_msfr_fixbump':
        lbls = [r'$m_{\tau,1}$', r'$m_{\tau,2}$', r'$c_{\tau}$',
                r'$m_{\delta,1}$', r'$m_{\delta,2}$', r'$c_\delta$']#, r'$f_{\rm neb}$']
    elif dem == 'tnorm_noll_msfr_fixbump':
        lbls = [r'$m_{\mu,1}$', r'$m_{\mu,2}$', r'$c_{\mu}$',
                r'$m_{\sigma,1}$', r'$m_{\sigma,2}$', r'$c_{\sigma}$',
                r'$m_{\delta,1}$', r'$m_{\delta,2}$', r'$c_\delta$',
                r'$f_{\rm neb}$']
    elif dem == 'slab_noll_msfr_kink_fixbump':
        lbls = [r'$m_{\tau,{\rm low}~M_*}$', r'$m_{\tau,{\rm high}~M_*}$',
                r'$m_{\tau,{\rm low~SFR}}$', r'$m_{\tau,{\rm high~SFR}}$', r'$c_{\tau}$',
                r'$m_{\delta,1}$', r'$m_{\delta,2}$', r'$c_\delta$',
                r'$f_{\rm neb}$']
    elif dem == 'slab_noll_mssfr_fixbump':
        # NOTE(review): these labels look copy-pasted from the tnorm variant
        # (mu/sigma instead of tau) -- confirm against the DEM parameters
        lbls = [r'$m_{\mu,1}$', r'$m_{\mu,2}$', r'$c_{\mu}$',
                r'$m_{\sigma,1}$', r'$m_{\sigma,2}$', r'$c_{\sigma}$',
                r'$m_{\delta,1}$', r'$m_{\delta,2}$', r'$c_\delta$',
                r'$f_{\rm neb}$']
    else:
        raise NotImplementedError

    if abc_dir is None:
        abc_dir = os.path.join(dat_dir, 'abc')

    # weighted corner plot of the particle positions
    # NOTE(review): corner.corner's documented keyword is ``bins``; confirm
    # ``nbin`` is accepted by the installed corner version.
    fig = DFM.corner(
            pool.thetas,
            range=prior_range,
            weights=pool.ws,
            quantiles=[0.16, 0.5, 0.84],
            levels=[0.68, 0.95],
            nbin=20,
            smooth=True,
            labels=lbls,
            label_kwargs={'fontsize': 20})
    # PNG first; some matplotlib backends only support PDF
    try:
        fig.savefig(os.path.join(abc_dir, 'abc.t%i.png' % pool.t) , bbox_inches='tight')
    except:
        fig.savefig(os.path.join(abc_dir, 'abc.t%i.pdf' % pool.t) , bbox_inches='tight')
    return None
| 36.752381 | 109 | 0.561415 | import os
import sys
import h5py
import numpy as np
from scipy.stats import chi2
np.seterr(divide='ignore', invalid='ignore')
import abcpmc
from abcpmc import mpi_util
from . import dustfm as dustFM
from . import measure_obs as measureObs
dat_dir = os.environ['GALPOPFM_DIR']
def distance_metric(x_obs, x_model, method='chi2', x_err=None):
if x_err is None:
x_err = [1. for _x in x_obs]
if method == 'chi2':
rho = [np.sum((_obs - _mod)**2/_err**2)
for _obs, _mod, _err in zip(x_obs, x_model, x_err)]
elif method == 'L2':
rho = [np.sum((_obs - _mod)**2)
for _obs, _mod, _err in zip(x_obs, x_model, x_err)]
elif method == 'L1':
rho = [np.sum(np.abs(_obs - _mod))
for _obs, _mod, _err in zip(x_obs, x_model, x_err)]
else:
raise NotImplementedError
return rho
def sumstat_obs(statistic='2d', return_bins=False):
if statistic == '1d':
r_edges, gr_edges, fn_edges, x_gr, x_fn, _, _ = np.load(os.path.join(dat_dir, 'obs',
'tinker.Mr_20.Mr.GR.FUVNUV.npy'),
allow_pickle=True)
dgr = gr_edges[1] - gr_edges[0]
nbar = dgr * np.sum(x_gr)
x_obs = [nbar, x_gr, x_fn]
elif statistic == '2d':
r_edges, gr_edges, fn_edges, x_gr, x_fn, _, _ = np.load(os.path.join(dat_dir, 'obs',
'tinker.Mr_20.Mr_GR.Mr_FUVNUV.npy'),
allow_pickle=True)
dr = r_edges[1] - r_edges[0]
dgr = gr_edges[1] - gr_edges[0]
nbar = dr * dgr * np.sum(x_gr),
x_obs = [nbar, x_gr, x_fn]
elif statistic == '3d':
r_edges, gr_edges, fn_edges, _x_obs, _ = np.load(os.path.join(dat_dir, 'obs',
'tinker.Mr_20.Mr_GR_FUVNUV.npy'),
allow_pickle=True)
dr = r_edges[1] - r_edges[0]
dgr = gr_edges[1] - gr_edges[0]
dfn = fn_edges[1] - fn_edges[0]
nbar = dr * dgr * dfn * np.sum(_x_obs)
x_obs = [nbar, _x_obs]
if return_bins:
return r_edges, gr_edges, fn_edges, x_obs
return x_obs
def sumstat_model(theta, sed=None, dem='slab_calzetti', f_downsample=1.,
statistic='2d', noise=True, seed=None, return_datavector=False,
sfr0_prescription='adhoc'):
# obersvable
nbins = [8, 400, 200]
ranges = [(20, 24), (-5., 20.), (-5, 45.)]
dRmag = 0.5
dGR = 0.0625
dfuvnuv = 0.25
# SFR=0 galaxies
sfr0 = (sed['logsfr.inst'] == -999)
if sfr0_prescription == 'adhoc':
raise ValueError
#R_mag_sfr0, G_R_sfr0, FUV_NUV_sfr0 = _observable_zeroSFR(
# sed['wave'],
# sed['sed_noneb'][sfr0,:])
elif sfr0_prescription == 'sfrmin':
logsfr_min = sed['logsfr.inst'][~sfr0].min() # minimum SFR
print(logsfr_min)
sed['logsfr.inst'][sfr0] = logsfr_min
else:
raise NotImplementedError
sed_dusty = dustFM.Attenuate(
theta,
sed['wave'],
sed['sed_noneb'],
sed['sed_onlyneb'],
sed['logmstar'],
sed['logsfr.inst'],
dem=dem)
# observational measurements
F_mag = measureObs.AbsMag_sed(sed['wave'], sed_dusty, band='galex_fuv')
N_mag = measureObs.AbsMag_sed(sed['wave'], sed_dusty, band='galex_nuv')
G_mag = measureObs.AbsMag_sed(sed['wave'], sed_dusty, band='g_sdss')
R_mag = measureObs.AbsMag_sed(sed['wave'], sed_dusty, band='r_sdss')
# apply FUV and NUV cut
uv_cut = (F_mag < -13.5) & (N_mag < -14)
F_mag = F_mag[uv_cut]
N_mag = N_mag[uv_cut]
G_mag = G_mag[uv_cut]
R_mag = R_mag[uv_cut]
# calculate color
FUV_NUV = F_mag - N_mag
G_R = G_mag - R_mag
if sfr0_prescription == 'adhoc':
# append sampled SFR=0 observables to data vector
R_mag = np.concatenate([R_mag, R_mag_sfr0])
G_R = np.concatenate([G_R, G_R_sfr0])
FUV_NUV = np.concatenate([FUV_NUV, FUV_NUV_sfr0])
n_gal = len(R_mag)
if noise:
if seed is not None:
np.random.seed(seed)
# noise model (simplest model)
sig_R = chi2.rvs(3, loc=0.02, scale=0.00003, size=n_gal)
sig_FN = chi2.rvs(2, loc=0.05, scale=0.05, size=n_gal)
sig_GR = chi2.rvs(3, size=n_gal) * (0.00001 * (R_mag + 20.1) + 0.00005)\
+ (0.000025 * (R_mag + 20.1) + 0.02835)
R_mag += np.random.normal(size=n_gal) * sig_R
FUV_NUV += np.random.normal(size=n_gal) * sig_FN
G_R += np.random.normal(size=n_gal) * sig_GR
data_vector = np.array([-1.*R_mag, G_R, FUV_NUV]).T
if return_datavector:
return data_vector.T, uv_cut
Nbins, _ = np.histogramdd(data_vector, bins=nbins, range=ranges)
# volume of simulation
vol = {'simba': 100.**3, 'tng': 75.**3, 'eagle': 67.77**3}[sed['sim']]
x_model = Nbins.astype(float) / vol / dRmag / dGR / dfuvnuv / f_downsample
nbar = dRmag * dGR * dfuvnuv * np.sum(x_model)
if statistic == '3d':
return [nbar, x_model]
elif statistic == '2d':
x_r_gr = dfuvnuv * np.sum(x_model, axis=2)
x_r_fn = dGR * np.sum(x_model, axis=1)
return [nbar, x_r_gr, x_r_fn]
elif statistic == '1d':
x_gr = dRmag * np.sum(dfuvnuv * np.sum(x_model, axis=2), axis=0)
x_fn = dRmag * np.sum(dGR * np.sum(x_model, axis=1), axis=0)
return [nbar, x_gr, x_fn]
def _observable_zeroSFR(wave, sed):
ngal = sed.shape[0]
# read in G-R and FUV-NUV distributions of SDSS quiescent galaxies
gr_edges, gr_nbins = np.load(os.path.join(dat_dir, 'obs',
'tinker.Mr_20.quiescent.G_R_dist.npy'), allow_pickle=True)
fn_edges, fn_nbins = np.load(os.path.join(dat_dir, 'obs',
'tinker.Mr_20.quiescent.FUV_NUV_dist.npy'), allow_pickle=True)
# calculate Mr from SEDs
R_mag = measureObs.AbsMag_sed(wave, sed, band='r_sdss')
# now sample from SDSS distribution using inverse transform sampling
gr_cdf = np.cumsum(gr_nbins)/np.sum(gr_nbins) # calculate CDFs for both distributions
fn_cdf = np.cumsum(fn_nbins)/np.sum(fn_nbins)
us = np.random.rand(ngal)
G_R = np.empty(ngal)
FUV_NUV = np.empty(ngal)
for i, u in enumerate(us):
G_R[i] = 0.5*(gr_edges[:-1] + gr_edges[1:])[np.abs(u - gr_cdf).argmin()]
FUV_NUV[i] = 0.5*(fn_edges[:-1] + fn_edges[1:])[np.abs(u - fn_cdf).argmin()]
return [R_mag, G_R, FUV_NUV]
def median_alongr(rmag, values, rmin=-20., rmax=-24., nbins=16):
dr = (rmin - rmax)/float(nbins)
medians = []
for i in range(nbins-1):
rbin = (rmag < rmin-dr*i) & (rmag >= rmin-dr*(i+1)) & np.isfinite(values)
medians.append(np.median(values[rbin]))
rmid = rmin - dr*(np.arange(nbins-1).astype(int)+0.5)
return rmid, np.array(medians)
def _read_sed(name, seed=0):
if name not in ['simba', 'tng', 'eagle']: raise NotImplementedError
fhdf5 = os.path.join(dat_dir, 'sed', '%s.hdf5' % name)
f = h5py.File(fhdf5, 'r')
sed = {}
sed['wave'] = f['wave'][...]
sed['sed_neb'] = f['sed_neb'][...]
sed['sed_noneb'] = f['sed_noneb'][...]
sed['sed_onlyneb'] = sed['sed_neb'] - sed['sed_noneb'] # only nebular emissoins
sed['logmstar'] = f['logmstar'][...]
if 'logsfr.100' in f.keys():
sed['logsfr.100'] = f['logsfr.100'][...]
sed['logsfr.inst'] = f['logsfr.inst'][...]
sed['censat'] = f['censat'][...]
f.close()
if 'logsfr.100' in f.keys():
isnan = (~np.isfinite(sed['logsfr.100']))
sed['logsfr.100'][isnan] = -999.
isnan = (~np.isfinite(sed['logsfr.inst']))
sed['logsfr.inst'][isnan] = -999.
return sed
def writeABC(type, pool, prior=None, abc_dir=None):
if abc_dir is None:
abc_dir = os.path.join(dat_dir, 'abc')
if type == 'init': # initialize
if not os.path.exists(abc_dir):
try:
os.makedirs(abc_dir)
except OSError:
pass
# write specific info of the run
f = open(os.path.join(abc_dir, 'info.md'), 'w')
f.write('
f.write('N_particles = %i \n' % pool.N)
f.write('Distance function = %s \n' % pool.dist.__name__)
# prior
f.write('Top Hat Priors \n')
f.write('Prior Min = [%s] \n' % ','.join([str(prior_obj.min[i]) for i in range(len(prior_obj.min))]))
f.write('Prior Max = [%s] \n' % ','.join([str(prior_obj.max[i]) for i in range(len(prior_obj.max))]))
f.close()
elif type == 'eps': # threshold writeout
if pool is None: # write or overwrite threshold writeout
f = open(os.path.join(abc_dir, 'epsilon.dat'), "w")
else:
f = open(os.path.join(abc_dir, 'epsilon.dat'), "a") # append
f.write(str(pool.eps)+'\t'+str(pool.ratio)+'\n')
f.close()
elif type == 'theta': # particle thetas
np.savetxt(os.path.join(abc_dir, 'theta.t%i.dat' % (pool.t)), pool.thetas)
elif type == 'w': # particle weights
np.savetxt(os.path.join(abc_dir, 'w.t%i.dat' % (pool.t)), pool.ws)
elif type == 'rho': # distance
np.savetxt(os.path.join(abc_dir, 'rho.t%i.dat' % (pool.t)), pool.dists)
else:
raise ValueError
return None
def plotABC(pool, prior=None, dem='slab_calzetti', abc_dir=None):
    """Corner plot of the weighted ABC particle pool over the prior range.

    The figure is saved as ``abc.t<iteration>.png`` (falling back to PDF)
    inside ``abc_dir`` (default ``<dat_dir>/abc``).
    """
    import corner as DFM
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    # nicer plotting defaults; skipped silently when e.g. LaTeX is missing
    _style = [
        ('text.usetex', True),
        ('font.family', 'serif'),
        ('axes.linewidth', 1.5),
        ('axes.xmargin', 1),
        ('xtick.labelsize', 'x-large'),
        ('xtick.major.size', 5),
        ('xtick.major.width', 1.5),
        ('ytick.labelsize', 'x-large'),
        ('ytick.major.size', 5),
        ('ytick.major.width', 1.5),
        ('legend.frameon', False),
    ]
    try:
        for _key, _value in _style:
            mpl.rcParams[_key] = _value
    except:
        pass
    # prior range per axis
    prior_range = list(zip(prior.min, prior.max))
    # axis labels for each supported dust-empirical model
    labels_by_dem = {
        'slab_calzetti': [r'$m_{\tau}$', r'$c_{\tau}$', r'$f_{\rm neb}$'],
        'slab_noll_simple': [r'$c_{\tau}$', r'$c_{\delta}$'],
        'slab_noll_m': [r'$m_{\tau}$', r'$c_{\tau}$', r'$m_\delta$', r'$c_\delta$',
                        r'$m_E$', r'$c_E$', r'$f_{\rm neb}$'],
        'slab_noll_msfr': [r'$m_{\tau,1}$', r'$m_{\tau,2}$', r'$c_{\tau}$',
                           r'$m_{\delta,1}$', r'$m_{\delta,2}$', r'$c_\delta$',
                           r'$m_E$', r'$c_E$', r'$f_{\rm neb}$'],
        'tnorm_noll_msfr': [r'$m_{\mu,1}$', r'$m_{\mu,2}$', r'$c_{\mu}$',
                            r'$m_{\sigma,1}$', r'$m_{\sigma,2}$', r'$c_{\sigma}$',
                            r'$m_{\delta,1}$', r'$m_{\delta,2}$', r'$c_\delta$',
                            r'$m_E$', r'$c_E$', r'$f_{\rm neb}$'],
        'slab_noll_msfr_fixbump': [r'$m_{\tau,1}$', r'$m_{\tau,2}$', r'$c_{\tau}$',
                                   r'$m_{\delta,1}$', r'$m_{\delta,2}$', r'$c_\delta$'],
        'tnorm_noll_msfr_fixbump': [r'$m_{\mu,1}$', r'$m_{\mu,2}$', r'$c_{\mu}$',
                                    r'$m_{\sigma,1}$', r'$m_{\sigma,2}$', r'$c_{\sigma}$',
                                    r'$m_{\delta,1}$', r'$m_{\delta,2}$', r'$c_\delta$',
                                    r'$f_{\rm neb}$'],
        'slab_noll_msfr_kink_fixbump': [r'$m_{\tau,{\rm low}~M_*}$', r'$m_{\tau,{\rm high}~M_*}$',
                                        r'$m_{\tau,{\rm low~SFR}}$', r'$m_{\tau,{\rm high~SFR}}$', r'$c_{\tau}$',
                                        r'$m_{\delta,1}$', r'$m_{\delta,2}$', r'$c_\delta$',
                                        r'$f_{\rm neb}$'],
        'slab_noll_mssfr_fixbump': [r'$m_{\mu,1}$', r'$m_{\mu,2}$', r'$c_{\mu}$',
                                    r'$m_{\sigma,1}$', r'$m_{\sigma,2}$', r'$c_{\sigma}$',
                                    r'$m_{\delta,1}$', r'$m_{\delta,2}$', r'$c_\delta$',
                                    r'$f_{\rm neb}$'],
    }
    if dem not in labels_by_dem:
        raise NotImplementedError
    lbls = labels_by_dem[dem]
    if abc_dir is None:
        abc_dir = os.path.join(dat_dir, 'abc')
    fig = DFM.corner(
        pool.thetas,
        range=prior_range,
        weights=pool.ws,
        quantiles=[0.16, 0.5, 0.84],
        levels=[0.68, 0.95],
        nbin=20,
        smooth=True,
        labels=lbls,
        label_kwargs={'fontsize': 20})
    # PNG output occasionally fails (backend dependent); fall back to PDF
    try:
        fig.savefig(os.path.join(abc_dir, 'abc.t%i.png' % pool.t), bbox_inches='tight')
    except:
        fig.savefig(os.path.join(abc_dir, 'abc.t%i.pdf' % pool.t), bbox_inches='tight')
    return None
| true | true |
1c31ffdd2b45111c693651f89499b6e4ef53720a | 403 | py | Python | src/master.py | guavadata/peru_sinadef_eda | 4e57f08cf4496d124e5297d4d30a1a0736efc37d | [
"MIT"
] | null | null | null | src/master.py | guavadata/peru_sinadef_eda | 4e57f08cf4496d124e5297d4d30a1a0736efc37d | [
"MIT"
] | null | null | null | src/master.py | guavadata/peru_sinadef_eda | 4e57f08cf4496d124e5297d4d30a1a0736efc37d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
This is the master script for recreating the results
It imports each of the key other scripts and
runs them one by one.
Run the whole thing from the root directory
to replicate all of the python analysis
"""
# Pipeline stage modules (project-local).
import src.download_raw_data as dwl_raw
import src.clean_data as cln_data
import src.transform_data as ts_data
# Run the full analysis pipeline in order: download -> clean -> transform.
dwl_raw.main()
cln_data.clean_data()
ts_data.main()
| 20.15 | 52 | 0.784119 |
import src.download_raw_data as dwl_raw
import src.clean_data as cln_data
import src.transform_data as ts_data
dwl_raw.main()
cln_data.clean_data()
ts_data.main()
| true | true |
1c320081365f5d4e0c730b1b9a1090eb3d5e77e0 | 1,144 | py | Python | src/public/src/FM7/util/python/txt2cpp.py | rothberg-cmu/rothberg-run | a42df5ca9fae97de77753864f60d05295d77b59f | [
"MIT"
] | 1 | 2019-08-10T00:24:09.000Z | 2019-08-10T00:24:09.000Z | src/public/src/FM7/util/python/txt2cpp.py | rothberg-cmu/rothberg-run | a42df5ca9fae97de77753864f60d05295d77b59f | [
"MIT"
] | null | null | null | src/public/src/FM7/util/python/txt2cpp.py | rothberg-cmu/rothberg-run | a42df5ca9fae97de77753864f60d05295d77b59f | [
"MIT"
] | 2 | 2019-05-01T03:11:10.000Z | 2019-05-01T03:30:35.000Z | import sys
import os
# Limitation: header and cpp needs to be in the same directory.
def TextFileToCpp(cppFName,hFName,varName,binFName):
    """Convert the text file ``binFName`` into a C++ string-array source pair.

    Each line of the input becomes one quoted entry of ``varName``; the
    generated array is nullptr-terminated.  Limitation: the header and the
    cpp file need to be in the same directory (the cpp includes the header
    by basename only).
    """
    lines = []
    # Read the input with a context manager (the original leaked the handle
    # on error) and avoid shadowing the builtin ``str`` as a loop variable.
    with open(binFName, "r") as fp:
        for line in fp:
            # drop any newline characters, wherever they appear
            lines.append(line.replace('\n', '').replace('\r', ''))
    TextToCpp(cppFName, hFName, varName, lines)
def TextToCpp(cppFName,hFName,varName,strArray):
    """Emit a C++ header/source pair declaring and defining a string array.

    The header declares ``extern const char * const <varName>[]`` behind an
    include guard derived from ``hFName``; the source defines the array with
    one entry per string in ``strArray`` plus a trailing ``nullptr``.
    """
    # Build the include-guard macro from the header file name.
    guard = hFName.upper().replace(".", "_").replace("/", "_").replace("\\", "_")
    # Context managers ensure the files are closed even on error
    # (the original used bare open()/close() pairs).
    with open(hFName, "w") as hFp:
        hFp.write("#ifndef " + guard + "_IS_INCLUDED\n")
        hFp.write("#define " + guard + "_IS_INCLUDED\n")
        hFp.write("\n")
        hFp.write("extern const char * const " + varName + "[];\n")
        hFp.write("\n")
        hFp.write("#endif\n")
    with open(cppFName, "w") as cppFp:
        # include by basename: header and cpp must live in the same directory
        cppFp.write('#include "' + os.path.basename(hFName) + '"\n')
        cppFp.write("\n")
        cppFp.write("const char * const " + varName + "[]=\n")
        cppFp.write("{\n")
        for s in strArray:
            cppFp.write('\t"' + s + '",\n')
        cppFp.write("\tnullptr,\n")
        cppFp.write("};\n")
def main():
    """Command-line entry point: txt2cpp <cpp> <header> <var-name> <text-file>."""
    cpp_name, header_name, var_name, text_name = sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]
    TextFileToCpp(cpp_name, header_name, var_name, text_name)
if __name__=="__main__":
main()
| 20.070175 | 63 | 0.652972 | import sys
import os
def TextFileToCpp(cppFName,hFName,varName,binFName):
strArray=[]
fp=open(binFName,"r")
for str in fp:
str=str.replace('\n','')
str=str.replace('\r','')
strArray.append(str)
fp.close()
TextToCpp(cppFName,hFName,varName,strArray)
def TextToCpp(cppFName,hFName,varName,strArray):
HFNAME=hFName.upper().replace(".","_")
HFNAME=HFNAME.replace("/","_")
HFNAME=HFNAME.replace("\\","_")
hFp=open(hFName,"w")
hFp.write("#ifndef "+HFNAME+"_IS_INCLUDED\n")
hFp.write("#define "+HFNAME+"_IS_INCLUDED\n")
hFp.write("\n");
hFp.write("extern const char * const "+varName+"[];\n");
hFp.write("\n");
hFp.write("#endif\n")
hFp.close()
cppFp=open(cppFName,"w")
cppFp.write('#include "'+os.path.basename(hFName)+'"\n')
cppFp.write("\n")
cppFp.write("const char * const "+varName+"[]=\n")
cppFp.write("{\n");
for s in strArray:
cppFp.write('\t"'+s+'",\n')
cppFp.write("\tnullptr,\n")
cppFp.write("};\n");
cppFp.close();
def main():
TextFileToCpp(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4])
if __name__=="__main__":
main()
| true | true |
1c32009e48bfe3934197a2d23c0e6d0a42c586f9 | 5,578 | py | Python | tests/test_visitors/test_ast/test_conditions/test_implicit_complex_compare.py | cdhiraj40/wemake-python-styleguide | 7cef9be081d594c30045b7a98cae77a9be46e1aa | [
"MIT"
] | 1,931 | 2018-03-17T13:52:45.000Z | 2022-03-27T09:39:17.000Z | tests/test_visitors/test_ast/test_conditions/test_implicit_complex_compare.py | cdhiraj40/wemake-python-styleguide | 7cef9be081d594c30045b7a98cae77a9be46e1aa | [
"MIT"
] | 2,231 | 2018-03-09T21:19:05.000Z | 2022-03-31T08:35:37.000Z | tests/test_visitors/test_ast/test_conditions/test_implicit_complex_compare.py | cdhiraj40/wemake-python-styleguide | 7cef9be081d594c30045b7a98cae77a9be46e1aa | [
"MIT"
] | 492 | 2018-05-18T21:20:28.000Z | 2022-03-20T14:11:50.000Z | import pytest
from wemake_python_styleguide.violations.consistency import (
ImplicitComplexCompareViolation,
)
from wemake_python_styleguide.visitors.ast.conditions import (
ImplicitBoolPatternsVisitor,
)
# Code templates for parametrized tests; placeholders {0}..{3} are filled
# with the ``comparators`` tuples below.
# Won't match our rule with any values (OR-joined compares never chain):
less_or_less = '{0} < {1} or {2} < {3}'
less_or_more = '{0} < {1} or {2} > {3}'
more_or_more = '{0} > {1} or {2} > {3}'
lesseq_or_less = '{0} <= {1} or {2} < {3}'
less_or_lesseq = '{0} < {1} or {2} <= {3}'
lesseq_or_lesseq = '{0} <= {1} or {2} <= {3}'
lesseq_or_more = '{0} <= {1} or {2} > {3}'
less_or_moreeq = '{0} < {1} or {2} >= {3}'
lesseq_or_moreeq = '{0} <= {1} or {2} >= {3}'
moreeq_or_more = '{0} >= {1} or {2} > {3}'
more_or_moreeq = '{0} > {1} or {2} >= {3}'
moreeq_or_moreeq = '{0} >= {1} or {2} >= {3}'
# Will match our rule with some values (AND-joined compares sharing a pivot):
more_and_more = '{0} > {1} and {2} > {3}'  # a > b > c
less_and_less = '{0} < {1} and {2} < {3}'  # a < b < c
less_and_more = '{0} < {1} and {2} > {3}'  # a < b < c
more_and_less = '{0} > {1} and {2} < {3}'  # a > b > c
moreeq_and_more = '{0} >= {1} and {2} > {3}'
more_and_moreeq = '{0} > {1} and {2} >= {3}'
moreeq_and_moreeq = '{0} >= {1} and {2} >= {3}'
lesseq_and_less = '{0} <= {1} and {2} < {3}'
less_and_lesseq = '{0} < {1} and {2} <= {3}'
lesseq_and_lesseq = '{0} <= {1} and {2} <= {3}'
lesseq_and_more = '{0} <= {1} and {2} > {3}'
less_and_moreeq = '{0} < {1} and {2} >= {3}'
lesseq_and_moreeq = '{0} <= {1} and {2} >= {3}'
moreeq_and_less = '{0} >= {1} and {2} < {3}'
more_and_lesseq = '{0} > {1} and {2} <= {3}'
moreq_and_lesseq = '{0} >= {1} and {2} <= {3}'
@pytest.mark.parametrize('code', [
    more_and_more,
    less_and_less,
    moreeq_and_more,
    more_and_moreeq,
    moreeq_and_moreeq,
    lesseq_and_less,
    less_and_lesseq,
    lesseq_and_lesseq,
])
@pytest.mark.parametrize('comparators', [
    ('a', 'b', 'b', 'c'),
    ('a', 'b', 'b', '10'),
    ('a()', 'b', 'b', 'c'),
    ('a', 'b', 'b', 'c(1, 2, 3)'),
    ('a(None)', 'b', 'b', 'c()'),
    ('a.prop', 'b', 'b', 'c.method()'),
    ('a("string")', 'b', 'b', '2'),
    ('a', 'b', 'b', 'c and other == 1'),
    ('a', 'b and other == 1', 'b', 'c'),
    ('1', 'a', 'a', '10'),
    ('1', 'a', 'a', 'b'),
    ('1', 'a', 'a', '10 and call()'),
])
def test_implicit_complex_compare(
    code,
    comparators,
    assert_errors,
    parse_ast_tree,
    default_options,
):
    """AND-joined compares chained through a shared pivot operand raise the violation."""
    tree = parse_ast_tree(code.format(*comparators))
    visitor = ImplicitBoolPatternsVisitor(default_options, tree=tree)
    visitor.run()
    assert_errors(visitor, [ImplicitComplexCompareViolation])
@pytest.mark.parametrize('code', [
    less_and_more,
    lesseq_and_more,
    less_and_moreeq,
    lesseq_and_moreeq,
    more_and_less,
    moreeq_and_less,
    more_and_lesseq,
    moreq_and_lesseq,
])
@pytest.mark.parametrize('comparators', [
    ('a', 'b', 'c', 'b'),
    ('a', 'b', 'c(k, v)', 'b'),
    ('a(1)', 'b', 'c', 'b'),
    ('a', 'b', 'c.attr', 'b'),
    ('a.method()', 'b', 'c', 'b'),
    ('a.method(value)', 'b', '1', 'b'),
    ('a', 'b', '10', 'b'),
    ('1', 'b', 'c', 'b'),
    ('1', 'b', '10', 'b'),
    ('a', 'b', 'c', 'b and other == 1'),
])
def test_implicit_complex_compare_reversed(
    code,
    comparators,
    assert_errors,
    parse_ast_tree,
    default_options,
):
    """The violation is also raised when the shared pivot sits on the right side."""
    tree = parse_ast_tree(code.format(*comparators))
    visitor = ImplicitBoolPatternsVisitor(default_options, tree=tree)
    visitor.run()
    assert_errors(visitor, [ImplicitComplexCompareViolation])
@pytest.mark.parametrize('code', [
    more_and_more,
    moreeq_and_more,
    more_and_moreeq,
    moreeq_and_moreeq,
    less_and_less,
    lesseq_and_less,
    less_and_lesseq,
    lesseq_and_lesseq,
    less_and_more,
    lesseq_and_more,
    less_and_moreeq,
    lesseq_and_moreeq,
    more_and_less,
    moreeq_and_less,
    more_and_lesseq,
    moreq_and_lesseq,
])
@pytest.mark.parametrize('comparators', [
    ('a', 'None', 'b', 'c'),
])
def test_compare_wrong_values(
    code,
    comparators,
    assert_errors,
    parse_ast_tree,
    default_options,
):
    """No violation when the two compares do not share a pivot operand."""
    tree = parse_ast_tree(code.format(*comparators))
    visitor = ImplicitBoolPatternsVisitor(default_options, tree=tree)
    visitor.run()
    assert_errors(visitor, [])
@pytest.mark.parametrize('code', [
    less_or_less,
    less_or_more,
    more_or_more,
    lesseq_or_less,
    less_or_lesseq,
    lesseq_or_lesseq,
    lesseq_or_more,
    less_or_moreeq,
    lesseq_or_moreeq,
    moreeq_or_more,
    more_or_moreeq,
    moreeq_or_moreeq,
])
@pytest.mark.parametrize('comparators', [
    ('a', 'b', 'b', 'c'),
    ('a', 'b', 'a', 'c'),
    ('a', 'c', 'b', 'c'),
    ('a', '1', 'a', '2'),
    ('a', 'b', 'b', 'c and other == 1'),
])
def test_regular_compare(
    code,
    comparators,
    assert_errors,
    parse_ast_tree,
    default_options,
):
    """OR-joined comparisons are regular compares and raise no violation."""
    tree = parse_ast_tree(code.format(*comparators))
    visitor = ImplicitBoolPatternsVisitor(default_options, tree=tree)
    visitor.run()
    assert_errors(visitor, [])
@pytest.mark.parametrize('code', [
    'a < b',
    'a > c',
    'a and b',
    'a or c',
    'not a',
])
def test_regular_short_compare(
    code,
    assert_errors,
    parse_ast_tree,
    default_options,
):
    """Single compares and plain boolean operations raise no violation."""
    tree = parse_ast_tree(code)
    visitor = ImplicitBoolPatternsVisitor(default_options, tree=tree)
    visitor.run()
    assert_errors(visitor, [])
| 23.939914 | 69 | 0.576371 | import pytest
from wemake_python_styleguide.violations.consistency import (
ImplicitComplexCompareViolation,
)
from wemake_python_styleguide.visitors.ast.conditions import (
ImplicitBoolPatternsVisitor,
)
less_or_less = '{0} < {1} or {2} < {3}'
less_or_more = '{0} < {1} or {2} > {3}'
more_or_more = '{0} > {1} or {2} > {3}'
lesseq_or_less = '{0} <= {1} or {2} < {3}'
less_or_lesseq = '{0} < {1} or {2} <= {3}'
lesseq_or_lesseq = '{0} <= {1} or {2} <= {3}'
lesseq_or_more = '{0} <= {1} or {2} > {3}'
less_or_moreeq = '{0} < {1} or {2} >= {3}'
lesseq_or_moreeq = '{0} <= {1} or {2} >= {3}'
moreeq_or_more = '{0} >= {1} or {2} > {3}'
more_or_moreeq = '{0} > {1} or {2} >= {3}'
moreeq_or_moreeq = '{0} >= {1} or {2} >= {3}'
# Will match our rule with some values:
more_and_more = '{0} > {1} and {2} > {3}' # a > b > c
less_and_less = '{0} < {1} and {2} < {3}' # a < b < c
less_and_more = '{0} < {1} and {2} > {3}' # a < b < c
more_and_less = '{0} > {1} and {2} < {3}' # a > b > c
moreeq_and_more = '{0} >= {1} and {2} > {3}'
more_and_moreeq = '{0} > {1} and {2} >= {3}'
moreeq_and_moreeq = '{0} >= {1} and {2} >= {3}'
lesseq_and_less = '{0} <= {1} and {2} < {3}'
less_and_lesseq = '{0} < {1} and {2} <= {3}'
lesseq_and_lesseq = '{0} <= {1} and {2} <= {3}'
lesseq_and_more = '{0} <= {1} and {2} > {3}'
less_and_moreeq = '{0} < {1} and {2} >= {3}'
lesseq_and_moreeq = '{0} <= {1} and {2} >= {3}'
moreeq_and_less = '{0} >= {1} and {2} < {3}'
more_and_lesseq = '{0} > {1} and {2} <= {3}'
moreq_and_lesseq = '{0} >= {1} and {2} <= {3}'
@pytest.mark.parametrize('code', [
more_and_more,
less_and_less,
moreeq_and_more,
more_and_moreeq,
moreeq_and_moreeq,
lesseq_and_less,
less_and_lesseq,
lesseq_and_lesseq,
])
@pytest.mark.parametrize('comparators', [
('a', 'b', 'b', 'c'),
('a', 'b', 'b', '10'),
('a()', 'b', 'b', 'c'),
('a', 'b', 'b', 'c(1, 2, 3)'),
('a(None)', 'b', 'b', 'c()'),
('a.prop', 'b', 'b', 'c.method()'),
('a("string")', 'b', 'b', '2'),
('a', 'b', 'b', 'c and other == 1'),
('a', 'b and other == 1', 'b', 'c'),
('1', 'a', 'a', '10'),
('1', 'a', 'a', 'b'),
('1', 'a', 'a', '10 and call()'),
])
def test_implicit_complex_compare(
code,
comparators,
assert_errors,
parse_ast_tree,
default_options,
):
tree = parse_ast_tree(code.format(*comparators))
visitor = ImplicitBoolPatternsVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ImplicitComplexCompareViolation])
@pytest.mark.parametrize('code', [
less_and_more,
lesseq_and_more,
less_and_moreeq,
lesseq_and_moreeq,
more_and_less,
moreeq_and_less,
more_and_lesseq,
moreq_and_lesseq,
])
@pytest.mark.parametrize('comparators', [
('a', 'b', 'c', 'b'),
('a', 'b', 'c(k, v)', 'b'),
('a(1)', 'b', 'c', 'b'),
('a', 'b', 'c.attr', 'b'),
('a.method()', 'b', 'c', 'b'),
('a.method(value)', 'b', '1', 'b'),
('a', 'b', '10', 'b'),
('1', 'b', 'c', 'b'),
('1', 'b', '10', 'b'),
('a', 'b', 'c', 'b and other == 1'),
])
def test_implicit_complex_compare_reversed(
code,
comparators,
assert_errors,
parse_ast_tree,
default_options,
):
tree = parse_ast_tree(code.format(*comparators))
visitor = ImplicitBoolPatternsVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ImplicitComplexCompareViolation])
@pytest.mark.parametrize('code', [
more_and_more,
moreeq_and_more,
more_and_moreeq,
moreeq_and_moreeq,
less_and_less,
lesseq_and_less,
less_and_lesseq,
lesseq_and_lesseq,
less_and_more,
lesseq_and_more,
less_and_moreeq,
lesseq_and_moreeq,
more_and_less,
moreeq_and_less,
more_and_lesseq,
moreq_and_lesseq,
])
@pytest.mark.parametrize('comparators', [
('a', 'None', 'b', 'c'),
])
def test_compare_wrong_values(
code,
comparators,
assert_errors,
parse_ast_tree,
default_options,
):
tree = parse_ast_tree(code.format(*comparators))
visitor = ImplicitBoolPatternsVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('code', [
less_or_less,
less_or_more,
more_or_more,
lesseq_or_less,
less_or_lesseq,
lesseq_or_lesseq,
lesseq_or_more,
less_or_moreeq,
lesseq_or_moreeq,
moreeq_or_more,
more_or_moreeq,
moreeq_or_moreeq,
])
@pytest.mark.parametrize('comparators', [
('a', 'b', 'b', 'c'),
('a', 'b', 'a', 'c'),
('a', 'c', 'b', 'c'),
('a', '1', 'a', '2'),
('a', 'b', 'b', 'c and other == 1'),
])
def test_regular_compare(
code,
comparators,
assert_errors,
parse_ast_tree,
default_options,
):
tree = parse_ast_tree(code.format(*comparators))
visitor = ImplicitBoolPatternsVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('code', [
'a < b',
'a > c',
'a and b',
'a or c',
'not a',
])
def test_regular_short_compare(
code,
assert_errors,
parse_ast_tree,
default_options,
):
tree = parse_ast_tree(code)
visitor = ImplicitBoolPatternsVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
| true | true |
1c3201552e943a88d2210587978744feca81cc31 | 1,520 | py | Python | .history/classes/Menu_20171107132254.py | reecebenson/DADSA-Tennis-PartA | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | [
"MIT"
] | null | null | null | .history/classes/Menu_20171107132254.py | reecebenson/DADSA-Tennis-PartA | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | [
"MIT"
] | null | null | null | .history/classes/Menu_20171107132254.py | reecebenson/DADSA-Tennis-PartA | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | [
"MIT"
] | null | null | null | # DADSA - Assignment 1
# Reece Benson
class Menu():
    """Console menu for the DADSA tennis assignment."""

    # Reference back to the owning application.
    _app = None
    # Nested dict describing every menu screen.
    _menu = None
    # Key of the screen currently being shown.
    _current_menu = "main"

    def __init__(self, app):
        # Remember the owning application instance.
        self._app = app

    def load(self):
        """Build the menu tree and render the current screen."""
        self._menu = {
            # Main Menu
            'main': {
                'New Season': 'new_season',
                'Load Season': 'load_season'
            },
            # New Season
            'new_season': {
                'Sub Item 1': self.load_action,
                'Sub Item 2': self.load_action,
                'Sub Item 3': self.load_action,
                'Sub Item 4': self.load_action
            },
            # Load Season
            'load_season': {
                'Sub Item 1': {
                    'Sub Sub Item 1': self.load_action,
                    'Sub Sub Item 2': self.load_action
                }
            }
        }
        self.display()

    def display(self):
        """Print every entry of the current menu screen."""
        shown = 0
        for entry in self.get_current_menu():
            shown += 1
            print(entry)

    def get_current_menu(self):
        """Return the dict for the screen named by ``_current_menu``."""
        return self._menu[self._current_menu]

    def get_input(self):
        #TODO: Get user's input from defined menu
        print("Get Input")

    def load_action(self, menu_id):
        #TODO: Load Action from Menu_ID
        print("Load Action")
class Menu():
_app = None
_menu = None
_current_menu = "main"
def __init__(self, app):
self._app = app
def load(self):
self._menu = {
'main':
{
'New Season': 'new_season',
'Load Season': 'load_season'
},
'new_season':
{
'Sub Item 1': self.load_action,
'Sub Item 2': self.load_action,
'Sub Item 3': self.load_action,
'Sub Item 4': self.load_action
},
'load_season':
{
'Sub Item 1': {
'Sub Sub Item 1': self.load_action,
'Sub Sub Item 2': self.load_action
}
}
}
self.display()
def display(self):
cur_count = 0
m = self.get_current_menu()
for i in m:
cur_count += 1
print(i)
def get_current_menu(self):
index = self._current_menu
return self._menu[index]
def get_input(self):
print("Get Input")
def load_action(self, menu_id):
#TODO: Load Action from Menu_ID
print("Load Action") | true | true |
1c32015a3c35228c38c5bac706f794e1cdc33050 | 7,376 | py | Python | validation/utils/m1.py | PedrV/stfX | 017436cd4ade7f0ea95185d82408697c43ac6ce6 | [
"MIT"
] | null | null | null | validation/utils/m1.py | PedrV/stfX | 017436cd4ade7f0ea95185d82408697c43ac6ce6 | [
"MIT"
] | null | null | null | validation/utils/m1.py | PedrV/stfX | 017436cd4ade7f0ea95185d82408697c43ac6ce6 | [
"MIT"
] | null | null | null | import unittest
import os
from matplotlib import pyplot as plt
from shapely import geometry, affinity
# Indices of the coordinate components inside a vertex pair.
X_COORDINATE = 0
Y_COORDINATE = 1


def extract_x_y(polygon: list) -> (list, list):
    """Split a polygon's vertices into parallel x and y coordinate lists."""
    x_list = [vertex[X_COORDINATE] for vertex in polygon]
    y_list = [vertex[Y_COORDINATE] for vertex in polygon]
    return (x_list, y_list)
def save_fig(dir: str):
    """Save the current matplotlib figure as ``<dir>/m1.png``, then clear it."""
    target = dir + '/m1.png'
    plt.savefig(target)
    plt.clf()
def plot_polygons(hull: list, min_hull: list, perceived_poly: list, real_poly: list, dir: str = None):
    """Draw both hulls (filled) and both polygons (outlines + vertex markers).

    Red shades mark the perceived polygon / joint hull, blue shades the real
    polygon / minimum hull.  When ``dir`` is given the figure is saved there.
    """
    hull_x, hull_y = extract_x_y(hull)
    min_x, min_y = extract_x_y(min_hull)
    perc_x, perc_y = extract_x_y(perceived_poly)
    real_x, real_y = extract_x_y(real_poly)

    # Figure settings
    plt.figure()
    plt.xlabel('x')
    plt.ylabel('y')

    # Filled hull areas (translucent).
    plt.fill(hull_x, hull_y, color="#FF000020")
    plt.fill(min_x, min_y, color="#0000FF20")

    # Polygon outlines: red = perceived, blue = real.
    plt.plot(perc_x, perc_y, color="#FF000060")
    plt.plot(real_x, real_y, color="#0000FF60")

    # Individual vertices: circles for perceived, crosses for real.
    for px, py in zip(perc_x, perc_y):
        plt.plot(px, py, 'o', color="#FF0000A0")
    for rx, ry in zip(real_x, real_y):
        plt.plot(rx, ry, 'x', color="#0000FFA0")

    if dir is not None:
        save_fig(dir)
def surveyor_formula(polygon: list) -> float:
    """Find the area of the given polygon using the surveyor (shoelace) formula.

    Args:
        polygon(list): vertices as (x, y) pairs; a repeated closing vertex
            (first == last) is tolerated and ignored.

    Returns:
        the enclosed area as a non-negative float
    """
    # Drop the duplicated closing vertex, if present (idiomatic negative
    # indexing instead of polygon[len(polygon) - 1]).
    vertices = polygon[:-1] if polygon[0] == polygon[-1] else polygon
    twice_area = 0.0
    # Shoelace sum over cyclically consecutive vertex pairs; i = -1 pairs
    # the last vertex with the first one.
    for i in range(-1, len(vertices) - 1):
        x1, y1 = vertices[i][0], vertices[i][1]
        x2, y2 = vertices[i + 1][0], vertices[i + 1][1]
        twice_area += x1 * y2 - y1 * x2
    return abs(twice_area / 2)
def polygon_to_vertices_list(polygon: geometry.Polygon) -> list:
    """Return the exterior-ring coordinates of *polygon* as a plain list."""
    return [*polygon.exterior.coords]
def apply_transformations(initial_representation: list, events: list) -> list:
    """Apply the transformations in the events list to the initial representation

    All TRANSLATION vectors are summed, ROTATION angles (degrees) summed and
    UNIFORM_SCALE factors multiplied across every event, then the composite
    is applied once in the fixed order scale -> rotate -> translate, each
    about the origin (0, 0).
    NOTE(review): this single composite matches replaying the events one by
    one only when that fixed order reflects how the events were produced --
    confirm with the event source.

    Args:
        initial_representation(list): polygon vertices as (x, y) pairs
        events(list): items whose "events" list holds typed transformations

    Returns:
        the transformed polygon vertices as a list of (x, y) tuples
        (the original ``-> float`` annotation did not match the code)
    """
    scale = 1
    rot_angle = 0
    trans_vector = [0, 0]
    # Accumulate the net transformation over every event batch.
    for item in events:
        for event in item["events"]:
            if event["type"] == "TRANSLATION":
                trans_vector[X_COORDINATE] += event["trigger"]["transformation"][X_COORDINATE]
                trans_vector[Y_COORDINATE] += event["trigger"]["transformation"][Y_COORDINATE]
            elif event["type"] == "ROTATION":
                rot_angle += event["trigger"]["transformation"]
            elif event["type"] == "UNIFORM_SCALE":
                scale *= event["trigger"]["transformation"]
    # Apply the composite: scale, then rotate, then translate (origin-based).
    polygon = geometry.Polygon(initial_representation)
    s_polygon = affinity.scale(polygon,
                               xfact=scale,
                               yfact=scale,
                               origin=(0, 0))
    r_s_polygon = affinity.rotate(s_polygon,
                                  rot_angle,
                                  origin=(0, 0))
    t_r_s_polygon = affinity.translate(r_s_polygon,
                                       xoff=trans_vector[0],
                                       yoff=trans_vector[1])
    return polygon_to_vertices_list(t_r_s_polygon)
def apply_m1(real_representation: list, perceived_representation: list, dir: str = None) -> float:
    """Apply the metric M1 and obtain its result, between 0 and 1

    M1 = min(area(hull(real)), area(hull(perceived)))
         / area(hull(real + perceived)),
    so identical shapes score 1 and disjoint/mismatched shapes approach 0.
    As a side effect the hulls and polygons are plotted (saved to ``dir``
    when given) via plot_polygons.

    Args:
        real_representation(list): vertices of the real shape
        perceived_representation(list): vertices of the perceived shape
        dir(str): optional output directory for the figure

    Returns:
        the M1 score (float) in [0, 1]
    """
    joint_point_set = real_representation + perceived_representation
    # Getting necessary hulls
    real_convex_hull = geometry.MultiPoint(real_representation).convex_hull
    perceived_hull = geometry.MultiPoint(perceived_representation).convex_hull
    convex_hull = geometry.MultiPoint(joint_point_set).convex_hull
    # Getting vertices of hulls
    real_vertices = polygon_to_vertices_list(real_convex_hull)
    perceived_vertices = polygon_to_vertices_list(perceived_hull)
    joint_vertices = polygon_to_vertices_list(convex_hull)
    # Getting the min area of the two individual hulls
    real_area = surveyor_formula(real_vertices)
    perceived_area = surveyor_formula(perceived_vertices)
    if real_area <= perceived_area:
        min_area = real_area
        min_vertices = real_vertices
    else:
        min_area = perceived_area
        min_vertices = perceived_vertices
    plot_polygons(hull=joint_vertices,
                  min_hull=min_vertices,
                  perceived_poly=perceived_representation,
                  real_poly=real_representation,
                  dir=dir)
    return min_area / surveyor_formula(joint_vertices)
class TestM1(unittest.TestCase):
    """Unit tests for the M1 metric and its helper functions."""

    def __init__(self, *args, **kwargs):
        super(TestM1, self).__init__(*args, **kwargs)
        # Closed unit-square representation (first vertex repeated at end).
        self.representation = [
            [1, 1],
            [1, -1],
            [-1, -1],
            [-1, 1],
            [1, 1]
        ]
        # Two event batches; net effect: translate (10, 5), rotate 90°, scale 2.
        self.transformations = [{
            "events": [
                {"type": "TRANSLATION", "trigger": {"transformation": [5, 5]}},
                {"type": "ROTATION", "trigger": {"transformation": 180}},
                {"type": "UNIFORM_SCALE", "trigger": {"transformation": 1.25}}
            ]
        }, {
            "events": [
                {"type": "TRANSLATION", "trigger": {"transformation": [5, 0]}},
                {"type": "ROTATION", "trigger": {"transformation": -90}},
                {"type": "UNIFORM_SCALE", "trigger": {"transformation": 1.6}}
            ]
        }]
        # Single halving scale, used by test_mean_perceived.
        self.min_scale = [{
            "events": [
                {"type": "UNIFORM_SCALE", "trigger": {"transformation": 0.5}}
            ]
        }]

    def test_area(self):
        """surveyor_formula handles both open and closed polygons."""
        square = [
            [1, 1],
            [1, -1],
            [-1, -1],
            [-1, 1]
        ]
        self.assertEqual(surveyor_formula(square), 4)
        self.assertEqual(surveyor_formula(self.representation), 4)

    def test_transformations(self):
        """The composite transform maps the unit square onto the expected square."""
        self.assertEqual(apply_transformations(self.representation, self.transformations), [
            (8.0, 7.0),
            (12.0, 7.0),
            (12.0, 3.0),
            (8.0, 3.0),
            (8.0, 7.0),
        ])

    def test_M1(self):
        """M1 is 1 for identical shapes and near 0 for disjoint ones."""
        self.assertEqual(apply_m1(self.representation, self.representation), 1)
        self.assertTrue(apply_m1(self.representation,
                                 apply_transformations(self.representation, self.transformations))
                        < 0.1)
        self.assertEqual(apply_m1([
            (8.0, 7.0),
            (12.0, 7.0),
            (12.0, 3.0),
            (8.0, 3.0),
            (8.0, 7.0)],
            apply_transformations(self.representation, self.transformations)),
            1)

    def test_mean_perceived(self):
        """Halving each side reduces the area ratio, and M1, to 0.25."""
        self.assertEqual(apply_m1(self.representation,
                                  apply_transformations(self.representation, self.min_scale)),
                         0.25)
if __name__ == '__main__':
unittest.main()
| 33.990783 | 102 | 0.590564 | import unittest
import os
from matplotlib import pyplot as plt
from shapely import geometry, affinity
X_COORDINATE = 0
Y_COORDINATE = 1
def extract_x_y(polygon: list) -> (list, list):
x_list = []
y_list = []
for vertex in polygon:
x_list.append(vertex[X_COORDINATE])
y_list.append(vertex[Y_COORDINATE])
return (x_list, y_list)
def save_fig(dir: str):
plt.savefig(dir + '/m1.png')
plt.clf()
def plot_polygons(hull: list, min_hull: list, perceived_poly: list, real_poly: list, dir: str = None):
h1_x, h1_y = extract_x_y(hull)
h2_x, h2_y = extract_x_y(min_hull)
p1_x, p1_y = extract_x_y(perceived_poly)
p2_x, p2_y = extract_x_y(real_poly)
fig = plt.figure()
plt.xlabel('x')
plt.ylabel('y')
plt.fill(h1_x, h1_y, color="#FF000020")
plt.fill(h2_x, h2_y, color="#0000FF20")
plt.plot(p1_x, p1_y, color="#FF000060")
plt.plot(p2_x, p2_y, color="#0000FF60")
for p in perceived_poly:
plt.plot(p[X_COORDINATE], p[Y_COORDINATE], 'o', color="#FF0000A0")
for p in real_poly:
plt.plot(p[X_COORDINATE], p[Y_COORDINATE], 'x', color="#0000FFA0")
if dir is not None:
save_fig(dir)
def surveyor_formula(polygon: list) -> float:
parsed_poly = polygon[0:-1]\
if polygon[0] == polygon[len(polygon)-1]\
else polygon
area = 0
for i in range(-1, len(parsed_poly)-1):
area += parsed_poly[i][X_COORDINATE] * parsed_poly[i+1][Y_COORDINATE] -\
parsed_poly[i][Y_COORDINATE] * parsed_poly[i+1][X_COORDINATE]
return abs(area / 2)
def polygon_to_vertices_list(polygon: geometry.Polygon) -> list:
return list(polygon.exterior.coords)
def apply_transformations(initial_representation: list, events: list) -> float:
scale = 1
rot_angle = 0
trans_vector = [0, 0]
for item in events:
for event in item["events"]:
if event["type"] == "TRANSLATION":
trans_vector[X_COORDINATE] += event["trigger"]["transformation"][X_COORDINATE]
trans_vector[Y_COORDINATE] += event["trigger"]["transformation"][Y_COORDINATE]
elif event["type"] == "ROTATION":
rot_angle += event["trigger"]["transformation"]
elif event["type"] == "UNIFORM_SCALE":
scale *= event["trigger"]["transformation"]
polygon = geometry.Polygon(initial_representation)
s_polygon = affinity.scale(polygon,
xfact=scale,
yfact=scale,
origin=(0, 0))
r_s_polygon = affinity.rotate(s_polygon,
rot_angle,
origin=(0, 0))
t_r_s_polygon = affinity.translate(r_s_polygon,
xoff=trans_vector[0],
yoff=trans_vector[1])
return polygon_to_vertices_list(t_r_s_polygon)
def apply_m1(real_representation: list, perceived_representation: list, dir: str = None) -> float:
joint_point_set = real_representation + perceived_representation
real_convex_hull = geometry.MultiPoint(real_representation).convex_hull
perceived_hull = geometry.MultiPoint(perceived_representation).convex_hull
convex_hull = geometry.MultiPoint(joint_point_set).convex_hull
real_vertices = polygon_to_vertices_list(real_convex_hull)
perceived_vertices = polygon_to_vertices_list(perceived_hull)
joint_vertices = polygon_to_vertices_list(convex_hull)
real_area = surveyor_formula(real_vertices)
perceived_area = surveyor_formula(perceived_vertices)
if real_area <= perceived_area:
min_area = real_area
min_vertices = real_vertices
else:
min_area = perceived_area
min_vertices = perceived_vertices
plot_polygons(hull=joint_vertices,
min_hull=min_vertices,
perceived_poly=perceived_representation,
real_poly=real_representation,
dir=dir)
return min_area / surveyor_formula(joint_vertices)
class TestM1(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestM1, self).__init__(*args, **kwargs)
self.representation = [
[1, 1],
[1, -1],
[-1, -1],
[-1, 1],
[1, 1]
]
self.transformations = [{
"events": [
{"type": "TRANSLATION", "trigger": {"transformation": [5, 5]}},
{"type": "ROTATION", "trigger": {"transformation": 180}},
{"type": "UNIFORM_SCALE", "trigger": {"transformation": 1.25}}
]
}, {
"events": [
{"type": "TRANSLATION", "trigger": {"transformation": [5, 0]}},
{"type": "ROTATION", "trigger": {"transformation": -90}},
{"type": "UNIFORM_SCALE", "trigger": {"transformation": 1.6}}
]
}]
self.min_scale = [{
"events": [
{"type": "UNIFORM_SCALE", "trigger": {"transformation": 0.5}}
]
}]
def test_area(self):
square = [
[1, 1],
[1, -1],
[-1, -1],
[-1, 1]
]
self.assertEqual(surveyor_formula(square), 4)
self.assertEqual(surveyor_formula(self.representation), 4)
def test_transformations(self):
self.assertEqual(apply_transformations(self.representation, self.transformations), [
(8.0, 7.0),
(12.0, 7.0),
(12.0, 3.0),
(8.0, 3.0),
(8.0, 7.0),
])
def test_M1(self):
self.assertEqual(apply_m1(self.representation, self.representation), 1)
self.assertTrue(apply_m1(self.representation,
apply_transformations(self.representation, self.transformations))
< 0.1)
self.assertEqual(apply_m1([
(8.0, 7.0),
(12.0, 7.0),
(12.0, 3.0),
(8.0, 3.0),
(8.0, 7.0)],
apply_transformations(self.representation, self.transformations)),
1)
def test_mean_perceived(self):
self.assertEqual(apply_m1(self.representation,
apply_transformations(self.representation, self.min_scale)),
0.25)
if __name__ == '__main__':
unittest.main()
| true | true |
1c32021dbc6606cac205c70d2190b3573b2a43c5 | 132 | py | Python | major_leagues/__init__.py | jvolden/major_leagues | 1245baab2c4af92285fe3a026391e429cec5af57 | [
"MIT"
] | null | null | null | major_leagues/__init__.py | jvolden/major_leagues | 1245baab2c4af92285fe3a026391e429cec5af57 | [
"MIT"
] | null | null | null | major_leagues/__init__.py | jvolden/major_leagues | 1245baab2c4af92285fe3a026391e429cec5af57 | [
"MIT"
] | null | null | null | """Top-level package for Major Leagues."""
__author__ = """Jon Patrick Volden"""
__email__ = 'volden@ku.edu'
__version__ = '0.1.0'
| 22 | 42 | 0.681818 |
__author__ = """Jon Patrick Volden"""
__email__ = 'volden@ku.edu'
__version__ = '0.1.0'
| true | true |
1c3202268e3ed7be78e98ac5031be316427fe925 | 4,488 | py | Python | exp.py | tenagusami-ms/exp | 9b439a768d5788baf3f882282643aa72b9ffd314 | [
"MIT"
] | null | null | null | exp.py | tenagusami-ms/exp | 9b439a768d5788baf3f882282643aa72b9ffd314 | [
"MIT"
] | null | null | null | exp.py | tenagusami-ms/exp | 9b439a768d5788baf3f882282643aa72b9ffd314 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
"""Overview:
exp.py : open a directory or a file looked from WSL2 with Windows Explorer
if it is in the Windows filesystem.
If no path is specified, current directory is opened.
Usage:
exp.py [<path>]
exp.py -h | --help
Options:
-h --help Show this screen and exit.
"""
from __future__ import annotations
import dataclasses
import os
from subprocess import run
import re
import pathlib as p
import sys
from functools import reduce
from typing import Optional, MutableMapping
from docopt import docopt
from schema import Schema, SchemaError, Use, And
def main() -> None:
    """Entry point: open the command-line path with Windows Explorer.

    Exits with status 1 when run natively on Windows (not WSL2), when the
    path cannot be inspected from Windows, or on keyboard interrupt.
    """
    if os.name == "nt":
        print(f"This tool {__file__} is usable only on WSL2.\n")
        sys.exit(1)
    try:
        opts: Options = read_options()
        explorer_exe: p.Path = p.Path("/mnt/c/Windows/explorer.exe")
        open_on_windows(explorer_exe, opts.path)
    except (UsageError, NotInspectableError) as err:
        sys.stderr.write(err.args[0])
        sys.exit(1)
    except KeyboardInterrupt:
        sys.exit(1)
class Error(Exception):
    """Root of the module's exception hierarchy."""
    pass
class NotInspectableError(Error):
    """
    Raised when the requested path cannot be inspected from Windows:
    it lives on the WSL2-only filesystem, or it does not exist
    (read_options converts the schema validation failure into this error).
    """
    pass
class UsageError(Error):
    """
    Raised when a function receives an argument it cannot handle,
    e.g. a path that is not a valid /mnt/<drive> WSL2 mount path.
    """
    pass
@dataclasses.dataclass
class Options:
    """
    Parsed command-line arguments and options.
    """
    # The resolved path to open with Windows Explorer.
    path: p.Path
def read_options() -> Options:
    """Parse and validate the command-line arguments.

    Returns:
        validated options (Options)
    Raises:
        NotInspectableError: the given path does not name an existing
            file or directory.
    """
    raw: MutableMapping = docopt(__doc__)
    # Message shown when the (resolved) path exists neither as a file
    # nor as a directory.
    missing_msg = (f"The specified path {raw['<path>']}"
                   " does not exist.\n")
    validator = Schema({
        "<path>": And(Use(get_path),
                      lambda candidate: candidate.is_file() or candidate.is_dir(),
                      error=missing_msg)
    })
    try:
        validated = validator.validate(raw)
    except SchemaError as error:
        raise NotInspectableError(error.args[0])
    return Options(validated["<path>"])
def wsl2_full_path2windows_path(wsl2_path: p.Path) -> p.PureWindowsPath:
    """Translate a WSL2 mount path (/mnt/<drive>/...) into a Windows path.

    Args:
        wsl2_path(pathlib.Path): a path below a /mnt/<drive> mount point
    Returns:
        the equivalent Windows path (pathlib.PureWindowsPath)
    Raises:
        UsageError: wsl2_path does not start with /mnt/<single drive letter>.
    """
    matched = re.match(r"^/mnt/([a-z])(/?.*)", wsl2_path.as_posix())
    if matched is None:
        raise UsageError(f"The input path {wsl2_path.as_posix()} is not a correct WSL2 path "
                         f"(function {wsl2_full_path2windows_path.__name__} "
                         f"in module {__name__}).\n")
    drive, remainder = matched.groups()
    # Start from "<drive>:\" and append each posix path component in turn.
    windows_path: p.PureWindowsPath = p.PureWindowsPath(rf"{drive}:\\")
    for component in p.Path(remainder).parts:
        windows_path = windows_path.joinpath(component)
    return windows_path
def is_wsl2_path(path: p.PurePath) -> bool:
    """Report whether *path* points below a Windows drive mounted under /mnt.

    Args:
        path(pathlib.PurePath): a path
    Returns:
        True when the path starts with /mnt/<single drive letter>/.
    """
    posix_form = path.as_posix()
    return bool(re.match(r"^/mnt/[a-z]/", posix_form))
def get_path(path_str: Optional[str]) -> p.Path:
    """Resolve the command-line path argument to an absolute Path.

    Falls back to the current working directory when the argument is
    missing (None) or empty.

    Args:
        path_str(str): the raw command-line argument, possibly None
    Returns:
        the resolved absolute path (pathlib.Path)
    """
    target = path_str if path_str else "."
    return p.Path(target).resolve()
def open_on_windows(explorer: p.Path, path: p.Path) -> None:
    """Launch Windows Explorer on *path*.

    Args:
        explorer(pathlib.Path): path to explorer.exe as seen from WSL2
        path(pathlib.Path): the path to open
    Raises:
        NotInspectableError: the path is not under /mnt/<drive>/ and is
            therefore unreachable from the Windows side.
    """
    if not is_wsl2_path(path):
        raise NotInspectableError(
            f"The specified path {path.as_posix()} is not in the windows filesystem "
            f"(function {open_on_windows.__name__} "
            f"in module {__name__}).\n")
    run([explorer, wsl2_full_path2windows_path(path)])
if __name__ == '__main__':
    # Script entry point: main() handles its own error exits (status 1);
    # reaching this line means success.
    main()
    sys.exit(0)
| 26.093023 | 93 | 0.623217 |
from __future__ import annotations
import dataclasses
import os
from subprocess import run
import re
import pathlib as p
import sys
from functools import reduce
from typing import Optional, MutableMapping
from docopt import docopt
from schema import Schema, SchemaError, Use, And
def main() -> None:
    """Open the command-line path with Windows Explorer; exit 1 on error."""
    # This tool only makes sense inside WSL2, not on native Windows.
    if os.name == "nt":
        print(f"This tool {__file__} is usable only on WSL2.\n")
        sys.exit(1)
    try:
        options: Options = read_options()
        explorer: p.Path = p.Path(r"/mnt") / "c" / "Windows" / "explorer.exe"
        open_on_windows(explorer, options.path)
    except(UsageError, NotInspectableError) as e:
        sys.stderr.write(e.args[0])
        sys.exit(1)
    except KeyboardInterrupt:
        sys.exit(1)
class Error(Exception):
    """Root of the module's exception hierarchy."""
    pass
class NotInspectableError(Error):
    """Raised when the path cannot be inspected from Windows or is missing."""
    pass
class UsageError(Error):
    """Raised when a function receives an argument it cannot handle."""
    pass
@dataclasses.dataclass
class Options:
    """Parsed command-line arguments."""
    # The resolved path to open with Windows Explorer.
    path: p.Path
def read_options() -> Options:
    """Parse the command line and validate that the path exists.

    Returns:
        Options carrying the resolved path.
    Raises:
        NotInspectableError: the path is neither a file nor a directory.
    """
    args: MutableMapping = docopt(__doc__)
    schema = Schema({
        "<path>": And(Use(get_path), lambda path: path.is_file() or path.is_dir(),
                      error=f"The specified path {args['<path>']}"
                            " does not exist.\n")
    })
    try:
        args = schema.validate(args)
    except SchemaError as e:
        raise NotInspectableError(e.args[0])
    return Options(args["<path>"])
def wsl2_full_path2windows_path(wsl2_path: p.Path) -> p.PureWindowsPath:
    """Convert a WSL2 /mnt/<drive>/... path to the matching Windows path.

    Raises:
        UsageError: wsl2_path is not a correct WSL2 mount path.
    """
    try:
        [(drive, path)] = re.findall(r"^/mnt/([a-z])(/?.*)", wsl2_path.as_posix())
    except ValueError:
        raise UsageError(f"The input path {wsl2_path.as_posix()} is not a correct WSL2 path "
                         f"(function {wsl2_full_path2windows_path.__name__} "
                         f"in module {__name__}).\n")
    # Rebuild the path component by component on top of "<drive>:\".
    return reduce(lambda reduced, name: reduced.joinpath(name), p.Path(path).parts,
                  p.PureWindowsPath(rf"{drive}:\\"))
def is_wsl2_path(path: p.PurePath) -> bool:
    """Return True when *path* starts with /mnt/<single drive letter>/."""
    return re.match(r"^/mnt/[a-z]/", path.as_posix()) is not None
def get_path(path_str: Optional[str]) -> p.Path:
    """Resolve the argument to an absolute path; default to the CWD."""
    if path_str is None or len(path_str) == 0:
        return p.Path(".").resolve()
    return p.Path(path_str).resolve()
def open_on_windows(explorer: p.Path, path: p.Path) -> None:
    """Open *path* with Windows Explorer; only /mnt/<drive> paths qualify.

    Raises:
        NotInspectableError: the path is not on the Windows filesystem.
    """
    if is_wsl2_path(path):
        windows_path: p.PureWindowsPath = wsl2_full_path2windows_path(path)
        run([explorer, windows_path])
        return
    raise NotInspectableError(
        f"The specified path {path.as_posix()} is not in the windows filesystem "
        f"(function {open_on_windows.__name__} "
        f"in module {__name__}).\n")
if __name__ == '__main__':
    # Run the CLI; error paths exit with status 1 inside main().
    main()
    sys.exit(0)
| true | true |
1c320305e20833fd746725118724dbff700e7fbd | 22,271 | py | Python | cvxpy/tests/test_examples.py | rostyboost/cvxpy | 0eb2b20dab92407e4b45f13b6cc124ce96859515 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-10-21T22:15:55.000Z | 2020-10-21T22:15:55.000Z | cvxpy/tests/test_examples.py | yfzheng11/cvxpy | 95e728b01b6bb442c924812c7eac631019c5cbc6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cvxpy/tests/test_examples.py | yfzheng11/cvxpy | 95e728b01b6bb442c924812c7eac631019c5cbc6 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-04-12T22:40:22.000Z | 2019-04-12T22:40:22.000Z | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import cvxpy as cvx
import cvxpy.interface as intf
from cvxpy.tests.base_test import BaseTest
from cvxpy.reductions.solvers.conic_solvers import ecos_conif
import numpy as np
import unittest
class TestExamples(BaseTest):
""" Unit tests using example problems. """
# Find the largest Euclidean ball in the polyhedron.
    def test_chebyshev_center(self):
        """Find the largest inscribed Euclidean ball of a polyhedron."""
        # The goal is to find the largest Euclidean ball (i.e. its center and
        # radius) that lies in a polyhedron described by linear inequalities in this
        # fashion: P = {x : a_i'*x <= b_i, i=1,...,m} where x is in R^2
        # Generate the input data
        a1 = np.array([2, 1])
        a2 = np.array([2, -1])
        a3 = np.array([-1, 2])
        a4 = np.array([-1, -2])
        b = np.ones(4)
        # Create and solve the model
        r = cvx.Variable(name='r')
        x_c = cvx.Variable(2, name='x_c')
        obj = cvx.Maximize(r)
        constraints = [  # TODO have atoms compute values for constants.
            a1.T*x_c + np.linalg.norm(a1)*r <= b[0],
            a2.T*x_c + np.linalg.norm(a2)*r <= b[1],
            a3.T*x_c + np.linalg.norm(a3)*r <= b[2],
            a4.T*x_c + np.linalg.norm(a4)*r <= b[3],
        ]
        p = cvx.Problem(obj, constraints)
        result = p.solve()
        self.assertAlmostEqual(result, 0.447214)
        self.assertAlmostEqual(r.value, result)
        self.assertItemsAlmostEqual(x_c.value, [0, 0])
# Test issue with numpy scalars.
    def test_numpy_scalars(self):
        """Regression test: numpy-scalar dual values compose with matrices."""
        n = 6
        eps = 1e-6
        np.random.seed(10)
        P0 = np.random.randn(n, n)
        eye = np.eye(n)
        P0 = P0.T.dot(P0) + eps * eye
        print(P0)
        P1 = np.random.randn(n, n)
        P1 = P1.T.dot(P1)
        P2 = np.random.randn(n, n)
        P2 = P2.T.dot(P2)
        P3 = np.random.randn(n, n)
        P3 = P3.T.dot(P3)
        q0 = np.random.randn(n, 1)
        q1 = np.random.randn(n, 1)
        q2 = np.random.randn(n, 1)
        q3 = np.random.randn(n, 1)
        r0 = np.random.randn(1, 1)
        r1 = np.random.randn(1, 1)
        r2 = np.random.randn(1, 1)
        r3 = np.random.randn(1, 1)
        slack = cvx.Variable()
        # Form the problem
        x = cvx.Variable(n)
        objective = cvx.Minimize(0.5*cvx.quad_form(x, P0) + q0.T*x + r0 + slack)
        constraints = [0.5*cvx.quad_form(x, P1) + q1.T*x + r1 <= slack,
                       0.5*cvx.quad_form(x, P2) + q2.T*x + r2 <= slack,
                       0.5*cvx.quad_form(x, P3) + q3.T*x + r3 <= slack,
                       ]
        # We now find the primal result and compare it to the dual result
        # to check if strong duality holds i.e. the duality gap is effectively zero
        p = cvx.Problem(objective, constraints)
        p.solve()
        # Note that since our data is random,
        # we may need to run this program multiple times to get a feasible primal
        # When feasible, we can print out the following values
        print(x.value)  # solution
        lam1 = constraints[0].dual_value
        lam2 = constraints[1].dual_value
        lam3 = constraints[2].dual_value
        print(type(lam1))
        P_lam = P0 + lam1*P1 + lam2*P2 + lam3*P3
        q_lam = q0 + lam1*q1 + lam2*q2 + lam3*q3
        r_lam = r0 + lam1*r1 + lam2*r2 + lam3*r3
        dual_result = -0.5*q_lam.T.dot(P_lam).dot(q_lam) + r_lam
        print(dual_result.shape)
        self.assertEqual(intf.shape(dual_result), (1, 1))
# Tests examples from the README.
    def test_readme_examples(self):
        """Smoke-test the code samples that appear in the project README."""
        import numpy
        numpy.random.seed(1)
        # cvx.Problem data.
        m = 30
        n = 20
        A = numpy.random.randn(m, n)
        b = numpy.random.randn(m)
        # Construct the problem.
        x = cvx.Variable(n)
        objective = cvx.Minimize(cvx.sum_squares(A*x - b))
        constraints = [0 <= x, x <= 1]
        p = cvx.Problem(objective, constraints)
        # The optimal objective is returned by p.solve().
        p.solve()
        # The optimal value for x is stored in x.value.
        print(x.value)
        # The optimal Lagrange multiplier for a constraint
        # is stored in constraint.dual_value.
        print(constraints[0].dual_value)
        ####################################################
        # Scalar variable.
        a = cvx.Variable()
        # Column vector variable of length 5.
        x = cvx.Variable(5)
        # Matrix variable with 4 rows and 7 columns.
        A = cvx.Variable((4, 7))
        ####################################################
        # Positive scalar parameter.
        m = cvx.Parameter(nonneg=True)
        # Column vector parameter with unknown sign (by default).
        cvx.Parameter(5)
        # Matrix parameter with negative entries.
        G = cvx.Parameter((4, 7), nonpos=True)
        # Assigns a constant value to G.
        G.value = -numpy.ones((4, 7))
        # Raises an error for assigning a value with invalid sign.
        with self.assertRaises(Exception) as cm:
            G.value = numpy.ones((4, 7))
        self.assertEqual(str(cm.exception), "Parameter value must be nonpositive.")
        ####################################################
        a = cvx.Variable()
        x = cvx.Variable(5)
        # expr is an Expression object after each assignment.
        expr = 2*x
        expr = expr - a
        expr = cvx.sum(expr) + cvx.norm(x, 2)
        ####################################################
        import numpy as np
        # cvx.Problem data.
        n = 10
        m = 5
        A = np.random.randn(n, m)
        b = np.random.randn(n)
        gamma = cvx.Parameter(nonneg=True)
        # Construct the problem.
        x = cvx.Variable(m)
        objective = cvx.Minimize(cvx.sum_squares(A*x - b) + gamma*cvx.norm(x, 1))
        p = cvx.Problem(objective)
        # Assign a value to gamma and find the optimal x.
        def get_x(gamma_value):
            gamma.value = gamma_value
            p.solve()
            return x.value
        gammas = np.logspace(-1, 2, num=2)
        # Serial computation.
        [get_x(value) for value in gammas]
        ####################################################
        n = 10
        mu = np.random.randn(1, n)
        sigma = np.random.randn(n, n)
        sigma = sigma.T.dot(sigma)
        gamma = cvx.Parameter(nonneg=True)
        gamma.value = 1
        x = cvx.Variable(n)
        # Constants:
        # mu is the vector of expected returns.
        # sigma is the covariance matrix.
        # gamma is a cvx.Parameter that trades off risk and return.
        # cvx.Variables:
        # x is a vector of stock holdings as fractions of total assets.
        expected_return = mu*x
        risk = cvx.quad_form(x, sigma)
        objective = cvx.Maximize(expected_return - gamma*risk)
        p = cvx.Problem(objective, [cvx.sum(x) == 1])
        p.solve()
        # The optimal expected return.
        print(expected_return.value)
        # The optimal risk.
        print(risk.value)
        ###########################################
        N = 50
        M = 40
        n = 10
        data = []
        for i in range(N):
            data += [(1, np.random.normal(loc=1.0, scale=2.0, size=n))]
        for i in range(M):
            data += [(-1, np.random.normal(loc=-1.0, scale=2.0, size=n))]
        # Construct problem.
        gamma = cvx.Parameter(nonneg=True)
        gamma.value = 0.1
        # 'a' is a variable constrained to have at most 6 non-zero entries.
        a = cvx.Variable(n)  # mi.SparseVar(n, nonzeros=6)
        b = cvx.Variable()
        slack = [cvx.pos(1 - label*(sample.T*a - b)) for (label, sample) in data]
        objective = cvx.Minimize(cvx.norm(a, 2) + gamma*sum(slack))
        p = cvx.Problem(objective)
        # Extensions can attach new solve methods to the CVXPY cvx.Problem class.
        # p.solve(method="admm")
        p.solve()
        # Count misclassifications.
        errors = 0
        for label, sample in data:
            if label*(sample.T*a - b).value < 0:
                errors += 1
        print("%s misclassifications" % errors)
        print(a.value)
        print(b.value)
    def test_advanced1(self):
        """Code from the advanced tutorial.

        Solves one small problem with every locally installed solver and
        checks they agree on the optimal value (6).
        """
        # Solving a problem with different solvers.
        x = cvx.Variable(2)
        obj = cvx.Minimize(x[0] + cvx.norm(x, 1))
        constraints = [x >= 2]
        prob = cvx.Problem(obj, constraints)
        # Solve with ECOS.
        prob.solve(solver=cvx.ECOS)
        print("optimal value with ECOS:", prob.value)
        self.assertAlmostEqual(prob.value, 6)
        # Solve with ECOS_BB.
        prob.solve(solver=cvx.ECOS_BB)
        print("optimal value with ECOS_BB:", prob.value)
        self.assertAlmostEqual(prob.value, 6)
        # Solve with CVXOPT.
        if cvx.CVXOPT in cvx.installed_solvers():
            prob.solve(solver=cvx.CVXOPT)
            print("optimal value with CVXOPT:", prob.value)
            self.assertAlmostEqual(prob.value, 6)
        # Solve with SCS.
        prob.solve(solver=cvx.SCS)
        print("optimal value with SCS:", prob.value)
        self.assertAlmostEqual(prob.value, 6, places=2)
        if cvx.CPLEX in cvx.installed_solvers():
            # Solve with CPLEX.
            prob.solve(solver=cvx.CPLEX)
            print("optimal value with CPLEX:", prob.value)
            self.assertAlmostEqual(prob.value, 6)
        if cvx.GLPK in cvx.installed_solvers():
            # Solve with GLPK.
            prob.solve(solver=cvx.GLPK)
            print("optimal value with GLPK:", prob.value)
            self.assertAlmostEqual(prob.value, 6)
            # Solve with GLPK_MI.
            prob.solve(solver=cvx.GLPK_MI)
            print("optimal value with GLPK_MI:", prob.value)
            self.assertAlmostEqual(prob.value, 6)
        if cvx.GUROBI in cvx.installed_solvers():
            # Solve with Gurobi.
            prob.solve(solver=cvx.GUROBI)
            print("optimal value with GUROBI:", prob.value)
            self.assertAlmostEqual(prob.value, 6)
        print(cvx.installed_solvers())
    def test_log_det(self):
        """Ellipsoid-fitting example: maximize log_det under norm constraints."""
        # Generate data
        x = np.array([[0.55, 0.0],
                      [0.25, 0.35],
                      [-0.2, 0.2],
                      [-0.25, -0.1],
                      [-0.0, -0.3],
                      [0.4, -0.2]]).T
        (n, m) = x.shape
        # Create and solve the model
        A = cvx.Variable((n, n))
        b = cvx.Variable(n)
        obj = cvx.Maximize(cvx.log_det(A))
        constraints = []
        for i in range(m):
            constraints.append(cvx.norm(A*x[:, i] + b) <= 1)
        p = cvx.Problem(obj, constraints)
        result = p.solve()
        self.assertAlmostEqual(result, 1.9746, places=2)
    def test_portfolio_problem(self):
        """Test portfolio problem that caused dcp_attr errors.

        Only checks that building the expression does not raise; no
        problem is solved and no value is asserted.
        """
        import numpy as np
        import scipy.sparse as sp
        np.random.seed(5)
        n = 100  # 10000
        m = 10  # 100
        F = sp.rand(m, n, density=0.01)
        F.data = np.ones(len(F.data))
        D = sp.eye(n).tocoo()
        D.data = np.random.randn(len(D.data))**2
        Z = np.random.randn(m, 1)
        Z = Z.dot(Z.T)
        x = cvx.Variable(n)
        y = x.__rmul__(F)
        # DCP attr causes error because not all the curvature
        # matrices are reduced to constants when an atom
        # is scalar.
        cvx.square(cvx.norm(D*x)) + cvx.square(Z*y)
    def test_intro(self):
        """Test examples from cvxpy.org introduction.

        Runs the introductory scripts end-to-end and checks a few
        optimal values and statuses along the way.
        """
        import numpy
        # cvx.Problem data.
        m = 30
        n = 20
        numpy.random.seed(1)
        A = numpy.random.randn(m, n)
        b = numpy.random.randn(m)
        # Construct the problem.
        x = cvx.Variable(n)
        objective = cvx.Minimize(cvx.sum_squares(A*x - b))
        constraints = [0 <= x, x <= 1]
        prob = cvx.Problem(objective, constraints)
        # The optimal objective is returned by p.solve().
        prob.solve()
        # The optimal value for x is stored in x.value.
        print(x.value)
        # The optimal Lagrange multiplier for a constraint
        # is stored in constraint.dual_value.
        print(constraints[0].dual_value)
        ########################################
        # Create two scalar variables.
        x = cvx.Variable()
        y = cvx.Variable()
        # Create two constraints.
        constraints = [x + y == 1,
                       x - y >= 1]
        # Form objective.
        obj = cvx.Minimize(cvx.square(x - y))
        # Form and solve problem.
        prob = cvx.Problem(obj, constraints)
        prob.solve()  # Returns the optimal value.
        print("status:", prob.status)
        print("optimal value", prob.value)
        print("optimal var", x.value, y.value)
        ########################################
        # Create two scalar variables.
        x = cvx.Variable()
        y = cvx.Variable()
        # Create two constraints.
        constraints = [x + y == 1,
                       x - y >= 1]
        # Form objective.
        obj = cvx.Minimize(cvx.square(x - y))
        # Form and solve problem.
        prob = cvx.Problem(obj, constraints)
        prob.solve()  # Returns the optimal value.
        print("status:", prob.status)
        print("optimal value", prob.value)
        print("optimal var", x.value, y.value)
        self.assertEqual(prob.status, cvx.OPTIMAL)
        self.assertAlmostEqual(prob.value, 1.0)
        self.assertAlmostEqual(x.value, 1.0)
        self.assertAlmostEqual(y.value, 0)
        ########################################
        # Replace the objective.
        prob = cvx.Problem(cvx.Maximize(x + y), prob.constraints)
        print("optimal value", prob.solve())
        self.assertAlmostEqual(prob.value, 1.0, places=3)
        # Replace the constraint (x + y == 1).
        constraints = prob.constraints
        constraints[0] = (x + y <= 3)
        prob = cvx.Problem(prob.objective, constraints)
        print("optimal value", prob.solve())
        self.assertAlmostEqual(prob.value, 3.0, places=2)
        ########################################
        x = cvx.Variable()
        # An infeasible problem.
        prob = cvx.Problem(cvx.Minimize(x), [x >= 1, x <= 0])
        prob.solve()
        print("status:", prob.status)
        print("optimal value", prob.value)
        self.assertEqual(prob.status, cvx.INFEASIBLE)
        self.assertAlmostEqual(prob.value, np.inf)
        # An unbounded problem.
        prob = cvx.Problem(cvx.Minimize(x))
        prob.solve()
        print("status:", prob.status)
        print("optimal value", prob.value)
        self.assertEqual(prob.status, cvx.UNBOUNDED)
        self.assertAlmostEqual(prob.value, -np.inf)
        ########################################
        # A scalar variable.
        cvx.Variable()
        # Column vector variable of length 5.
        x = cvx.Variable(5)
        # Matrix variable with 4 rows and 7 columns.
        A = cvx.Variable((4, 7))
        ########################################
        import numpy
        # cvx.Problem data.
        m = 10
        n = 5
        numpy.random.seed(1)
        A = numpy.random.randn(m, n)
        b = numpy.random.randn(m)
        # Construct the problem.
        x = cvx.Variable(n)
        objective = cvx.Minimize(cvx.sum_squares(A*x - b))
        constraints = [0 <= x, x <= 1]
        prob = cvx.Problem(objective, constraints)
        print("Optimal value", prob.solve())
        print("Optimal var")
        print(x.value)  # A numpy matrix.
        self.assertAlmostEqual(prob.value, 4.14133859146)
        ########################################
        # Positive scalar parameter.
        m = cvx.Parameter(nonneg=True)
        # Column vector parameter with unknown sign (by default).
        cvx.Parameter(5)
        # Matrix parameter with negative entries.
        G = cvx.Parameter((4, 7), nonpos=True)
        # Assigns a constant value to G.
        G.value = -numpy.ones((4, 7))
        ########################################
        # Create parameter, then assign value.
        rho = cvx.Parameter(nonneg=True)
        rho.value = 2
        # Initialize parameter with a value.
        rho = cvx.Parameter(nonneg=True, value=2)
        ########################################
        import numpy
        # cvx.Problem data.
        n = 15
        m = 10
        numpy.random.seed(1)
        A = numpy.random.randn(n, m)
        b = numpy.random.randn(n)
        # gamma must be positive due to DCP rules.
        gamma = cvx.Parameter(nonneg=True)
        # Construct the problem.
        x = cvx.Variable(m)
        error = cvx.sum_squares(A*x - b)
        obj = cvx.Minimize(error + gamma*cvx.norm(x, 1))
        prob = cvx.Problem(obj)
        # Construct a trade-off curve of ||Ax-b||^2 vs. ||x||_1
        sq_penalty = []
        l1_penalty = []
        x_values = []
        gamma_vals = numpy.logspace(-4, 6)
        for val in gamma_vals:
            gamma.value = val
            prob.solve()
            # Use expr.value to get the numerical value of
            # an expression in the problem.
            sq_penalty.append(error.value)
            l1_penalty.append(cvx.norm(x, 1).value)
            x_values.append(x.value)
        ########################################
        import numpy
        X = cvx.Variable((5, 4))
        A = numpy.ones((3, 5))
        # Use expr.size to get the dimensions.
        print("dimensions of X:", X.size)
        print("dimensions of sum(X):", cvx.sum(X).size)
        print("dimensions of A*X:", (A*X).size)
        # ValueError raised for invalid dimensions.
        try:
            A + X
        except ValueError as e:
            print(e)
    def test_inpainting(self):
        """Test image in-painting.

        Recovers randomly corrupted pixels via total-variation
        minimization constrained to agree on the known pixels; only
        checks that the solve completes.
        """
        import numpy as np
        np.random.seed(1)
        rows, cols = 100, 100
        # Load the images.
        # Convert to arrays.
        Uorig = np.random.randint(0, 255, size=(rows, cols))
        rows, cols = Uorig.shape
        # Known is 1 if the pixel is known,
        # 0 if the pixel was corrupted.
        Known = np.zeros((rows, cols))
        for i in range(rows):
            for j in range(cols):
                if np.random.random() > 0.7:
                    Known[i, j] = 1
        Ucorr = Known*Uorig
        # Recover the original image using total variation in-painting.
        U = cvx.Variable((rows, cols))
        obj = cvx.Minimize(cvx.tv(U))
        constraints = [cvx.multiply(Known, U) == cvx.multiply(Known, Ucorr)]
        prob = cvx.Problem(obj, constraints)
        prob.solve(solver=cvx.SCS)
    def test_advanced2(self):
        """Test code from the advanced section of the tutorial.

        Exercises get_problem_data for several solvers and round-trips a
        solution through the raw ECOS interface via unpack_results.
        """
        x = cvx.Variable()
        prob = cvx.Problem(cvx.Minimize(cvx.square(x)), [x == 2])
        # Get ECOS arguments.
        data, chain, inverse = prob.get_problem_data(cvx.ECOS)
        # Get ECOS_BB arguments.
        data, chain, inverse = prob.get_problem_data(cvx.ECOS_BB)
        # Get CVXOPT arguments.
        if cvx.CVXOPT in cvx.installed_solvers():
            data, chain, inverse = prob.get_problem_data(cvx.CVXOPT)
        # Get SCS arguments.
        data, chain, inverse = prob.get_problem_data(cvx.SCS)
        import ecos
        # Get ECOS arguments.
        data, chain, inverse = prob.get_problem_data(cvx.ECOS)
        # Call ECOS solver.
        solution = ecos.solve(data["c"], data["G"], data["h"],
                              ecos_conif.dims_to_solver_dict(data["dims"]),
                              data["A"], data["b"])
        # Unpack raw solver output.
        prob.unpack_results(solution, chain, inverse)
    def test_log_sum_exp(self):
        """Test log_sum_exp function that failed in Github issue.
        """
        import numpy as np
        np.random.seed(1)
        m = 5
        n = 2
        X = np.ones((m, n))
        w = cvx.Variable(n)
        expr2 = [cvx.log_sum_exp(cvx.hstack([0, X[i, :]*w])) for i in range(m)]
        expr3 = sum(expr2)
        obj = cvx.Minimize(expr3)
        p = cvx.Problem(obj)
        # max_iters=1 keeps the test fast; the solve result is not checked.
        p.solve(solver=cvx.SCS, max_iters=1)
# # Risk return tradeoff curve
# def test_risk_return_tradeoff(self):
# from math import sqrt
# from cvxopt import matrix
# from cvxopt.blas import dot
# from cvxopt.solvers import qp, options
# import scipy
# n = 4
# S = matrix( [[ 4e-2, 6e-3, -4e-3, 0.0 ],
# [ 6e-3, 1e-2, 0.0, 0.0 ],
# [-4e-3, 0.0, 2.5e-3, 0.0 ],
# [ 0.0, 0.0, 0.0, 0.0 ]] )
# pbar = matrix([.12, .10, .07, .03])
# N = 100
# # CVXPY
# Sroot = numpy.asmatrix(scipy.linalg.sqrtm(S))
# x = cvx.Variable(n, name='x')
# mu = cvx.Parameter(name='mu')
# mu.value = 1 # TODO cvx.Parameter("positive")
# objective = cvx.Minimize(-pbar*x + mu*quad_over_lin(Sroot*x,1))
# constraints = [sum(x) == 1, x >= 0]
# p = cvx.Problem(objective, constraints)
# mus = [ 10**(5.0*t/N-1.0) for t in range(N) ]
# xs = []
# for mu_val in mus:
# mu.value = mu_val
# p.solve()
# xs.append(x.value)
# returns = [ dot(pbar,x) for x in xs ]
# risks = [ sqrt(dot(x, S*x)) for x in xs ]
# # QP solver
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main()
| 32.137085 | 84 | 0.532217 |
from __future__ import print_function
import cvxpy as cvx
import cvxpy.interface as intf
from cvxpy.tests.base_test import BaseTest
from cvxpy.reductions.solvers.conic_solvers import ecos_conif
import numpy as np
import unittest
class TestExamples(BaseTest):
    def test_chebyshev_center(self):
        """Largest Euclidean ball inscribed in the polyhedron
        P = {x : a_i'*x <= b_i}; checks center and radius."""
        # Generate the input data
        a1 = np.array([2, 1])
        a2 = np.array([2, -1])
        a3 = np.array([-1, 2])
        a4 = np.array([-1, -2])
        b = np.ones(4)
        # Create and solve the model
        r = cvx.Variable(name='r')
        x_c = cvx.Variable(2, name='x_c')
        obj = cvx.Maximize(r)
        constraints = [  # TODO have atoms compute values for constants.
            a1.T*x_c + np.linalg.norm(a1)*r <= b[0],
            a2.T*x_c + np.linalg.norm(a2)*r <= b[1],
            a3.T*x_c + np.linalg.norm(a3)*r <= b[2],
            a4.T*x_c + np.linalg.norm(a4)*r <= b[3],
        ]
        p = cvx.Problem(obj, constraints)
        result = p.solve()
        self.assertAlmostEqual(result, 0.447214)
        self.assertAlmostEqual(r.value, result)
        self.assertItemsAlmostEqual(x_c.value, [0, 0])
# Test issue with numpy scalars.
def test_numpy_scalars(self):
n = 6
eps = 1e-6
np.random.seed(10)
P0 = np.random.randn(n, n)
eye = np.eye(n)
P0 = P0.T.dot(P0) + eps * eye
print(P0)
P1 = np.random.randn(n, n)
P1 = P1.T.dot(P1)
P2 = np.random.randn(n, n)
P2 = P2.T.dot(P2)
P3 = np.random.randn(n, n)
P3 = P3.T.dot(P3)
q0 = np.random.randn(n, 1)
q1 = np.random.randn(n, 1)
q2 = np.random.randn(n, 1)
q3 = np.random.randn(n, 1)
r0 = np.random.randn(1, 1)
r1 = np.random.randn(1, 1)
r2 = np.random.randn(1, 1)
r3 = np.random.randn(1, 1)
slack = cvx.Variable()
# Form the problem
x = cvx.Variable(n)
objective = cvx.Minimize(0.5*cvx.quad_form(x, P0) + q0.T*x + r0 + slack)
constraints = [0.5*cvx.quad_form(x, P1) + q1.T*x + r1 <= slack,
0.5*cvx.quad_form(x, P2) + q2.T*x + r2 <= slack,
0.5*cvx.quad_form(x, P3) + q3.T*x + r3 <= slack,
]
# We now find the primal result and compare it to the dual result
# to check if strong duality holds i.e. the duality gap is effectively zero
p = cvx.Problem(objective, constraints)
p.solve()
# Note that since our data is random,
# we may need to run this program multiple times to get a feasible primal
# When feasible, we can print out the following values
print(x.value) # solution
lam1 = constraints[0].dual_value
lam2 = constraints[1].dual_value
lam3 = constraints[2].dual_value
print(type(lam1))
P_lam = P0 + lam1*P1 + lam2*P2 + lam3*P3
q_lam = q0 + lam1*q1 + lam2*q2 + lam3*q3
r_lam = r0 + lam1*r1 + lam2*r2 + lam3*r3
dual_result = -0.5*q_lam.T.dot(P_lam).dot(q_lam) + r_lam
print(dual_result.shape)
self.assertEqual(intf.shape(dual_result), (1, 1))
# Tests examples from the README.
def test_readme_examples(self):
import numpy
numpy.random.seed(1)
# cvx.Problem data.
m = 30
n = 20
A = numpy.random.randn(m, n)
b = numpy.random.randn(m)
# Construct the problem.
x = cvx.Variable(n)
objective = cvx.Minimize(cvx.sum_squares(A*x - b))
constraints = [0 <= x, x <= 1]
p = cvx.Problem(objective, constraints)
# The optimal objective is returned by p.solve().
p.solve()
# The optimal value for x is stored in x.value.
print(x.value)
# The optimal Lagrange multiplier for a constraint
# is stored in constraint.dual_value.
print(constraints[0].dual_value)
####################################################
# Scalar variable.
a = cvx.Variable()
# Column vector variable of length 5.
x = cvx.Variable(5)
# Matrix variable with 4 rows and 7 columns.
A = cvx.Variable((4, 7))
####################################################
# Positive scalar parameter.
m = cvx.Parameter(nonneg=True)
# Column vector parameter with unknown sign (by default).
cvx.Parameter(5)
# Matrix parameter with negative entries.
G = cvx.Parameter((4, 7), nonpos=True)
# Assigns a constant value to G.
G.value = -numpy.ones((4, 7))
# Raises an error for assigning a value with invalid sign.
with self.assertRaises(Exception) as cm:
G.value = numpy.ones((4, 7))
self.assertEqual(str(cm.exception), "Parameter value must be nonpositive.")
####################################################
a = cvx.Variable()
x = cvx.Variable(5)
# expr is an Expression object after each assignment.
expr = 2*x
expr = expr - a
expr = cvx.sum(expr) + cvx.norm(x, 2)
####################################################
import numpy as np
# cvx.Problem data.
n = 10
m = 5
A = np.random.randn(n, m)
b = np.random.randn(n)
gamma = cvx.Parameter(nonneg=True)
# Construct the problem.
x = cvx.Variable(m)
objective = cvx.Minimize(cvx.sum_squares(A*x - b) + gamma*cvx.norm(x, 1))
p = cvx.Problem(objective)
# Assign a value to gamma and find the optimal x.
def get_x(gamma_value):
gamma.value = gamma_value
p.solve()
return x.value
gammas = np.logspace(-1, 2, num=2)
# Serial computation.
[get_x(value) for value in gammas]
####################################################
n = 10
mu = np.random.randn(1, n)
sigma = np.random.randn(n, n)
sigma = sigma.T.dot(sigma)
gamma = cvx.Parameter(nonneg=True)
gamma.value = 1
x = cvx.Variable(n)
# Constants:
# mu is the vector of expected returns.
# sigma is the covariance matrix.
# gamma is a cvx.Parameter that trades off risk and return.
# cvx.Variables:
# x is a vector of stock holdings as fractions of total assets.
expected_return = mu*x
risk = cvx.quad_form(x, sigma)
objective = cvx.Maximize(expected_return - gamma*risk)
p = cvx.Problem(objective, [cvx.sum(x) == 1])
p.solve()
# The optimal expected return.
print(expected_return.value)
# The optimal risk.
print(risk.value)
###########################################
N = 50
M = 40
n = 10
data = []
for i in range(N):
data += [(1, np.random.normal(loc=1.0, scale=2.0, size=n))]
for i in range(M):
data += [(-1, np.random.normal(loc=-1.0, scale=2.0, size=n))]
# Construct problem.
gamma = cvx.Parameter(nonneg=True)
gamma.value = 0.1
# 'a' is a variable constrained to have at most 6 non-zero entries.
a = cvx.Variable(n) # mi.SparseVar(n, nonzeros=6)
b = cvx.Variable()
slack = [cvx.pos(1 - label*(sample.T*a - b)) for (label, sample) in data]
objective = cvx.Minimize(cvx.norm(a, 2) + gamma*sum(slack))
p = cvx.Problem(objective)
# Extensions can attach new solve methods to the CVXPY cvx.Problem class.
# p.solve(method="admm")
p.solve()
# Count misclassifications.
errors = 0
for label, sample in data:
if label*(sample.T*a - b).value < 0:
errors += 1
print("%s misclassifications" % errors)
print(a.value)
print(b.value)
def test_advanced1(self):
# Solving a problem with different solvers.
x = cvx.Variable(2)
obj = cvx.Minimize(x[0] + cvx.norm(x, 1))
constraints = [x >= 2]
prob = cvx.Problem(obj, constraints)
# Solve with ECOS.
prob.solve(solver=cvx.ECOS)
print("optimal value with ECOS:", prob.value)
self.assertAlmostEqual(prob.value, 6)
# Solve with ECOS_BB.
prob.solve(solver=cvx.ECOS_BB)
print("optimal value with ECOS_BB:", prob.value)
self.assertAlmostEqual(prob.value, 6)
# Solve with CVXOPT.
if cvx.CVXOPT in cvx.installed_solvers():
prob.solve(solver=cvx.CVXOPT)
print("optimal value with CVXOPT:", prob.value)
self.assertAlmostEqual(prob.value, 6)
# Solve with SCS.
prob.solve(solver=cvx.SCS)
print("optimal value with SCS:", prob.value)
self.assertAlmostEqual(prob.value, 6, places=2)
if cvx.CPLEX in cvx.installed_solvers():
# Solve with CPLEX.
prob.solve(solver=cvx.CPLEX)
print("optimal value with CPLEX:", prob.value)
self.assertAlmostEqual(prob.value, 6)
if cvx.GLPK in cvx.installed_solvers():
# Solve with GLPK.
prob.solve(solver=cvx.GLPK)
print("optimal value with GLPK:", prob.value)
self.assertAlmostEqual(prob.value, 6)
# Solve with GLPK_MI.
prob.solve(solver=cvx.GLPK_MI)
print("optimal value with GLPK_MI:", prob.value)
self.assertAlmostEqual(prob.value, 6)
if cvx.GUROBI in cvx.installed_solvers():
# Solve with Gurobi.
prob.solve(solver=cvx.GUROBI)
print("optimal value with GUROBI:", prob.value)
self.assertAlmostEqual(prob.value, 6)
print(cvx.installed_solvers())
    def test_log_det(self):
        """Ellipsoid-fitting example via log_det maximization."""
        # Generate data
        x = np.array([[0.55, 0.0],
                      [0.25, 0.35],
                      [-0.2, 0.2],
                      [-0.25, -0.1],
                      [-0.0, -0.3],
                      [0.4, -0.2]]).T
        (n, m) = x.shape
        # Create and solve the model
        A = cvx.Variable((n, n))
        b = cvx.Variable(n)
        obj = cvx.Maximize(cvx.log_det(A))
        constraints = []
        for i in range(m):
            constraints.append(cvx.norm(A*x[:, i] + b) <= 1)
        p = cvx.Problem(obj, constraints)
        result = p.solve()
        self.assertAlmostEqual(result, 1.9746, places=2)
    def test_portfolio_problem(self):
        """Regression test: build (without solving) a scalar portfolio-style
        expression that historically broke DCP-attribute computation.
        """
        import numpy as np
        import scipy.sparse as sp
        np.random.seed(5)
        n = 100  # 10000
        m = 10  # 100
        # Sparse factor-loading matrix with unit entries.
        F = sp.rand(m, n, density=0.01)
        F.data = np.ones(len(F.data))
        # Diagonal (idiosyncratic) term with squared-normal entries.
        D = sp.eye(n).tocoo()
        D.data = np.random.randn(len(D.data))**2
        # Rank-one factor covariance.
        Z = np.random.randn(m, 1)
        Z = Z.dot(Z.T)
        x = cvx.Variable(n)
        y = x.__rmul__(F)  # deliberately exercises __rmul__, i.e. F*x
        # DCP attr causes error because not all the curvature
        # matrices are reduced to constants when an atom
        # is scalar.
        cvx.square(cvx.norm(D*x)) + cvx.square(Z*y)
    def test_intro(self):
        """Smoke-test the code from the cvxpy intro tutorial.

        Runs several small problems end-to-end (least squares, scalar
        QPs, infeasible/unbounded cases, parameters, trade-off curve)
        and checks a handful of known optimal values. The sections are
        sequential and reuse names (x, prob, constraints), so order matters.
        """
        import numpy
        # cvx.Problem data.
        m = 30
        n = 20
        numpy.random.seed(1)
        A = numpy.random.randn(m, n)
        b = numpy.random.randn(m)
        # Construct the problem.
        x = cvx.Variable(n)
        objective = cvx.Minimize(cvx.sum_squares(A*x - b))
        constraints = [0 <= x, x <= 1]
        prob = cvx.Problem(objective, constraints)
        # The optimal objective is returned by p.solve().
        prob.solve()
        # The optimal value for x is stored in x.value.
        print(x.value)
        # The optimal Lagrange multiplier for a constraint
        # is stored in constraint.dual_value.
        print(constraints[0].dual_value)
        ########################################
        # Create two scalar variables.
        x = cvx.Variable()
        y = cvx.Variable()
        # Create two constraints.
        constraints = [x + y == 1,
                       x - y >= 1]
        # Form objective.
        obj = cvx.Minimize(cvx.square(x - y))
        # Form and solve problem.
        prob = cvx.Problem(obj, constraints)
        prob.solve()  # Returns the optimal value.
        print("status:", prob.status)
        print("optimal value", prob.value)
        print("optimal var", x.value, y.value)
        ########################################
        # Create two scalar variables.
        x = cvx.Variable()
        y = cvx.Variable()
        # Create two constraints.
        constraints = [x + y == 1,
                       x - y >= 1]
        # Form objective.
        obj = cvx.Minimize(cvx.square(x - y))
        # Form and solve problem.
        prob = cvx.Problem(obj, constraints)
        prob.solve()  # Returns the optimal value.
        print("status:", prob.status)
        print("optimal value", prob.value)
        print("optimal var", x.value, y.value)
        # Known optimum: x = 1, y = 0, objective (x - y)^2 = 1.
        self.assertEqual(prob.status, cvx.OPTIMAL)
        self.assertAlmostEqual(prob.value, 1.0)
        self.assertAlmostEqual(x.value, 1.0)
        self.assertAlmostEqual(y.value, 0)
        ########################################
        # Replace the objective.
        prob = cvx.Problem(cvx.Maximize(x + y), prob.constraints)
        print("optimal value", prob.solve())
        self.assertAlmostEqual(prob.value, 1.0, places=3)
        # Replace the constraint (x + y == 1).
        constraints = prob.constraints
        constraints[0] = (x + y <= 3)
        prob = cvx.Problem(prob.objective, constraints)
        print("optimal value", prob.solve())
        self.assertAlmostEqual(prob.value, 3.0, places=2)
        ########################################
        x = cvx.Variable()
        # An infeasible problem.
        prob = cvx.Problem(cvx.Minimize(x), [x >= 1, x <= 0])
        prob.solve()
        print("status:", prob.status)
        print("optimal value", prob.value)
        self.assertEqual(prob.status, cvx.INFEASIBLE)
        self.assertAlmostEqual(prob.value, np.inf)
        # An unbounded problem.
        prob = cvx.Problem(cvx.Minimize(x))
        prob.solve()
        print("status:", prob.status)
        print("optimal value", prob.value)
        self.assertEqual(prob.status, cvx.UNBOUNDED)
        self.assertAlmostEqual(prob.value, -np.inf)
        ########################################
        # A scalar variable.
        cvx.Variable()
        # Column vector variable of length 5.
        x = cvx.Variable(5)
        # Matrix variable with 4 rows and 7 columns.
        A = cvx.Variable((4, 7))
        ########################################
        import numpy
        # cvx.Problem data.
        m = 10
        n = 5
        numpy.random.seed(1)
        A = numpy.random.randn(m, n)
        b = numpy.random.randn(m)
        # Construct the problem.
        x = cvx.Variable(n)
        objective = cvx.Minimize(cvx.sum_squares(A*x - b))
        constraints = [0 <= x, x <= 1]
        prob = cvx.Problem(objective, constraints)
        print("Optimal value", prob.solve())
        print("Optimal var")
        print(x.value)  # A numpy matrix.
        self.assertAlmostEqual(prob.value, 4.14133859146)
        ########################################
        # Positive scalar parameter.
        m = cvx.Parameter(nonneg=True)
        # Column vector parameter with unknown sign (by default).
        cvx.Parameter(5)
        # Matrix parameter with negative entries.
        G = cvx.Parameter((4, 7), nonpos=True)
        # Assigns a constant value to G.
        G.value = -numpy.ones((4, 7))
        ########################################
        # Create parameter, then assign value.
        rho = cvx.Parameter(nonneg=True)
        rho.value = 2
        # Initialize parameter with a value.
        rho = cvx.Parameter(nonneg=True, value=2)
        ########################################
        import numpy
        # cvx.Problem data.
        n = 15
        m = 10
        numpy.random.seed(1)
        A = numpy.random.randn(n, m)
        b = numpy.random.randn(n)
        # gamma must be positive due to DCP rules.
        gamma = cvx.Parameter(nonneg=True)
        # Construct the problem.
        x = cvx.Variable(m)
        error = cvx.sum_squares(A*x - b)
        obj = cvx.Minimize(error + gamma*cvx.norm(x, 1))
        prob = cvx.Problem(obj)
        # Construct a trade-off curve of ||Ax-b||^2 vs. ||x||_1
        sq_penalty = []
        l1_penalty = []
        x_values = []
        gamma_vals = numpy.logspace(-4, 6)
        for val in gamma_vals:
            gamma.value = val
            prob.solve()
            # Use expr.value to get the numerical value of
            # an expression in the problem.
            sq_penalty.append(error.value)
            l1_penalty.append(cvx.norm(x, 1).value)
            x_values.append(x.value)
        ########################################
        import numpy
        X = cvx.Variable((5, 4))
        A = numpy.ones((3, 5))
        # Use expr.size to get the dimensions.
        print("dimensions of X:", X.size)
        print("dimensions of sum(X):", cvx.sum(X).size)
        print("dimensions of A*X:", (A*X).size)
        # ValueError raised for invalid dimensions.
        try:
            A + X
        except ValueError as e:
            print(e)
def test_inpainting(self):
import numpy as np
np.random.seed(1)
rows, cols = 100, 100
# Load the images.
# Convert to arrays.
Uorig = np.random.randint(0, 255, size=(rows, cols))
rows, cols = Uorig.shape
# Known is 1 if the pixel is known,
# 0 if the pixel was corrupted.
Known = np.zeros((rows, cols))
for i in range(rows):
for j in range(cols):
if np.random.random() > 0.7:
Known[i, j] = 1
Ucorr = Known*Uorig
# Recover the original image using total variation in-painting.
U = cvx.Variable((rows, cols))
obj = cvx.Minimize(cvx.tv(U))
constraints = [cvx.multiply(Known, U) == cvx.multiply(Known, Ucorr)]
prob = cvx.Problem(obj, constraints)
prob.solve(solver=cvx.SCS)
    def test_advanced2(self):
        """Exercise get_problem_data for several solvers, then solve the
        ECOS data manually and unpack the raw output back into the problem.
        """
        x = cvx.Variable()
        prob = cvx.Problem(cvx.Minimize(cvx.square(x)), [x == 2])
        # Get ECOS arguments.
        data, chain, inverse = prob.get_problem_data(cvx.ECOS)
        # Get ECOS_BB arguments.
        data, chain, inverse = prob.get_problem_data(cvx.ECOS_BB)
        # Get CVXOPT arguments (only when installed).
        if cvx.CVXOPT in cvx.installed_solvers():
            data, chain, inverse = prob.get_problem_data(cvx.CVXOPT)
        # Get SCS arguments.
        data, chain, inverse = prob.get_problem_data(cvx.SCS)
        import ecos
        # Get ECOS arguments.
        data, chain, inverse = prob.get_problem_data(cvx.ECOS)
        # Call the ECOS solver directly, bypassing prob.solve().
        # NOTE(review): `ecos_conif` is not imported in this method;
        # presumably imported at the module top -- confirm.
        solution = ecos.solve(data["c"], data["G"], data["h"],
                              ecos_conif.dims_to_solver_dict(data["dims"]),
                              data["A"], data["b"])
        # Unpack raw solver output.
        prob.unpack_results(solution, chain, inverse)
def test_log_sum_exp(self):
import numpy as np
np.random.seed(1)
m = 5
n = 2
X = np.ones((m, n))
w = cvx.Variable(n)
expr2 = [cvx.log_sum_exp(cvx.hstack([0, X[i, :]*w])) for i in range(m)]
expr3 = sum(expr2)
obj = cvx.Minimize(expr3)
p = cvx.Problem(obj)
p.solve(solver=cvx.SCS, max_iters=1)
# # Risk return tradeoff curve
# def test_risk_return_tradeoff(self):
# from math import sqrt
# from cvxopt import matrix
# from cvxopt.blas import dot
# from cvxopt.solvers import qp, options
# import scipy
# n = 4
# S = matrix( [[ 4e-2, 6e-3, -4e-3, 0.0 ],
# [ 6e-3, 1e-2, 0.0, 0.0 ],
# [-4e-3, 0.0, 2.5e-3, 0.0 ],
# [ 0.0, 0.0, 0.0, 0.0 ]] )
# pbar = matrix([.12, .10, .07, .03])
# N = 100
# # CVXPY
# Sroot = numpy.asmatrix(scipy.linalg.sqrtm(S))
# x = cvx.Variable(n, name='x')
# mu = cvx.Parameter(name='mu')
# mu.value = 1 # TODO cvx.Parameter("positive")
# objective = cvx.Minimize(-pbar*x + mu*quad_over_lin(Sroot*x,1))
# constraints = [sum(x) == 1, x >= 0]
# p = cvx.Problem(objective, constraints)
# mus = [ 10**(5.0*t/N-1.0) for t in range(N) ]
# xs = []
# for mu_val in mus:
# mu.value = mu_val
# p.solve()
# xs.append(x.value)
# returns = [ dot(pbar,x) for x in xs ]
# risks = [ sqrt(dot(x, S*x)) for x in xs ]
# # QP solver
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| true | true |
1c32035411faf1edc3412842970183b236a6dbfa | 20,364 | py | Python | lib/galaxy/visualization/plugins/config_parser.py | mmiladi/galaxy | 7857b152cd10d9490ac2433ff2905ca1a47ee32c | [
"CC-BY-3.0"
] | 4 | 2018-10-29T18:34:38.000Z | 2021-09-29T23:30:42.000Z | lib/galaxy/visualization/plugins/config_parser.py | mmiladi/galaxy | 7857b152cd10d9490ac2433ff2905ca1a47ee32c | [
"CC-BY-3.0"
] | 1 | 2019-02-04T16:21:27.000Z | 2019-02-04T16:45:17.000Z | lib/galaxy/visualization/plugins/config_parser.py | mmiladi/galaxy | 7857b152cd10d9490ac2433ff2905ca1a47ee32c | [
"CC-BY-3.0"
] | 3 | 2020-02-12T15:22:24.000Z | 2021-08-19T10:27:39.000Z | from six import string_types
import galaxy.model
from galaxy import util
import logging
log = logging.getLogger(__name__)
class ParsingException(ValueError):
    """Raised when the visualizations-framework configuration XML file
    cannot be parsed (malformed or missing required elements)."""
class VisualizationsConfigParser(object):
    """
    Class that parses a visualizations configuration XML file.

    Each visualization will get the following info:
    - how to load a visualization:
    -- how to find the proper template
    -- how to convert query string into DB models
    - when/how to generate a link to the visualization
    -- what provides the data
    -- what information needs to be added to the query string
    """
    #: what are the allowed 'entry_point_type' for entry_point elements
    ALLOWED_ENTRY_POINT_TYPES = ['mako', 'html', 'script']
    #: what are the allowed href targets when clicking on a visualization anchor
    VALID_RENDER_TARGETS = ['galaxy_main', '_top', '_blank']

    def __init__(self):
        # what parsers should be used for sub-components
        self.data_source_parser = DataSourceParser()
        self.param_parser = ParamParser()
        self.param_modifier_parser = ParamModifierParser()

    def parse_file(self, xml_filepath):
        """
        Parse the given XML file for visualizations data.

        :returns: visualization config dictionary (None if the plugin is disabled)
        """
        xml_tree = util.parse_xml(xml_filepath)
        return self.parse_visualization(xml_tree.getroot())

    def parse_visualization(self, xml_tree):
        """
        Parse the template, name, and any data_sources and params from the
        given `xml_tree` for a visualization.

        :raises ParsingException: if the name is missing, no valid
            data_source is found, or the entry point is missing/invalid.
        :returns: config dictionary, or None when the plugin is disabled
        """
        returned = {}
        # main tag specifies plugin type (visualization or interactive_enviornment)
        returned['plugin_type'] = xml_tree.tag
        # a text display name for end user links
        returned['name'] = xml_tree.attrib.get('name', None)
        if not returned['name']:
            raise ParsingException('visualization needs a name attribute')
        # allow manually turning off a vis by checking for a disabled property
        if 'disabled' in xml_tree.attrib:
            log.info('Visualizations plugin disabled: %s. Skipping...', returned['name'])
            return None
        # the embeddable flag is a design-by-contract promise that the
        # visualization can be rendered inside another page (often as a DOM
        # fragment); since that requires extra work from the creator, it
        # defaults to False
        returned['embeddable'] = False
        if 'embeddable' in xml_tree.attrib:
            returned['embeddable'] = xml_tree.attrib.get('embeddable', False) == 'true'
        # a (for now) text description of what the visualization does
        description = xml_tree.find('description')
        returned['description'] = description.text.strip() if description is not None else None
        # data_sources are the kinds of objects/data associated with the visualization
        # e.g. views on HDAs can use this to find out what visualizations are applicable to them
        data_sources = []
        data_sources_confs = xml_tree.find('data_sources')
        for data_source_conf in data_sources_confs.findall('data_source'):
            data_source = self.data_source_parser.parse(data_source_conf)
            if data_source:
                data_sources.append(data_source)
        # at least one valid data_source is required
        if not data_sources:
            raise ParsingException('No valid data_sources for visualization')
        returned['data_sources'] = data_sources
        # parameters spell out how to convert query string params into
        # resources and data that will be parsed, fetched, etc. and passed
        # to the template
        params = {}
        param_confs = xml_tree.find('params')
        param_elements = param_confs.findall('param') if param_confs is not None else []
        for param_conf in param_elements:
            param = self.param_parser.parse(param_conf)
            if param:
                params[param_conf.text] = param
        # params are not required
        if params:
            returned['params'] = params
        # param modifiers provide extra information for other params
        # (e.g. hda_ldda='hda' -> dataset_id is an hda id); stored in a
        # 2-level dict { target_param: { param_modifier_key: param_mod_data } }
        param_modifiers = {}
        param_modifier_elements = param_confs.findall('param_modifier') if param_confs is not None else []
        for param_modifier_conf in param_modifier_elements:
            param_modifier = self.param_modifier_parser.parse(param_modifier_conf)
            # param modifiers map accrd. to the params they modify (for faster lookup)
            target_param = param_modifier_conf.get('modifies')
            param_modifier_key = param_modifier_conf.text
            if param_modifier and target_param in params:
                # multiple params can modify a single, other param,
                # so store in a sub-dict, initializing if this is the first
                if target_param not in param_modifiers:
                    param_modifiers[target_param] = {}
                param_modifiers[target_param][param_modifier_key] = param_modifier
        # not required
        if param_modifiers:
            returned['param_modifiers'] = param_modifiers
        # entry_point: how will this plugin render/load? mako, script tag, or static html file?
        returned['entry_point'] = self.parse_entry_point(xml_tree)
        # link_text: the string to use for the text of any links/anchors to this visualization
        link_text = xml_tree.find('link_text')
        if link_text is not None and link_text.text:
            # BUGFIX: store the element's text (the actual link string),
            # not the Element object itself
            returned['link_text'] = link_text.text
        # render_target: where in the browser to open the rendered visualization
        # defaults to: galaxy_main
        render_target = xml_tree.find('render_target')
        if((render_target is not None and render_target.text) and
                (render_target.text in self.VALID_RENDER_TARGETS)):
            returned['render_target'] = render_target.text
        else:
            returned['render_target'] = 'galaxy_main'
        # consider unifying the above into its own element and parsing method
        return returned

    def parse_entry_point(self, xml_tree):
        """
        Parse the config file for an appropriate entry point: a mako template,
        a script tag, or an html file, returning a dictionary with: `type`,
        `file`, and `attr`ibutes of the element.

        :raises ParsingException: if neither a (deprecated) template element
            nor an entry_point element is present, or the type is unknown.
        """
        # (older) mako-only syntax: the template to use in rendering the visualization
        template = xml_tree.find('template')
        if template is not None and template.text:
            log.info('template syntax is deprecated: use entry_point instead')
            return {
                'type' : 'mako',
                'file' : template.text,
                'attr' : {}
            }
        # need one of the two: (the deprecated) template or entry_point
        entry_point = xml_tree.find('entry_point')
        if entry_point is None:
            raise ParsingException('template or entry_point required')
        # parse by returning a sub-object and simply copying any attributes unused here
        entry_point_attrib = entry_point.attrib.copy()
        entry_point_type = entry_point_attrib.pop('entry_point_type', 'mako')
        if entry_point_type not in self.ALLOWED_ENTRY_POINT_TYPES:
            raise ParsingException('Unknown entry_point type: ' + entry_point_type)
        return {
            'type' : entry_point_type,
            'file' : entry_point.text,
            'attr' : entry_point_attrib
        }
# -------------------------------------------------------------------
class DataSourceParser(object):
    """
    Component class of VisualizationsConfigParser that parses data_source elements
    within visualization elements.

    data_sources are (in the extreme) any object that can be used to produce
    data for the visualization to consume (e.g. HDAs, LDDAs, Jobs, Users, etc.).
    There can be more than one data_source associated with a visualization.
    """
    # these are the allowed classes to associate visualizations with (as strings)
    # any model_class element not in this list will throw a parsing ParsingExcepion
    ALLOWED_MODEL_CLASSES = [
        'Visualization',
        'HistoryDatasetAssociation',
        'LibraryDatasetDatasetAssociation'
    ]
    ATTRIBUTE_SPLIT_CHAR = '.'
    # these are the allowed object attributes to use in data source tests
    # any attribute element not in this list will throw a parsing ParsingExcepion
    ALLOWED_DATA_SOURCE_ATTRIBUTES = [
        'datatype'
    ]

    def parse(self, xml_tree):
        """
        Return a visualization data_source dictionary parsed from the given
        XML element.

        :raises ParsingException: if no valid model_class is present.
        """
        returned = {}
        # model_class (required, only one) - look up and convert model_class to actual galaxy model class
        model_class = self.parse_model_class(xml_tree.find('model_class'))
        if not model_class:
            raise ParsingException('data_source needs a model class')
        returned['model_class'] = model_class
        # tests (optional, 0 or more) - data for boolean test: 'is the visualization usable by this object?'
        # when no tests are given, default to isinstance( object, model_class )
        returned['tests'] = self.parse_tests(xml_tree.findall('test'))
        # to_params (optional, 0 or more) - tells the registry to set certain params based on the model_class, tests
        returned['to_params'] = {}
        to_params = self.parse_to_params(xml_tree.findall('to_param'))
        if to_params:
            returned['to_params'] = to_params
        return returned

    def parse_model_class(self, xml_tree):
        """
        Convert xml model_class element to a galaxy model class
        (or None if model class is not found).

        This element is required and only the first element is used.
        The model_class string must be in ALLOWED_MODEL_CLASSES.
        """
        if xml_tree is None or not xml_tree.text:
            raise ParsingException('data_source entry requires a model_class')
        if xml_tree.text not in self.ALLOWED_MODEL_CLASSES:
            raise ParsingException('Invalid data_source model_class: %s' % (xml_tree.text))
        # look up the model from the model module, returning None if not found
        model_class = getattr(galaxy.model, xml_tree.text, None)
        return model_class

    def _build_getattr_lambda(self, attr_name_list):
        """
        Recursively builds a compound lambda function of getattr's
        from the attribute names given in `attr_name_list`.
        """
        if len(attr_name_list) == 0:
            # identity - if list is empty, return object itself
            return lambda o: o
        next_attr_name = attr_name_list[-1]
        if len(attr_name_list) == 1:
            # recursive base case
            return lambda o: getattr(o, next_attr_name)
        # recursive case
        return lambda o: getattr(self._build_getattr_lambda(attr_name_list[:-1])(o), next_attr_name)

    def parse_tests(self, xml_tree_list):
        """
        Returns a list of test dictionaries that the registry can use
        against a given object to determine if the visualization can be
        used with the object.

        Tests should NOT include expensive operations (file reads, jobs);
        they are currently or'd only by the registry.
        """
        tests = []
        if not xml_tree_list:
            return tests
        for test_elem in xml_tree_list:
            test_type = test_elem.get('type', 'eq')
            test_result = test_elem.text.strip() if test_elem.text else None
            if not test_type or not test_result:
                log.warning('Skipping test. Needs both type attribute and text node to be parsed: ' +
                            '%s, %s' % (test_type, test_elem.text))
                continue
            test_result = test_result.strip()
            # test_attr can be a dot separated chain of object attributes (e.g. dataset.datatype) - convert to list
            # TODO: too dangerous - constrain these to some allowed list
            # TODO: does this err if no test_attr - it should...
            test_attr = test_elem.get('test_attr')
            test_attr = test_attr.split(self.ATTRIBUTE_SPLIT_CHAR) if isinstance(test_attr, string_types) else []
            # build a lambda function that gets the desired attribute to test
            getter = self._build_getattr_lambda(test_attr)
            # result type should tell the registry how to convert the result before the test
            test_result_type = test_elem.get('result_type', 'string')
            # BUGFIX: bind `getter` as a default argument. These functions are
            # defined inside the loop; a plain closure over `getter` would be
            # late-bound, so every stored fn would use the getter from the
            # LAST iteration. The default-argument binding freezes the value
            # per iteration without changing the (o, result) call signature.
            if test_type == 'isinstance':
                # is test_attr attribute an instance of result
                # TODO: wish we could take this further but it would mean passing in the datatypes_registry
                def test_fn(o, result, getter=getter):
                    return isinstance(getter(o), result)
            elif test_type == 'has_dataprovider':
                # does the object itself have a datatype attr and does that datatype have the given dataprovider
                def test_fn(o, result, getter=getter):
                    return (hasattr(getter(o), 'has_dataprovider') and
                            getter(o).has_dataprovider(result))
            elif test_type == 'has_attribute':
                # does the object itself have attr in 'result' (no equivalence checking)
                def test_fn(o, result, getter=getter):
                    return hasattr(getter(o), result)
            elif test_type == 'not_eq':
                def test_fn(o, result, getter=getter):
                    return str(getter(o)) != result
            else:
                # default to simple (string) equivalence (coercing the test_attr to a string)
                def test_fn(o, result, getter=getter):
                    return str(getter(o)) == result
            tests.append({
                'type' : test_type,
                'result' : test_result,
                'result_type' : test_result_type,
                'fn' : test_fn
            })
        return tests

    def parse_to_params(self, xml_tree_list):
        """
        Given a list of `to_param` elements, returns a dictionary that allows
        the registry to convert the data_source into one or more appropriate
        params for the visualization.

        :raises ParsingException: if a to_param has no text, or has neither
            an assign nor a param_attr attribute.
        """
        to_param_dict = {}
        if not xml_tree_list:
            return to_param_dict
        for element in xml_tree_list:
            # param_name required
            param_name = element.text
            if not param_name:
                raise ParsingException('to_param requires text (the param name)')
            param = {}
            # assign is a shortcut param_attr that assigns a value to a param (as text)
            assign = element.get('assign')
            if assign is not None:
                param['assign'] = assign
            # param_attr is the attribute of the object (that the visualization will be applied to)
            # that should be converted into a query param (e.g. param_attr="id" -> dataset_id)
            # TODO:?? use the build attr getter here?
            # simple (1 lvl) attrs for now
            param_attr = element.get('param_attr')
            if param_attr is not None:
                param['param_attr'] = param_attr
            # element must have either param_attr or assign? what about no params (the object itself)
            if not param_attr and not assign:
                raise ParsingException('to_param requires either assign or param_attr attributes: %s', param_name)
            # TODO: consider making the to_param name an attribute (param="hda_ldda") and the text what would
            #   be used for the conversion - this would allow CDATA values to be passed
            if param:
                to_param_dict[param_name] = param
        return to_param_dict
class ParamParser(object):
    """
    Component class of VisualizationsConfigParser that parses param elements
    within visualization elements.

    params are parameters that will be parsed (based on their `type`, etc.)
    and sent to the visualization template by controllers.visualization.render.
    """
    DEFAULT_PARAM_TYPE = 'str'

    def parse(self, xml_tree):
        """
        Parse a visualization parameter from the given `xml_tree`.

        :raises ParsingException: if the element has no text (the param name).
        """
        # the element text is the param's key; it is required, though it is
        # checked (not stored) here
        if not xml_tree.text:
            raise ParsingException('Param entry requires text')
        parsed = {'type': self.parse_param_type(xml_tree)}
        # a required param carries no default; an optional one defaults to
        # None unless the element declares an explicit default attribute
        is_required = xml_tree.get('required') == "true"
        parsed['required'] = is_required
        if not is_required:
            # conversion of the default based on param type happens elsewhere
            parsed['default'] = xml_tree.get('default') if 'default' in xml_tree.attrib else None
        # optional comma-separated list of allowed values; interpretation is
        # deferred until parsing and depends on the param type
        # NOTE: currently unused downstream
        allowed_values = xml_tree.get('constrain_to')
        if allowed_values:
            parsed['constrain_to'] = allowed_values.split(',')
        # is the param itself a comma-separated-value list?
        parsed['csv'] = xml_tree.get('csv') == "true"
        # optional remapping of the query-string key to the template var name
        template_var = xml_tree.get('var_name_in_template')
        if template_var:
            parsed['var_name_in_template'] = template_var
        return parsed

    def parse_param_type(self, xml_tree):
        """
        Parse a param type from the given `xml_tree`, defaulting to
        `DEFAULT_PARAM_TYPE` when absent.
        """
        # TODO: set parsers and validaters, convert here
        return xml_tree.get('type') or self.DEFAULT_PARAM_TYPE
class ParamModifierParser(ParamParser):
    """
    Component class of VisualizationsConfigParser that parses param_modifier
    elements within visualization elements.

    param_modifiers are params from a dictionary (such as a query string)
    that are not standalone but modify the parsing/conversion of a separate
    (normal) param (e.g. 'hda_ldda' can equal 'hda' or 'ldda' and control
    whether a visualizations 'dataset_id' param is for an HDA or LDDA).
    """

    def parse(self, element):
        """Parse like a normal param, but require a 'modifies' target attribute."""
        # a param_modifier is meaningless without the param it modifies
        if not element.get('modifies'):
            raise ParsingException('param_modifier entry requires a target param key (attribute "modifies")')
        return super(ParamModifierParser, self).parse(element)
| 43.982721 | 117 | 0.638283 | from six import string_types
import galaxy.model
from galaxy import util
import logging
log = logging.getLogger(__name__)
class ParsingException(ValueError):
    """Raised for errors while parsing the visualizations framework config XML."""
    pass
class VisualizationsConfigParser(object):
    """Parses a visualizations configuration XML file into a config dict:
    how to load/render a visualization, what data it applies to, and what
    query-string params it takes.
    """
    #: allowed values for the 'entry_point_type' attribute
    ALLOWED_ENTRY_POINT_TYPES = ['mako', 'html', 'script']
    #: allowed href targets for visualization anchors
    VALID_RENDER_TARGETS = ['galaxy_main', '_top', '_blank']
    def __init__(self):
        # parsers for sub-components of the visualization element
        self.data_source_parser = DataSourceParser()
        self.param_parser = ParamParser()
        self.param_modifier_parser = ParamModifierParser()
    def parse_file(self, xml_filepath):
        """Parse the given XML file; returns a config dict (None if disabled)."""
        xml_tree = util.parse_xml(xml_filepath)
        visualization = self.parse_visualization(xml_tree.getroot())
        return visualization
    def parse_visualization(self, xml_tree):
        """Parse name, data_sources, params, entry point, etc. from `xml_tree`.

        Raises ParsingException for a missing name, missing data_sources,
        or a missing/invalid entry point. Returns None if disabled.
        """
        returned = {}
        # the root tag names the plugin type
        returned['plugin_type'] = xml_tree.tag
        # display name for end-user links; required
        returned['name'] = xml_tree.attrib.get('name', None)
        if not returned['name']:
            raise ParsingException('visualization needs a name attribute')
        # a 'disabled' attribute turns the plugin off entirely
        if 'disabled' in xml_tree.attrib:
            log.info('Visualizations plugin disabled: %s. Skipping...', returned['name'])
            return None
        # 'embeddable' promises the vis can render inside another page; defaults False
        returned['embeddable'] = False
        if 'embeddable' in xml_tree.attrib:
            returned['embeddable'] = xml_tree.attrib.get('embeddable', False) == 'true'
        # a (for now) text description of what the visualization does
        description = xml_tree.find('description')
        returned['description'] = description.text.strip() if description is not None else None
        # data_sources are the kinds of objects/data associated with the visualization
        # e.g. views on HDAs can use this to find out what visualizations are applicable to them
        data_sources = []
        data_sources_confs = xml_tree.find('data_sources')
        for data_source_conf in data_sources_confs.findall('data_source'):
            data_source = self.data_source_parser.parse(data_source_conf)
            if data_source:
                data_sources.append(data_source)
        # at least one valid data_source is required
        if not data_sources:
            raise ParsingException('No valid data_sources for visualization')
        returned['data_sources'] = data_sources
        # params describe how query-string values are converted for the template
        params = {}
        param_confs = xml_tree.find('params')
        param_elements = param_confs.findall('param') if param_confs is not None else []
        for param_conf in param_elements:
            param = self.param_parser.parse(param_conf)
            if param:
                params[param_conf.text] = param
        # params are optional
        if params:
            returned['params'] = params
        # param_modifiers alter the parsing of other params; keyed by the
        # target param for faster lookup
        param_modifiers = {}
        param_modifier_elements = param_confs.findall('param_modifier') if param_confs is not None else []
        for param_modifier_conf in param_modifier_elements:
            param_modifier = self.param_modifier_parser.parse(param_modifier_conf)
            target_param = param_modifier_conf.get('modifies')
            param_modifier_key = param_modifier_conf.text
            if param_modifier and target_param in params:
                # several modifiers may target one param; store in a sub-dict
                if target_param not in param_modifiers:
                    param_modifiers[target_param] = {}
                param_modifiers[target_param][param_modifier_key] = param_modifier
        # optional
        if param_modifiers:
            returned['param_modifiers'] = param_modifiers
        # how the plugin renders: mako, script tag, or static html
        returned['entry_point'] = self.parse_entry_point(xml_tree)
        # NOTE(review): this stores the XML Element, not its .text, though a
        # link *string* would be expected by consumers -- confirm downstream use.
        link_text = xml_tree.find('link_text')
        if link_text is not None and link_text.text:
            returned['link_text'] = link_text
        # where in the browser to open the rendered vis; defaults to galaxy_main
        render_target = xml_tree.find('render_target')
        if((render_target is not None and render_target.text) and
                (render_target.text in self.VALID_RENDER_TARGETS)):
            returned['render_target'] = render_target.text
        else:
            returned['render_target'] = 'galaxy_main'
        return returned
    def parse_entry_point(self, xml_tree):
        """Return {'type', 'file', 'attr'} from a template or entry_point element.

        Raises ParsingException if neither element exists or the type is unknown.
        """
        # (older) mako-only syntax: a bare template element
        template = xml_tree.find('template')
        if template is not None and template.text:
            log.info('template syntax is deprecated: use entry_point instead')
            return {
                'type' : 'mako',
                'file' : template.text,
                'attr' : {}
            }
        # otherwise an entry_point element is required
        entry_point = xml_tree.find('entry_point')
        if entry_point is None:
            raise ParsingException('template or entry_point required')
        # copy attributes; unused ones pass through in 'attr'
        entry_point_attrib = entry_point.attrib.copy()
        entry_point_type = entry_point_attrib.pop('entry_point_type', 'mako')
        if entry_point_type not in self.ALLOWED_ENTRY_POINT_TYPES:
            raise ParsingException('Unknown entry_point type: ' + entry_point_type)
        return {
            'type' : entry_point_type,
            'file' : entry_point.text,
            'attr' : entry_point_attrib
        }
class DataSourceParser(object):
    """Parses data_source elements of visualization configs: which model
    objects a visualization applies to, tests against those objects, and
    how to convert objects into query params.
    """
    # model classes (as strings) a visualization may be associated with
    ALLOWED_MODEL_CLASSES = [
        'Visualization',
        'HistoryDatasetAssociation',
        'LibraryDatasetDatasetAssociation'
    ]
    # separator for dotted attribute chains in test_attr
    ATTRIBUTE_SPLIT_CHAR = '.'
    # object attributes allowed in data source tests
    ALLOWED_DATA_SOURCE_ATTRIBUTES = [
        'datatype'
    ]
    def parse(self, xml_tree):
        """Return a data_source dict {'model_class', 'tests', 'to_params'}."""
        returned = {}
        # model_class is required
        model_class = self.parse_model_class(xml_tree.find('model_class'))
        if not model_class:
            raise ParsingException('data_source needs a model class')
        returned['model_class'] = model_class
        # optional applicability tests
        returned['tests'] = self.parse_tests(xml_tree.findall('test'))
        returned['to_params'] = {}
        to_params = self.parse_to_params(xml_tree.findall('to_param'))
        if to_params:
            returned['to_params'] = to_params
        return returned
    def parse_model_class(self, xml_tree):
        """Convert a model_class element to the actual galaxy model class.

        Raises ParsingException for a missing or disallowed class name;
        returns None if the name is allowed but absent from galaxy.model.
        """
        if xml_tree is None or not xml_tree.text:
            raise ParsingException('data_source entry requires a model_class')
        if xml_tree.text not in self.ALLOWED_MODEL_CLASSES:
            raise ParsingException('Invalid data_source model_class: %s' % (xml_tree.text))
        model_class = getattr(galaxy.model, xml_tree.text, None)
        return model_class
    def _build_getattr_lambda(self, attr_name_list):
        """Recursively build a compound getattr lambda for the dotted chain."""
        if len(attr_name_list) == 0:
            # identity when the chain is empty
            return lambda o: o
        next_attr_name = attr_name_list[-1]
        if len(attr_name_list) == 1:
            # base case: single attribute
            return lambda o: getattr(o, next_attr_name)
        return lambda o: getattr(self._build_getattr_lambda(attr_name_list[:-1])(o), next_attr_name)
    def parse_tests(self, xml_tree_list):
        """Return a list of test dicts {'type', 'result', 'result_type', 'fn'}."""
        tests = []
        if not xml_tree_list:
            return tests
        for test_elem in xml_tree_list:
            test_type = test_elem.get('type', 'eq')
            test_result = test_elem.text.strip() if test_elem.text else None
            if not test_type or not test_result:
                log.warning('Skipping test. Needs both type attribute and text node to be parsed: ' +
                            '%s, %s' % (test_type, test_elem.text))
                continue
            test_result = test_result.strip()
            # test_attr may be a dotted attribute chain (e.g. dataset.datatype)
            test_attr = test_elem.get('test_attr')
            test_attr = test_attr.split(self.ATTRIBUTE_SPLIT_CHAR) if isinstance(test_attr, string_types) else []
            getter = self._build_getattr_lambda(test_attr)
            test_result_type = test_elem.get('result_type', 'string')
            # NOTE(review): the test_fn defs below close over `getter`, which is
            # reassigned each iteration; with multiple <test> elements every
            # stored fn would late-bind to the LAST getter -- likely a bug, confirm.
            if test_type == 'isinstance':
                def test_fn(o, result):
                    return isinstance(getter(o), result)
            elif test_type == 'has_dataprovider':
                def test_fn(o, result):
                    return (hasattr(getter(o), 'has_dataprovider') and
                            getter(o).has_dataprovider(result))
            elif test_type == 'has_attribute':
                def test_fn(o, result):
                    return hasattr(getter(o), result)
            elif test_type == 'not_eq':
                def test_fn(o, result):
                    return str(getter(o)) != result
            else:
                # default: string equivalence
                def test_fn(o, result):
                    return str(getter(o)) == result
            tests.append({
                'type' : test_type,
                'result' : test_result,
                'result_type' : test_result_type,
                'fn' : test_fn
            })
        return tests
    def parse_to_params(self, xml_tree_list):
        """Return {param_name: {'assign' and/or 'param_attr'}} from to_param elements."""
        to_param_dict = {}
        if not xml_tree_list:
            return to_param_dict
        for element in xml_tree_list:
            # the element text (param name) is required
            param_name = element.text
            if not param_name:
                raise ParsingException('to_param requires text (the param name)')
            param = {}
            # 'assign' sets a literal value for the param
            assign = element.get('assign')
            if assign is not None:
                param['assign'] = assign
            # 'param_attr' names the object attribute to convert into a query param
            param_attr = element.get('param_attr')
            if param_attr is not None:
                param['param_attr'] = param_attr
            # one of the two is required
            if not param_attr and not assign:
                raise ParsingException('to_param requires either assign or param_attr attributes: %s', param_name)
            if param:
                to_param_dict[param_name] = param
        return to_param_dict
class ParamParser(object):
    """Parser for visualization ``<param>`` configuration elements.

    Produces a plain dict describing a parameter: its ``type``, whether it
    is ``required``, an optional ``default``, an optional ``constrain_to``
    value list, whether it is a ``csv`` list, and an optional
    ``var_name_in_template`` remapping.
    """

    # fallback when the element carries no ``type`` attribute
    DEFAULT_PARAM_TYPE = 'str'

    def parse(self, xml_tree):
        """Return a dict description of the given ``<param>`` element.

        Raises ParsingException when the element has no text node (the text
        is the parameter key).
        """
        if not xml_tree.text:
            raise ParsingException('Param entry requires text')

        parsed = {'type': self.parse_param_type(xml_tree)}

        # required params carry no default; optional ones default to the
        # ``default`` attribute, or None when it is absent
        is_required = xml_tree.get('required') == "true"
        parsed['required'] = is_required
        if not is_required:
            # NOTE(review): the default is not converted based on the param
            # type here — confirm downstream interpretation.
            parsed['default'] = xml_tree.get('default')

        # interpretation of ``constrain_to`` is deferred until parsing and
        # depends on the param type (e.g. membership list, or min/max for
        # numbers); currently unused upstream
        constraint = xml_tree.get('constrain_to')
        if constraint:
            parsed['constrain_to'] = constraint.split(',')

        # is the param a comma-separated-value list?
        parsed['csv'] = xml_tree.get('csv') == "true"

        # remap query-string param names to template variable names
        template_name = xml_tree.get('var_name_in_template')
        if template_name:
            parsed['var_name_in_template'] = template_name

        return parsed

    def parse_param_type(self, xml_tree):
        """Return the declared param type, defaulting to ``str``."""
        declared = xml_tree.get('type')
        return declared if declared else self.DEFAULT_PARAM_TYPE
class ParamModifierParser(ParamParser):
    """Parser for ``<param_modifier>`` elements.

    Behaves exactly like ParamParser, except that the element must carry a
    ``modifies`` attribute naming the target param key.
    NOTE(review): the ``modifies`` value is validated here but not included
    in the returned dict — presumably the caller reads the attribute
    directly; confirm against call sites.
    """

    def parse(self, element):
        """Validate ``modifies`` then delegate to ParamParser.parse."""
        if not element.get('modifies'):
            raise ParsingException(
                'param_modifier entry requires a target param key (attribute "modifies")')
        return super(ParamModifierParser, self).parse(element)
| true | true |
1c320402f1fb2dfd1546245fc84da34912074997 | 798 | py | Python | interactive/mechanism/urls.py | mattldawson/music-box-interactive | 6b2610b4f0f255f0e78e23628dc7ba6cc844d0f4 | [
"Apache-2.0"
] | null | null | null | interactive/mechanism/urls.py | mattldawson/music-box-interactive | 6b2610b4f0f255f0e78e23628dc7ba6cc844d0f4 | [
"Apache-2.0"
] | null | null | null | interactive/mechanism/urls.py | mattldawson/music-box-interactive | 6b2610b4f0f255f0e78e23628dc7ba6cc844d0f4 | [
"Apache-2.0"
] | null | null | null | from django.urls import path, include
from . import views
# Route table for the mechanism app.  Each entry maps a URL path (relative to
# where this URLconf is included) to its view handler.
# NOTE(review): '' and 'species' are bound to the same handler, so the app
# root shows the species page — presumably intentional; confirm.
urlpatterns = [
    path('', views.species_home_handler),
    path('conditions-species-list', views.conditions_species_list_handler),
    # reaction pages and endpoints
    path('reactions', views.reactions_home_handler),
    path('reaction-detail', views.reaction_detail_handler),
    path('reaction-musica-names-list', views.reaction_musica_names_list_handler),
    path('reaction-type-schema', views.reaction_type_schema_handler),
    path('reaction-remove', views.reaction_remove_handler),
    path('reaction-save', views.reaction_save_handler),
    # species pages and endpoints
    path('species', views.species_home_handler),
    path('species-detail', views.species_detail_handler),
    path('species-remove', views.species_remove_handler),
    path('species-save', views.species_save_handler)
]
| 44.333333 | 81 | 0.759398 | from django.urls import path, include
from . import views
urlpatterns = [
path('', views.species_home_handler),
path('conditions-species-list', views.conditions_species_list_handler),
path('reactions', views.reactions_home_handler),
path('reaction-detail', views.reaction_detail_handler),
path('reaction-musica-names-list', views.reaction_musica_names_list_handler),
path('reaction-type-schema', views.reaction_type_schema_handler),
path('reaction-remove', views.reaction_remove_handler),
path('reaction-save', views.reaction_save_handler),
path('species', views.species_home_handler),
path('species-detail', views.species_detail_handler),
path('species-remove', views.species_remove_handler),
path('species-save', views.species_save_handler)
]
| true | true |
1c3204778ce9d06783a0992f4a8fc8455b948397 | 8,955 | py | Python | espnet/nets/pytorch_backend/transducer/rnn_decoder.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | null | null | null | espnet/nets/pytorch_backend/transducer/rnn_decoder.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | null | null | null | espnet/nets/pytorch_backend/transducer/rnn_decoder.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | null | null | null | """RNN decoder definition for Transducer model."""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from espnet.nets.transducer_decoder_interface import (
ExtendedHypothesis,
Hypothesis,
TransducerDecoderInterface,
)
class RNNDecoder(TransducerDecoderInterface, torch.nn.Module):
    """RNN decoder module for Transducer model.

    Args:
        odim: Output dimension.
        dtype: Decoder units type ("lstm" selects LSTM layers, anything
            else selects GRU layers).
        dlayers: Number of decoder layers.
        dunits: Number of decoder units per layer.
        embed_dim: Embedding layer dimension.
        dropout_rate: Dropout rate for decoder layers.
        dropout_rate_embed: Dropout rate for embedding layer.
        blank_id: Blank symbol ID.

    """

    def __init__(
        self,
        odim: int,
        dtype: str,
        dlayers: int,
        dunits: int,
        embed_dim: int,
        dropout_rate: float = 0.0,
        dropout_rate_embed: float = 0.0,
        blank_id: int = 0,
    ):
        """Transducer initializer."""
        super().__init__()

        self.embed = torch.nn.Embedding(odim, embed_dim, padding_idx=blank_id)
        self.dropout_embed = torch.nn.Dropout(p=dropout_rate_embed)

        dec_net = torch.nn.LSTM if dtype == "lstm" else torch.nn.GRU

        # first layer maps the embedding dimension, subsequent ones D_dec
        self.decoder = torch.nn.ModuleList(
            [dec_net(embed_dim, dunits, 1, batch_first=True)]
        )
        self.dropout_dec = torch.nn.Dropout(p=dropout_rate)

        for _ in range(1, dlayers):
            self.decoder += [dec_net(dunits, dunits, 1, batch_first=True)]

        self.dlayers = dlayers
        self.dunits = dunits
        self.dtype = dtype

        self.odim = odim

        self.ignore_id = -1
        self.blank_id = blank_id

        self.multi_gpus = torch.cuda.device_count() > 1

    def set_device(self, device: torch.device):
        """Set GPU device to use.

        Args:
            device: Device ID.

        """
        self.device = device

    def init_state(
        self, batch_size: int
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Initialize decoder states.

        Args:
            batch_size: Batch size.

        Returns:
            : Initial decoder hidden states. ((N, B, D_dec), (N, B, D_dec));
              the cell state is None for GRU decoders.

        """
        h_n = torch.zeros(
            self.dlayers,
            batch_size,
            self.dunits,
            device=self.device,
        )

        if self.dtype == "lstm":
            c_n = torch.zeros(
                self.dlayers,
                batch_size,
                self.dunits,
                device=self.device,
            )

            return (h_n, c_n)

        return (h_n, None)

    def rnn_forward(
        self,
        sequence: torch.Tensor,
        state: Tuple[torch.Tensor, Optional[torch.Tensor]],
    ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, Optional[torch.Tensor]]]:
        """Encode source label sequences.

        Args:
            sequence: RNN input sequences. (B, D_emb)
            state: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec))

        Returns:
            sequence: RNN output sequences. (B, D_dec)
            (h_next, c_next): Decoder hidden states. ((N, B, D_dec), (N, B, D_dec))

        """
        h_prev, c_prev = state
        h_next, c_next = self.init_state(sequence.size(0))

        # feed the sequence through each single-layer RNN, carrying the
        # per-layer hidden (and, for LSTM, cell) state slices
        for layer in range(self.dlayers):
            if self.dtype == "lstm":
                sequence, (
                    h_next[layer : layer + 1],
                    c_next[layer : layer + 1],
                ) = self.decoder[layer](
                    sequence, hx=(h_prev[layer : layer + 1], c_prev[layer : layer + 1])
                )
            else:
                sequence, h_next[layer : layer + 1] = self.decoder[layer](
                    sequence, hx=h_prev[layer : layer + 1]
                )

            sequence = self.dropout_dec(sequence)

        return sequence, (h_next, c_next)

    def forward(self, labels: torch.Tensor) -> torch.Tensor:
        """Encode source label sequences.

        Args:
            labels: Label ID sequences. (B, L)

        Returns:
            dec_out: Decoder output sequences. (B, T, U, D_dec)

        """
        init_state = self.init_state(labels.size(0))
        dec_embed = self.dropout_embed(self.embed(labels))

        dec_out, _ = self.rnn_forward(dec_embed, init_state)

        return dec_out

    def score(
        self, hyp: Hypothesis, cache: Dict[str, Any]
    ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, Optional[torch.Tensor]], torch.Tensor]:
        """One-step forward hypothesis.

        Args:
            hyp: Hypothesis.
            cache: Pairs of (dec_out, state) for each label sequence. (key)

        Returns:
            dec_out: Decoder output sequence. (1, D_dec)
            dec_state: Decoder hidden states. ((N, 1, D_dec), (N, 1, D_dec))
            label: Label ID for LM. (1,)

        """
        label = torch.full((1, 1), hyp.yseq[-1], dtype=torch.long, device=self.device)

        # label history is used as the cache key
        str_labels = "_".join(list(map(str, hyp.yseq)))

        if str_labels in cache:
            dec_out, dec_state = cache[str_labels]
        else:
            dec_emb = self.embed(label)

            dec_out, dec_state = self.rnn_forward(dec_emb, hyp.dec_state)
            cache[str_labels] = (dec_out, dec_state)

        return dec_out[0][0], dec_state, label[0]

    def batch_score(
        self,
        hyps: Union[List[Hypothesis], List[ExtendedHypothesis]],
        dec_states: Tuple[torch.Tensor, Optional[torch.Tensor]],
        cache: Dict[str, Any],
        use_lm: bool,
    ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor], torch.Tensor]:
        """One-step forward hypotheses.

        Args:
            hyps: Hypotheses.
            dec_states: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec))
            cache: Pairs of (dec_out, dec_states) for each label sequence. (keys)
            use_lm: Whether to compute label ID sequences for LM.

        Returns:
            dec_out: Decoder output sequences. (B, D_dec)
            dec_states: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec))
            lm_labels: Label ID sequences for LM. (B,)

        """
        final_batch = len(hyps)

        process = []
        done = [None] * final_batch

        # split hypotheses into cached ones and those needing a forward pass
        for i, hyp in enumerate(hyps):
            str_labels = "_".join(list(map(str, hyp.yseq)))

            if str_labels in cache:
                done[i] = cache[str_labels]
            else:
                process.append((str_labels, hyp.yseq[-1], hyp.dec_state))

        if process:
            # BUG FIX: the legacy ``torch.LongTensor(data, device=...)``
            # constructor does not accept non-CPU devices; use the modern
            # ``torch.tensor`` factory instead.
            labels = torch.tensor(
                [[p[1]] for p in process], dtype=torch.long, device=self.device
            )
            p_dec_states = self.create_batch_states(
                self.init_state(labels.size(0)), [p[2] for p in process]
            )

            dec_emb = self.embed(labels)
            dec_out, new_states = self.rnn_forward(dec_emb, p_dec_states)

        # merge freshly computed results back into batch order and cache them
        j = 0
        for i in range(final_batch):
            if done[i] is None:
                state = self.select_state(new_states, j)

                done[i] = (dec_out[j], state)
                cache[process[j][0]] = (dec_out[j], state)

                j += 1

        dec_out = torch.cat([d[0] for d in done], dim=0)
        dec_states = self.create_batch_states(dec_states, [d[1] for d in done])

        if use_lm:
            # BUG FIX: same legacy-constructor issue as above.
            lm_labels = torch.tensor(
                [h.yseq[-1] for h in hyps], dtype=torch.long, device=self.device
            )

            return dec_out, dec_states, lm_labels

        return dec_out, dec_states, None

    def select_state(
        self, states: Tuple[torch.Tensor, Optional[torch.Tensor]], idx: int
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Get specified ID state from decoder hidden states.

        Args:
            states: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec))
            idx: State ID to extract.

        Returns:
            : Decoder hidden state for given ID.
              ((N, 1, D_dec), (N, 1, D_dec))

        """
        return (
            states[0][:, idx : idx + 1, :],
            states[1][:, idx : idx + 1, :] if self.dtype == "lstm" else None,
        )

    def create_batch_states(
        self,
        states: Tuple[torch.Tensor, Optional[torch.Tensor]],
        new_states: List[Tuple[torch.Tensor, Optional[torch.Tensor]]],
        check_list: Optional[List] = None,
    ) -> List[Tuple[torch.Tensor, Optional[torch.Tensor]]]:
        """Create decoder hidden states.

        Args:
            states: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec))
            new_states: Decoder hidden states. [N x ((1, D_dec), (1, D_dec))]
            check_list: Unused; kept for interface compatibility.

        Returns:
            states: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec))

        """
        return (
            torch.cat([s[0] for s in new_states], dim=1),
            torch.cat([s[1] for s in new_states], dim=1)
            if self.dtype == "lstm"
            else None,
        )
| 30.56314 | 88 | 0.552205 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from espnet.nets.transducer_decoder_interface import (
ExtendedHypothesis,
Hypothesis,
TransducerDecoderInterface,
)
class RNNDecoder(TransducerDecoderInterface, torch.nn.Module):
def __init__(
self,
odim: int,
dtype: str,
dlayers: int,
dunits: int,
embed_dim: int,
dropout_rate: float = 0.0,
dropout_rate_embed: float = 0.0,
blank_id: int = 0,
):
super().__init__()
self.embed = torch.nn.Embedding(odim, embed_dim, padding_idx=blank_id)
self.dropout_embed = torch.nn.Dropout(p=dropout_rate_embed)
dec_net = torch.nn.LSTM if dtype == "lstm" else torch.nn.GRU
self.decoder = torch.nn.ModuleList(
[dec_net(embed_dim, dunits, 1, batch_first=True)]
)
self.dropout_dec = torch.nn.Dropout(p=dropout_rate)
for _ in range(1, dlayers):
self.decoder += [dec_net(dunits, dunits, 1, batch_first=True)]
self.dlayers = dlayers
self.dunits = dunits
self.dtype = dtype
self.odim = odim
self.ignore_id = -1
self.blank_id = blank_id
self.multi_gpus = torch.cuda.device_count() > 1
def set_device(self, device: torch.device):
self.device = device
def init_state(
self, batch_size: int
) -> Tuple[torch.Tensor, Optional[torch.tensor]]:
h_n = torch.zeros(
self.dlayers,
batch_size,
self.dunits,
device=self.device,
)
if self.dtype == "lstm":
c_n = torch.zeros(
self.dlayers,
batch_size,
self.dunits,
device=self.device,
)
return (h_n, c_n)
return (h_n, None)
def rnn_forward(
self,
sequence: torch.Tensor,
state: Tuple[torch.Tensor, Optional[torch.Tensor]],
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, Optional[torch.Tensor]]]:
h_prev, c_prev = state
h_next, c_next = self.init_state(sequence.size(0))
for layer in range(self.dlayers):
if self.dtype == "lstm":
sequence, (
h_next[layer : layer + 1],
c_next[layer : layer + 1],
) = self.decoder[layer](
sequence, hx=(h_prev[layer : layer + 1], c_prev[layer : layer + 1])
)
else:
sequence, h_next[layer : layer + 1] = self.decoder[layer](
sequence, hx=h_prev[layer : layer + 1]
)
sequence = self.dropout_dec(sequence)
return sequence, (h_next, c_next)
def forward(self, labels: torch.Tensor) -> torch.Tensor:
init_state = self.init_state(labels.size(0))
dec_embed = self.dropout_embed(self.embed(labels))
dec_out, _ = self.rnn_forward(dec_embed, init_state)
return dec_out
def score(
self, hyp: Hypothesis, cache: Dict[str, Any]
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, Optional[torch.Tensor]], torch.Tensor]:
label = torch.full((1, 1), hyp.yseq[-1], dtype=torch.long, device=self.device)
str_labels = "_".join(list(map(str, hyp.yseq)))
if str_labels in cache:
dec_out, dec_state = cache[str_labels]
else:
dec_emb = self.embed(label)
dec_out, dec_state = self.rnn_forward(dec_emb, hyp.dec_state)
cache[str_labels] = (dec_out, dec_state)
return dec_out[0][0], dec_state, label[0]
def batch_score(
self,
hyps: Union[List[Hypothesis], List[ExtendedHypothesis]],
dec_states: Tuple[torch.Tensor, Optional[torch.Tensor]],
cache: Dict[str, Any],
use_lm: bool,
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor], torch.Tensor]:
final_batch = len(hyps)
process = []
done = [None] * final_batch
for i, hyp in enumerate(hyps):
str_labels = "_".join(list(map(str, hyp.yseq)))
if str_labels in cache:
done[i] = cache[str_labels]
else:
process.append((str_labels, hyp.yseq[-1], hyp.dec_state))
if process:
labels = torch.LongTensor([[p[1]] for p in process], device=self.device)
p_dec_states = self.create_batch_states(
self.init_state(labels.size(0)), [p[2] for p in process]
)
dec_emb = self.embed(labels)
dec_out, new_states = self.rnn_forward(dec_emb, p_dec_states)
j = 0
for i in range(final_batch):
if done[i] is None:
state = self.select_state(new_states, j)
done[i] = (dec_out[j], state)
cache[process[j][0]] = (dec_out[j], state)
j += 1
dec_out = torch.cat([d[0] for d in done], dim=0)
dec_states = self.create_batch_states(dec_states, [d[1] for d in done])
if use_lm:
lm_labels = torch.LongTensor([h.yseq[-1] for h in hyps], device=self.device)
return dec_out, dec_states, lm_labels
return dec_out, dec_states, None
def select_state(
self, states: Tuple[torch.Tensor, Optional[torch.Tensor]], idx: int
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
return (
states[0][:, idx : idx + 1, :],
states[1][:, idx : idx + 1, :] if self.dtype == "lstm" else None,
)
def create_batch_states(
self,
states: Tuple[torch.Tensor, Optional[torch.Tensor]],
new_states: List[Tuple[torch.Tensor, Optional[torch.Tensor]]],
check_list: Optional[List] = None,
) -> List[Tuple[torch.Tensor, Optional[torch.Tensor]]]:
return (
torch.cat([s[0] for s in new_states], dim=1),
torch.cat([s[1] for s in new_states], dim=1)
if self.dtype == "lstm"
else None,
)
| true | true |
1c3204b5c753a39ab2806a24f625af7ab5c53c3e | 2,520 | py | Python | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/chinese/datetime_extractor_config.py | ahmedabuamra/Recognizers-Text | 31193d89d3532839742992a2755c1d8539c68116 | [
"MIT"
] | 10 | 2019-05-11T18:07:14.000Z | 2021-08-20T03:02:47.000Z | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/chinese/datetime_extractor_config.py | ahmedabuamra/Recognizers-Text | 31193d89d3532839742992a2755c1d8539c68116 | [
"MIT"
] | 1 | 2020-07-10T08:25:36.000Z | 2020-07-10T08:25:36.000Z | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/chinese/datetime_extractor_config.py | ahmedabuamra/Recognizers-Text | 31193d89d3532839742992a2755c1d8539c68116 | [
"MIT"
] | 18 | 2019-08-19T12:11:00.000Z | 2021-10-12T09:36:27.000Z | from typing import Pattern
import regex
from recognizers_text import RegExpUtility
from ...resources.chinese_date_time import ChineseDateTime
from ..extractors import DateTimeExtractor
from ..base_datetime import DateTimeExtractorConfiguration
from .date_extractor import ChineseDateExtractor
from .time_extractor import ChineseTimeExtractor
class ChineseDateTimeExtractorConfiguration(DateTimeExtractorConfiguration):
    """Chinese-language configuration for the base datetime extractor.

    Supplies the Chinese date/time point extractors and the regexes used to
    combine date and time mentions; configuration hooks that do not apply
    to Chinese return None.
    """

    @property
    def date_point_extractor(self) -> DateTimeExtractor:
        return self._date_point_extractor

    @property
    def time_point_extractor(self) -> DateTimeExtractor:
        return self._time_point_extractor

    @property
    def duration_extractor(self) -> DateTimeExtractor:
        return None

    @property
    def suffix_regex(self) -> Pattern:
        return None

    @property
    def now_regex(self) -> Pattern:
        return self._now_regex

    @property
    def time_of_today_after_regex(self) -> Pattern:
        return None

    @property
    def simple_time_of_today_after_regex(self) -> Pattern:
        return None

    @property
    def night_regex(self) -> Pattern:
        return self._night_regex

    @property
    def time_of_today_before_regex(self) -> Pattern:
        return self._time_of_today_before_regex

    @property
    def simple_time_of_today_before_regex(self) -> Pattern:
        return None

    @property
    def specific_end_of_regex(self) -> Pattern:
        return None

    @property
    def unspecific_end_of_regex(self) -> Pattern:
        return None

    @property
    def unit_regex(self) -> Pattern:
        return None

    @property
    def preposition_regex(self) -> Pattern:
        return self._preposition_regex

    @property
    def utility_configuration(self) -> any:
        return None

    def __init__(self):
        self._date_point_extractor = ChineseDateExtractor()
        self._time_point_extractor = ChineseTimeExtractor()
        self._now_regex = RegExpUtility.get_safe_reg_exp(
            ChineseDateTime.NowRegex)
        self._night_regex = RegExpUtility.get_safe_reg_exp(
            ChineseDateTime.NightRegex)
        self._time_of_today_before_regex = RegExpUtility.get_safe_reg_exp(
            ChineseDateTime.TimeOfTodayRegex)
        self._preposition_regex = RegExpUtility.get_safe_reg_exp(
            ChineseDateTime.PrepositionRegex)

    def is_connector_token(self, source: str) -> bool:
        """Return True if ``source`` can connect a date part to a time part."""
        # BUG FIX: ``regex.search`` returns a Match object (or None); coerce
        # to a real bool so the return value matches the annotated type.
        return (
            not source.strip()
            or source == ','
            or regex.search(self.preposition_regex, source) is not None
        )
| 28.636364 | 98 | 0.717063 | from typing import Pattern
import regex
from recognizers_text import RegExpUtility
from ...resources.chinese_date_time import ChineseDateTime
from ..extractors import DateTimeExtractor
from ..base_datetime import DateTimeExtractorConfiguration
from .date_extractor import ChineseDateExtractor
from .time_extractor import ChineseTimeExtractor
class ChineseDateTimeExtractorConfiguration(DateTimeExtractorConfiguration):
@property
def date_point_extractor(self) -> DateTimeExtractor:
return self._date_point_extractor
@property
def time_point_extractor(self) -> DateTimeExtractor:
return self._time_point_extractor
@property
def duration_extractor(self) -> DateTimeExtractor:
return None
@property
def suffix_regex(self) -> Pattern:
return None
@property
def now_regex(self) -> Pattern:
return self._now_regex
@property
def time_of_today_after_regex(self) -> Pattern:
return None
@property
def simple_time_of_today_after_regex(self) -> Pattern:
return None
@property
def night_regex(self) -> Pattern:
return self._night_regex
@property
def time_of_today_before_regex(self) -> Pattern:
return self._time_of_today_before_regex
@property
def simple_time_of_today_before_regex(self) -> Pattern:
return None
@property
def specific_end_of_regex(self) -> Pattern:
return None
@property
def unspecific_end_of_regex(self) -> Pattern:
return None
@property
def unit_regex(self) -> Pattern:
return None
@property
def preposition_regex(self) -> Pattern:
return self._preposition_regex
@property
def utility_configuration(self) -> any:
return None
def __init__(self):
self._date_point_extractor = ChineseDateExtractor()
self._time_point_extractor = ChineseTimeExtractor()
self._now_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.NowRegex)
self._night_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.NightRegex)
self._time_of_today_before_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.TimeOfTodayRegex)
self._preposition_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.PrepositionRegex)
def is_connector_token(self, source: str) -> bool:
return not source.strip() or source == ',' or regex.search(self.preposition_regex, source)
| true | true |
1c320652313fc5533b8b6a4c7546f83353505333 | 2,858 | py | Python | Pricing/Binomial_Tree/streamlit_binomial_tree.py | jingshenghua/QuantTopics | 20e64710f9a4f22b626fa0ac0a7b062baccdc62a | [
"MIT"
] | null | null | null | Pricing/Binomial_Tree/streamlit_binomial_tree.py | jingshenghua/QuantTopics | 20e64710f9a4f22b626fa0ac0a7b062baccdc62a | [
"MIT"
] | null | null | null | Pricing/Binomial_Tree/streamlit_binomial_tree.py | jingshenghua/QuantTopics | 20e64710f9a4f22b626fa0ac0a7b062baccdc62a | [
"MIT"
] | null | null | null | import streamlit as st
from datetime import datetime
import matplotlib.pyplot as plt
from tree_generator import BinomialTree
import numpy as np
st.set_option('deprecation.showPyplotGlobalUse', False)
# Page headings.
# NOTE(review): ``month`` is computed but never used below — confirm before
# removing.
month = datetime.now().month
title = "Binomial Tree Option Pricing"
st.title(title + "🌲🎄")
st.sidebar.title("Parameters")
# Model inputs collected from the sidebar widgets.
S = st.sidebar.number_input('Stock Price (S)', value=100.,)
K = st.sidebar.number_input('Exercise Price (K)', value=100.,)
T = st.sidebar.number_input('Time Periods (T)', value=2., max_value=15.)
dt = st.sidebar.number_input('Time step (dt)', value=1., max_value=15.,step=0.01)
r = st.sidebar.number_input('Inter-period Interest Rate (r)', value=0.05,)
q = st.sidebar.number_input('Dividend Yield (q)', value=0.0,)
sigma = st.sidebar.number_input('stock annualized volatility (sigma)', value=0.1,min_value=0.)
# Build the lattice from the inputs and echo the derived tree parameters
# (u, d, p come from tree.fit — see tree_generator.BinomialTree).
tree = BinomialTree()
tree.fit(r,q,sigma)
price_tree = tree.create_price_tree(S,dt,T)
st.sidebar.write("Stock Upper Factor (u) ", round(tree.u, 3))
st.sidebar.write("Stock Down Factor (d) ", round(tree.d, 3))
st.sidebar.write("Risk Neutral Probability (p) ", round(tree.p, 3))
# Main body: explanation text and the user-selected tree plots.
st.header("*Cox-Ross-Rubinstein (CRR) binomial tree*")
st.markdown("This visualisation aims to explore the dynamics of CRR binomial tree in option pricing. "
            "https://en.wikipedia.org/wiki/Binomial_options_pricing_model"
            )
st.subheader('Key:')
c1,c2,c3,c4,c5 = st.columns(5)
with c1:
    price = st.checkbox('price tree')
with c2:
    European_call = st.checkbox('European Call tree')
with c3:
    European_put = st.checkbox('European Put tree')
with c4:
    American_call = st.checkbox('American Call tree')
with c5:
    American_put = st.checkbox('American Put tree')
# Render each requested tree.  Payoff lambdas are call max(S-K, 0) and put
# max(K-S, 0); style='American' presumably enables early exercise inside
# tree_generator — confirm there.
if price:
    st.pyplot(tree.plot_tree(price_tree))
if European_call:
    payoff = tree.create_payoff_tree(price_tree,lambda x:np.fmax(x-K,0))
    st.pyplot(tree.plot_tree(payoff))
    st.write("European Call price ", round(tree.compute_payoff(payoff), 3))
if European_put:
    payoff = tree.create_payoff_tree(price_tree,lambda x:np.fmax(K-x,0))
    st.pyplot(tree.plot_tree(payoff))
    st.write("European put price ", round(tree.compute_payoff(payoff), 3))
if American_call:
    payoff = tree.create_payoff_tree(price_tree,lambda x:np.fmax(x-K,0),style='American')
    st.pyplot(tree.plot_tree(payoff))
    st.write("American Call price ", round(tree.compute_payoff(payoff,style='American'), 3))
if American_put:
    payoff = tree.create_payoff_tree(price_tree,lambda x:np.fmax(K-x,0),style='American')
    st.pyplot(tree.plot_tree(payoff))
    st.write("American Put price ", round(tree.compute_payoff(payoff,style='American'), 3))
st.subheader("Disclaimer")
st.write("All information aims to provide for educational purposes only and does not constitute financial advice")
| 43.30303 | 114 | 0.730581 | import streamlit as st
from datetime import datetime
import matplotlib.pyplot as plt
from tree_generator import BinomialTree
import numpy as np
st.set_option('deprecation.showPyplotGlobalUse', False)
month = datetime.now().month
title = "Binomial Tree Option Pricing"
st.title(title + "🌲🎄")
st.sidebar.title("Parameters")
S = st.sidebar.number_input('Stock Price (S)', value=100.,)
K = st.sidebar.number_input('Exercise Price (K)', value=100.,)
T = st.sidebar.number_input('Time Periods (T)', value=2., max_value=15.)
dt = st.sidebar.number_input('Time step (dt)', value=1., max_value=15.,step=0.01)
r = st.sidebar.number_input('Inter-period Interest Rate (r)', value=0.05,)
q = st.sidebar.number_input('Dividend Yield (q)', value=0.0,)
sigma = st.sidebar.number_input('stock annualized volatility (sigma)', value=0.1,min_value=0.)
tree = BinomialTree()
tree.fit(r,q,sigma)
price_tree = tree.create_price_tree(S,dt,T)
st.sidebar.write("Stock Upper Factor (u) ", round(tree.u, 3))
st.sidebar.write("Stock Down Factor (d) ", round(tree.d, 3))
st.sidebar.write("Risk Neutral Probability (p) ", round(tree.p, 3))
st.header("*Cox-Ross-Rubinstein (CRR) binomial tree*")
st.markdown("This visualisation aims to explore the dynamics of CRR binomial tree in option pricing. "
"https://en.wikipedia.org/wiki/Binomial_options_pricing_model"
)
st.subheader('Key:')
c1,c2,c3,c4,c5 = st.columns(5)
with c1:
price = st.checkbox('price tree')
with c2:
European_call = st.checkbox('European Call tree')
with c3:
European_put = st.checkbox('European Put tree')
with c4:
American_call = st.checkbox('American Call tree')
with c5:
American_put = st.checkbox('American Put tree')
if price:
st.pyplot(tree.plot_tree(price_tree))
if European_call:
payoff = tree.create_payoff_tree(price_tree,lambda x:np.fmax(x-K,0))
st.pyplot(tree.plot_tree(payoff))
st.write("European Call price ", round(tree.compute_payoff(payoff), 3))
if European_put:
payoff = tree.create_payoff_tree(price_tree,lambda x:np.fmax(K-x,0))
st.pyplot(tree.plot_tree(payoff))
st.write("European put price ", round(tree.compute_payoff(payoff), 3))
if American_call:
payoff = tree.create_payoff_tree(price_tree,lambda x:np.fmax(x-K,0),style='American')
st.pyplot(tree.plot_tree(payoff))
st.write("American Call price ", round(tree.compute_payoff(payoff,style='American'), 3))
if American_put:
payoff = tree.create_payoff_tree(price_tree,lambda x:np.fmax(K-x,0),style='American')
st.pyplot(tree.plot_tree(payoff))
st.write("American Put price ", round(tree.compute_payoff(payoff,style='American'), 3))
st.subheader("Disclaimer")
st.write("All information aims to provide for educational purposes only and does not constitute financial advice")
| true | true |
1c3206ca231af4cca09ce5d975865a86ce75b5f5 | 2,305 | py | Python | sqlalchemy_i18n/__init__.py | matthias-k/sqlalchemy-i18n | d168aa61658ae1f1e01150d0fb086781ab101832 | [
"BSD-3-Clause"
] | null | null | null | sqlalchemy_i18n/__init__.py | matthias-k/sqlalchemy-i18n | d168aa61658ae1f1e01150d0fb086781ab101832 | [
"BSD-3-Clause"
] | null | null | null | sqlalchemy_i18n/__init__.py | matthias-k/sqlalchemy-i18n | d168aa61658ae1f1e01150d0fb086781ab101832 | [
"BSD-3-Clause"
] | null | null | null | import sqlalchemy as sa
from .builders import ImproperlyConfigured
from .manager import translation_base, translation_manager, TranslationManager
from .translatable import Translatable, UnknownLocaleError
# Public names exported by ``from sqlalchemy_i18n import *``.
# BUG FIX: ``__all__`` must contain *strings*; it previously held the
# imported objects themselves, which breaks star-imports at runtime.
__all__ = (
    'translation_base',
    'ImproperlyConfigured',
    'Translatable',
    'TranslationManager',
    'translation_manager',
    'UnknownLocaleError'
)


__version__ = '1.0.1'
def make_translatable(
    mapper=sa.orm.mapper,
    session=sa.orm.session.Session,
    manager=translation_manager,
    options=None
):
    """
    Assigns translation listeners for given mapper and session.

    :param mapper:
        SQLAlchemy declarative class or mapper to apply translation listeners
        into.
    :param session:
        SQLAlchemy session class.
    :param manager:
        SQLAlchemy-i18n TranslationManager instance
    :param options:
        TranslationManager options (dict of overrides); None means no
        overrides.
    """
    # BUG FIX: ``options`` previously defaulted to a mutable ``{}`` — the
    # classic shared-mutable-default pitfall.  A None sentinel is safe and
    # behaves identically for callers.
    if options:
        manager.options.update(options)
    sa.event.listen(
        mapper, 'instrument_class', manager.instrument_translation_classes
    )
    sa.event.listen(
        mapper, 'after_configured', manager.configure_translatable_classes
    )
    sa.event.listen(
        session, 'before_flush', manager.auto_create_translations
    )
def find_translations(obj, property_name, locale):
    """Build a query suggesting translations for ``obj``'s property in ``locale``.

    Finds translation rows whose ``property_name`` value (in ``obj``'s own
    locale) equals ``obj``'s current value, then groups the corresponding
    rows in the target ``locale`` by that property, labelling each distinct
    value with its relative frequency as ``confidence``.

    :param obj: translatable instance attached to a session
    :param property_name: name of the translated column to look up
    :param locale: target locale to fetch candidate translations from
    :return: un-executed SQLAlchemy query yielding
        ``(translation, confidence)`` rows
    """
    class_ = obj.__class__
    session = sa.orm.object_session(obj)
    # the generated *Translation class and the translated column on it
    translation_class = class_.__translatable__['class']
    property_ = getattr(translation_class, property_name)
    # ids of translation rows whose source-locale text matches obj's text
    subquery = (
        session.query(translation_class.id)
        .filter(
            sa.and_(
                property_ ==
                getattr(obj, property_name),
                translation_class.locale ==
                obj.locale
            )
        )
    )
    conditions = [
        translation_class.id.in_(subquery),
        translation_class.locale == locale,
        property_.isnot(None)
    ]
    # total candidate count, used as the denominator for ``confidence``.
    # NOTE(review): dividing by this un-executed Query relies on SQLAlchemy
    # coercing it to a scalar subquery — confirm on the supported SQLAlchemy
    # versions.
    total_count = (
        session.query(sa.func.cast(sa.func.count('1'), sa.Numeric))
        .filter(sa.and_(*conditions))
    )
    query = (
        session.query(
            property_.label('translation'),
            (sa.func.cast(sa.func.count('1'), sa.Numeric) / total_count)
            .label('confidence')
        )
        .filter(sa.and_(*conditions))
        .group_by(property_)
    )
    return query
| 25.054348 | 78 | 0.646421 | import sqlalchemy as sa
from .builders import ImproperlyConfigured
from .manager import translation_base, translation_manager, TranslationManager
from .translatable import Translatable, UnknownLocaleError
__all__ = (
translation_base,
ImproperlyConfigured,
Translatable,
TranslationManager,
translation_manager,
UnknownLocaleError
)
__version__ = '1.0.1'
def make_translatable(
mapper=sa.orm.mapper,
session=sa.orm.session.Session,
manager=translation_manager,
options={}
):
manager.options.update(options)
sa.event.listen(
mapper, 'instrument_class', manager.instrument_translation_classes
)
sa.event.listen(
mapper, 'after_configured', manager.configure_translatable_classes
)
sa.event.listen(
session, 'before_flush', manager.auto_create_translations
)
def find_translations(obj, property_name, locale):
class_ = obj.__class__
session = sa.orm.object_session(obj)
translation_class = class_.__translatable__['class']
property_ = getattr(translation_class, property_name)
subquery = (
session.query(translation_class.id)
.filter(
sa.and_(
property_ ==
getattr(obj, property_name),
translation_class.locale ==
obj.locale
)
)
)
conditions = [
translation_class.id.in_(subquery),
translation_class.locale == locale,
property_.isnot(None)
]
total_count = (
session.query(sa.func.cast(sa.func.count('1'), sa.Numeric))
.filter(sa.and_(*conditions))
)
query = (
session.query(
property_.label('translation'),
(sa.func.cast(sa.func.count('1'), sa.Numeric) / total_count)
.label('confidence')
)
.filter(sa.and_(*conditions))
.group_by(property_)
)
return query
| true | true |
1c32074128ded1c5400004bf892b57033fd0659d | 9,824 | py | Python | plugins/trezor/clientbase.py | electrummoneroclassic/electrummoneroclassic | 5a0d22ae52a73b41788112c3ff75799c6b45a701 | [
"MIT"
] | 1 | 2018-04-14T19:02:36.000Z | 2018-04-14T19:02:36.000Z | plugins/trezor/clientbase.py | electrummoneroclassic/electrummoneroclassic | 5a0d22ae52a73b41788112c3ff75799c6b45a701 | [
"MIT"
] | 2 | 2021-06-02T02:54:27.000Z | 2021-11-15T17:52:11.000Z | plugins/trezor/clientbase.py | electrummoneroclassic/electrummoneroclassic | 5a0d22ae52a73b41788112c3ff75799c6b45a701 | [
"MIT"
] | null | null | null | import time
from struct import pack
from electrum.i18n import _
from electrum.util import PrintError, UserCancelled
from electrum.keystore import bip39_normalize_passphrase
from electrum.bitcoin import serialize_xpub
class GuiMixin(object):
    """GUI callbacks invoked by the trezorlib/keepkeylib protocol client.

    The underlying protocol implementation calls these ``callback_*`` hooks
    whenever the device requests user interaction (button press, PIN,
    passphrase, recovery word).  Each hook returns the protobuf "Ack"
    message to send back, or ``self.proto.Cancel()`` to abort.
    """
    # Requires: self.proto, self.device

    # User-facing prompts keyed by ButtonRequest code; ``{}`` is filled
    # with the device name.  Unknown codes fall back to 'default'.
    messages = {
        3: _("Confirm the transaction output on your {} device"),
        4: _("Confirm internal entropy on your {} device to begin"),
        5: _("Write down the seed word shown on your {}"),
        6: _("Confirm on your {} that you want to wipe it clean"),
        7: _("Confirm on your {} device the message to sign"),
        8: _("Confirm the total amount spent and the transaction fee on your "
             "{} device"),
        10: _("Confirm wallet address on your {} device"),
        'default': _("Check your {} device to continue"),
    }

    def callback_Failure(self, msg):
        # BaseClient's unfortunate call() implementation forces us to
        # raise exceptions on failure in order to unwind the stack.
        # However, making the user acknowledge they cancelled
        # gets old very quickly, so we suppress those. The NotInitialized
        # one is misnamed and indicates a passphrase request was cancelled.
        if msg.code in (self.types.FailureType.PinCancelled,
                        self.types.FailureType.ActionCancelled,
                        self.types.FailureType.NotInitialized):
            raise UserCancelled()
        raise RuntimeError(msg.message)

    def callback_ButtonRequest(self, msg):
        """Show a cancellable prompt telling the user to check the device."""
        # A caller may have pre-set a specific prompt in self.msg;
        # otherwise pick one from the messages table by request code.
        message = self.msg
        if not message:
            message = self.messages.get(msg.code, self.messages['default'])
        self.handler.show_message(message.format(self.device), self.cancel)
        return self.proto.ButtonAck()

    def callback_PinMatrixRequest(self, msg):
        """Ask for a PIN; ``msg.type`` selects new / re-enter / current."""
        if msg.type == 2:
            msg = _("Enter a new PIN for your {}:")
        elif msg.type == 3:
            msg = (_("Re-enter the new PIN for your {}.\n\n"
                     "NOTE: the positions of the numbers have changed!"))
        else:
            msg = _("Enter your current {} PIN:")
        pin = self.handler.get_pin(msg.format(self.device))
        if len(pin) > 9:
            self.handler.show_error(_('The PIN cannot be longer than 9 characters.'))
            pin = ''  # to cancel below
        if not pin:
            return self.proto.Cancel()
        return self.proto.PinMatrixAck(pin=pin)

    def callback_PassphraseRequest(self, req):
        """Collect the wallet passphrase, or ack when entry is on-device."""
        # Newer firmware can take the passphrase on the device itself, in
        # which case there is nothing to collect here.
        if req and hasattr(req, 'on_device') and req.on_device is True:
            return self.proto.PassphraseAck()

        if self.creating_wallet:
            msg = _("Enter a passphrase to generate this wallet.  Each time "
                    "you use this wallet your {} will prompt you for the "
                    "passphrase.  If you forget the passphrase you cannot "
                    "access the bitcoins in the wallet.").format(self.device)
        else:
            msg = _("Enter the passphrase to unlock this wallet:")
        passphrase = self.handler.get_passphrase(msg, self.creating_wallet)
        if passphrase is None:
            return self.proto.Cancel()
        # NFKD-normalize per BIP-39 so the same passphrase always derives
        # the same wallet regardless of how the user's OS composed it.
        passphrase = bip39_normalize_passphrase(passphrase)

        ack = self.proto.PassphraseAck(passphrase=passphrase)
        # Length is checked on the normalized form the device will receive.
        length = len(ack.passphrase)
        if length > 50:
            self.handler.show_error(_("Too long passphrase ({} > 50 chars).").format(length))
            return self.proto.Cancel()
        return ack

    def callback_PassphraseStateRequest(self, msg):
        """Acknowledge the device's passphrase-state message (no UI)."""
        return self.proto.PassphraseStateAck()

    def callback_WordRequest(self, msg):
        """Prompt for the next seed word during recovery.

        NOTE(review): relies on ``self.step`` having been initialized by
        the recovery flow before the first request — confirm at call site.
        """
        self.step += 1
        msg = _("Step {}/24.  Enter seed word as explained on "
                "your {}:").format(self.step, self.device)
        word = self.handler.get_word(msg)
        # Unfortunately the device can't handle self.proto.Cancel()
        return self.proto.WordAck(word=word)

    def callback_CharacterRequest(self, msg):
        """Prompt for a single character (matrix-style seed entry)."""
        char_info = self.handler.get_char(msg)
        if not char_info:
            return self.proto.Cancel()
        return self.proto.CharacterAck(**char_info)
class TrezorClientBase(GuiMixin, PrintError):
    """Common Electrum-side wrapper around a trezorlib/keepkeylib client.

    Combines the GUI callbacks from :class:`GuiMixin` with lifecycle
    management (timeouts, session clearing, device close) on top of the
    library's ProtocolMixin, which is expected to be constructed first.
    """

    def __init__(self, handler, plugin, proto):
        assert hasattr(self, 'tx_api')  # ProtocolMixin already constructed?
        self.proto = proto
        self.device = plugin.device
        self.handler = handler
        self.tx_api = plugin
        self.types = plugin.types
        self.msg = None
        self.creating_wallet = False
        # Record an initial "last used" timestamp for the timeout logic.
        self.used()

    def __str__(self):
        return "%s/%s" % (self.label(), self.features.device_id)

    def label(self):
        '''The name given by the user to the device.'''
        return self.features.label

    def is_initialized(self):
        '''True if initialized, False if wiped.'''
        return self.features.initialized

    def is_pairable(self):
        # A device stuck in bootloader mode cannot be paired with a wallet.
        return not self.features.bootloader_mode

    def has_usable_connection_with_device(self):
        """Return True iff a ping round-trip to the device succeeds."""
        try:
            res = self.ping("electrum pinging device")
            assert res == "electrum pinging device"
        except BaseException:
            return False
        return True

    def used(self):
        # Stamp the time of the last operation; consulted by timeout().
        self.last_operation = time.time()

    def prevent_timeouts(self):
        # Push the "last used" time to infinity so timeout() never fires
        # while a wrapped operation is in flight.
        self.last_operation = float('inf')

    def timeout(self, cutoff):
        '''Time out the client if the last operation was before cutoff.'''
        if self.last_operation < cutoff:
            self.print_error("timed out")
            self.clear_session()

    @staticmethod
    def expand_path(n):
        '''Convert bip32 path to list of uint32 integers with prime flags
        0/-1/1' -> [0, 0x80000001, 0x80000001]'''
        # This code is similar to code in trezorlib where it unfortunately
        # is not declared as a staticmethod.  Our n has an extra element.
        PRIME_DERIVATION_FLAG = 0x80000000
        path = []
        for x in n.split('/')[1:]:
            prime = 0
            # Both the trailing apostrophe and a leading minus sign mark a
            # hardened component.
            if x.endswith("'"):
                x = x.replace('\'', '')
                prime = PRIME_DERIVATION_FLAG
            if x.startswith('-'):
                prime = PRIME_DERIVATION_FLAG
            path.append(abs(int(x)) | prime)
        return path

    def cancel(self):
        '''Provided here as in keepkeylib but not trezorlib.'''
        self.transport.write(self.proto.Cancel())

    def i4b(self, x):
        # Pack an int as 4 big-endian bytes (for xpub serialization).
        return pack('>I', x)

    def get_xpub(self, bip32_path, xtype):
        """Fetch the public node at *bip32_path* and serialize it as an xpub."""
        address_n = self.expand_path(bip32_path)
        creating = False
        node = self.get_public_node(address_n, creating).node
        return serialize_xpub(xtype, node.chain_code, node.public_key, node.depth, self.i4b(node.fingerprint), self.i4b(node.child_num))

    def toggle_passphrase(self):
        """Flip the device's passphrase-protection setting."""
        if self.features.passphrase_protection:
            self.msg = _("Confirm on your {} device to disable passphrases")
        else:
            self.msg = _("Confirm on your {} device to enable passphrases")
        enabled = not self.features.passphrase_protection
        self.apply_settings(use_passphrase=enabled)

    def change_label(self, label):
        """Set the device's user-visible label (requires on-device confirm)."""
        self.msg = _("Confirm the new label on your {} device")
        self.apply_settings(label=label)

    def change_homescreen(self, homescreen):
        """Replace the device's home-screen image (requires on-device confirm)."""
        self.msg = _("Confirm on your {} device to change your home screen")
        self.apply_settings(homescreen=homescreen)

    def set_pin(self, remove):
        """Set, change, or (when *remove* is true) disable the device PIN."""
        if remove:
            self.msg = _("Confirm on your {} device to disable PIN protection")
        elif self.features.pin_protection:
            self.msg = _("Confirm on your {} device to change your PIN")
        else:
            self.msg = _("Confirm on your {} device to set a PIN")
        self.change_pin(remove)

    def clear_session(self):
        '''Clear the session to force pin (and passphrase if enabled)
        re-entry.  Does not leak exceptions.'''
        self.print_error("clear session:", self)
        self.prevent_timeouts()
        try:
            super(TrezorClientBase, self).clear_session()
        except BaseException as e:
            # If the device was removed it has the same effect...
            self.print_error("clear_session: ignoring error", str(e))

    def get_public_node(self, address_n, creating):
        # Remember whether we are creating a wallet so the passphrase
        # prompt (GuiMixin) can word itself accordingly.
        self.creating_wallet = creating
        return super(TrezorClientBase, self).get_public_node(address_n)

    def close(self):
        '''Called when our wallet was closed or the device removed.'''
        self.print_error("closing client")
        self.clear_session()
        # Release the device
        self.transport.close()

    def firmware_version(self):
        # (major, minor, patch) tuple, comparable with standard ordering.
        f = self.features
        return (f.major_version, f.minor_version, f.patch_version)

    def atleast_version(self, major, minor=0, patch=0):
        return self.firmware_version() >= (major, minor, patch)

    @staticmethod
    def wrapper(func):
        '''Wrap methods to clear any message box they opened.'''
        def wrapped(self, *args, **kwargs):
            try:
                # Suspend the inactivity timeout for the whole operation.
                self.prevent_timeouts()
                return func(self, *args, **kwargs)
            finally:
                # Restore timeout tracking and reset per-call UI state.
                self.used()
                self.handler.finished()
                self.creating_wallet = False
                self.msg = None
        return wrapped

    @staticmethod
    def wrap_methods(cls):
        # Apply wrapper() to every library method that may open UI prompts.
        for method in ['apply_settings', 'change_pin',
                       'get_address', 'get_public_node',
                       'load_device_by_mnemonic', 'load_device_by_xprv',
                       'recovery_device', 'reset_device', 'sign_message',
                       'sign_tx', 'wipe_device']:
            setattr(cls, method, cls.wrapper(getattr(cls, method)))
| 38.225681 | 136 | 0.613599 | import time
from struct import pack
from electrum.i18n import _
from electrum.util import PrintError, UserCancelled
from electrum.keystore import bip39_normalize_passphrase
from electrum.bitcoin import serialize_xpub
class GuiMixin(object):
messages = {
3: _("Confirm the transaction output on your {} device"),
4: _("Confirm internal entropy on your {} device to begin"),
5: _("Write down the seed word shown on your {}"),
6: _("Confirm on your {} that you want to wipe it clean"),
7: _("Confirm on your {} device the message to sign"),
8: _("Confirm the total amount spent and the transaction fee on your "
"{} device"),
10: _("Confirm wallet address on your {} device"),
'default': _("Check your {} device to continue"),
}
def callback_Failure(self, msg):
# raise exceptions on failure in order to unwind the stack.
# However, making the user acknowledge they cancelled
# gets old very quickly, so we suppress those. The NotInitialized
# one is misnamed and indicates a passphrase request was cancelled.
if msg.code in (self.types.FailureType.PinCancelled,
self.types.FailureType.ActionCancelled,
self.types.FailureType.NotInitialized):
raise UserCancelled()
raise RuntimeError(msg.message)
def callback_ButtonRequest(self, msg):
message = self.msg
if not message:
message = self.messages.get(msg.code, self.messages['default'])
self.handler.show_message(message.format(self.device), self.cancel)
return self.proto.ButtonAck()
def callback_PinMatrixRequest(self, msg):
if msg.type == 2:
msg = _("Enter a new PIN for your {}:")
elif msg.type == 3:
msg = (_("Re-enter the new PIN for your {}.\n\n"
"NOTE: the positions of the numbers have changed!"))
else:
msg = _("Enter your current {} PIN:")
pin = self.handler.get_pin(msg.format(self.device))
if len(pin) > 9:
self.handler.show_error(_('The PIN cannot be longer than 9 characters.'))
pin = '' # to cancel below
if not pin:
return self.proto.Cancel()
return self.proto.PinMatrixAck(pin=pin)
def callback_PassphraseRequest(self, req):
if req and hasattr(req, 'on_device') and req.on_device is True:
return self.proto.PassphraseAck()
if self.creating_wallet:
msg = _("Enter a passphrase to generate this wallet. Each time "
"you use this wallet your {} will prompt you for the "
"passphrase. If you forget the passphrase you cannot "
"access the bitcoins in the wallet.").format(self.device)
else:
msg = _("Enter the passphrase to unlock this wallet:")
passphrase = self.handler.get_passphrase(msg, self.creating_wallet)
if passphrase is None:
return self.proto.Cancel()
passphrase = bip39_normalize_passphrase(passphrase)
ack = self.proto.PassphraseAck(passphrase=passphrase)
length = len(ack.passphrase)
if length > 50:
self.handler.show_error(_("Too long passphrase ({} > 50 chars).").format(length))
return self.proto.Cancel()
return ack
def callback_PassphraseStateRequest(self, msg):
return self.proto.PassphraseStateAck()
def callback_WordRequest(self, msg):
self.step += 1
msg = _("Step {}/24. Enter seed word as explained on "
"your {}:").format(self.step, self.device)
word = self.handler.get_word(msg)
# Unfortunately the device can't handle self.proto.Cancel()
return self.proto.WordAck(word=word)
def callback_CharacterRequest(self, msg):
char_info = self.handler.get_char(msg)
if not char_info:
return self.proto.Cancel()
return self.proto.CharacterAck(**char_info)
class TrezorClientBase(GuiMixin, PrintError):
def __init__(self, handler, plugin, proto):
assert hasattr(self, 'tx_api')
self.proto = proto
self.device = plugin.device
self.handler = handler
self.tx_api = plugin
self.types = plugin.types
self.msg = None
self.creating_wallet = False
self.used()
def __str__(self):
return "%s/%s" % (self.label(), self.features.device_id)
def label(self):
return self.features.label
def is_initialized(self):
return self.features.initialized
def is_pairable(self):
return not self.features.bootloader_mode
def has_usable_connection_with_device(self):
try:
res = self.ping("electrum pinging device")
assert res == "electrum pinging device"
except BaseException:
return False
return True
def used(self):
self.last_operation = time.time()
def prevent_timeouts(self):
self.last_operation = float('inf')
def timeout(self, cutoff):
if self.last_operation < cutoff:
self.print_error("timed out")
self.clear_session()
@staticmethod
def expand_path(n):
PRIME_DERIVATION_FLAG = 0x80000000
path = []
for x in n.split('/')[1:]:
prime = 0
if x.endswith("'"):
x = x.replace('\'', '')
prime = PRIME_DERIVATION_FLAG
if x.startswith('-'):
prime = PRIME_DERIVATION_FLAG
path.append(abs(int(x)) | prime)
return path
def cancel(self):
self.transport.write(self.proto.Cancel())
def i4b(self, x):
return pack('>I', x)
def get_xpub(self, bip32_path, xtype):
address_n = self.expand_path(bip32_path)
creating = False
node = self.get_public_node(address_n, creating).node
return serialize_xpub(xtype, node.chain_code, node.public_key, node.depth, self.i4b(node.fingerprint), self.i4b(node.child_num))
def toggle_passphrase(self):
if self.features.passphrase_protection:
self.msg = _("Confirm on your {} device to disable passphrases")
else:
self.msg = _("Confirm on your {} device to enable passphrases")
enabled = not self.features.passphrase_protection
self.apply_settings(use_passphrase=enabled)
def change_label(self, label):
self.msg = _("Confirm the new label on your {} device")
self.apply_settings(label=label)
def change_homescreen(self, homescreen):
self.msg = _("Confirm on your {} device to change your home screen")
self.apply_settings(homescreen=homescreen)
def set_pin(self, remove):
if remove:
self.msg = _("Confirm on your {} device to disable PIN protection")
elif self.features.pin_protection:
self.msg = _("Confirm on your {} device to change your PIN")
else:
self.msg = _("Confirm on your {} device to set a PIN")
self.change_pin(remove)
def clear_session(self):
self.print_error("clear session:", self)
self.prevent_timeouts()
try:
super(TrezorClientBase, self).clear_session()
except BaseException as e:
self.print_error("clear_session: ignoring error", str(e))
def get_public_node(self, address_n, creating):
self.creating_wallet = creating
return super(TrezorClientBase, self).get_public_node(address_n)
def close(self):
self.print_error("closing client")
self.clear_session()
self.transport.close()
def firmware_version(self):
f = self.features
return (f.major_version, f.minor_version, f.patch_version)
def atleast_version(self, major, minor=0, patch=0):
return self.firmware_version() >= (major, minor, patch)
@staticmethod
def wrapper(func):
def wrapped(self, *args, **kwargs):
try:
self.prevent_timeouts()
return func(self, *args, **kwargs)
finally:
self.used()
self.handler.finished()
self.creating_wallet = False
self.msg = None
return wrapped
@staticmethod
def wrap_methods(cls):
for method in ['apply_settings', 'change_pin',
'get_address', 'get_public_node',
'load_device_by_mnemonic', 'load_device_by_xprv',
'recovery_device', 'reset_device', 'sign_message',
'sign_tx', 'wipe_device']:
setattr(cls, method, cls.wrapper(getattr(cls, method)))
| true | true |
1c320786f51b632b86c2f6c0d8837796e78e198e | 3,767 | py | Python | cryptodome.py | dougalhatesrabbits/cryptodome | 71d0c40146aec0b5538989c203946ba685c327a7 | [
"MIT"
] | null | null | null | cryptodome.py | dougalhatesrabbits/cryptodome | 71d0c40146aec0b5538989c203946ba685c327a7 | [
"MIT"
] | null | null | null | cryptodome.py | dougalhatesrabbits/cryptodome | 71d0c40146aec0b5538989c203946ba685c327a7 | [
"MIT"
] | null | null | null | from Crypto import Random # use to generate a random byte string of a length we decide
from Cryptodome.Cipher import AES
from Cryptodome.Hash import SHA256
from Cryptodome import Random
from Cryptodome.Random import get_random_bytes
# Builtins
import base64
import hashlib
'''
https://tutorialsoverflow.com/python-encryption-and-decryption/
'''
"""
# Block sizes for AES encryption is 16 bytes or 128 bits. When AES encryption taking place it will divide our data
# into blocks of length 16. This is a fixed size. So what if your data is smaller than the blocksize ? That’s where
# padding comes into play. Now we need to create a padding function. And also we need to create a unpadding function
# so that we can remove the padding during our encryption process.
"""
# AES block size in bytes; plaintext must be padded to a multiple of this.
BS = 16


def pad(s):
    """PKCS#7-pad the *str* ``s`` up to a multiple of ``BS``.

    Appends ``n`` copies of ``chr(n)``, where ``n`` is the number of bytes
    to the next block boundary (a full extra block when ``s`` is already
    aligned), so the pad length is always recoverable from the last byte.
    """
    n = BS - len(s) % BS
    return s + n * chr(n)


def unpad(s):
    """Strip PKCS#7 padding from ``s`` (expects ``bytes``).

    The last byte encodes the pad length; that many trailing bytes are
    dropped.  No validation of the pad contents is performed here.
    """
    return s[0:-s[-1]]
class AESCipher:
    """AES-256-CBC helper keyed by the SHA-256 digest of a passphrase."""

    def __init__(self, key):
        # Hash the passphrase to obtain a fixed 32-byte AES key.
        self.key = hashlib.sha256(key.encode('utf-8')).digest()

    def encrypt(self, raw):
        """Encrypt the text ``raw``; returns base64(IV || ciphertext)."""
        padded = pad(raw)
        init_vector = Random.new().read(AES.block_size)
        aes = AES.new(self.key, AES.MODE_CBC, init_vector)
        # Prepend the IV so decrypt() can recover it from the payload.
        payload = init_vector + aes.encrypt(padded.encode('utf8'))
        return base64.b64encode(payload)

    def decrypt(self, enc):
        """Decrypt base64(IV || ciphertext) back to the plaintext bytes."""
        blob = base64.b64decode(enc)
        init_vector, body = blob[:16], blob[16:]
        aes = AES.new(self.key, AES.MODE_CBC, init_vector)
        return unpad(aes.decrypt(body))
# Quick self-check: round-trip a message through the AESCipher helper.
cipher = AESCipher('mysecretpassword')
encrypted = cipher.encrypt('Secret Message A')
decrypted = cipher.decrypt(encrypted)
print(encrypted)
print(decrypted)
# https://stackoverflow.com/questions/42568262/how-to-encrypt-text-with-a-password-in-python/44212550#44212550
# Here's how to do it properly in CBC mode, including PKCS#7 padding:
def encrypt(key, source, encode=True):
    """AES-CBC encrypt ``source`` (bytes) under the passphrase ``key`` (bytes).

    The key is run through SHA-256 to obtain a proper 32-byte AES key, the
    plaintext is PKCS#7-padded, and a freshly generated random IV is stored
    at the front of the ciphertext.  When ``encode`` is true (the default)
    the result is returned as a latin-1 base64 string, otherwise as raw
    bytes.
    """
    aes_key = SHA256.new(key).digest()
    iv = Random.new().read(AES.block_size)
    cipher = AES.new(aes_key, AES.MODE_CBC, iv)
    # PKCS#7: append pad_len copies of byte pad_len (whole block if aligned).
    pad_len = AES.block_size - len(source) % AES.block_size
    padded = source + bytes([pad_len]) * pad_len
    blob = iv + cipher.encrypt(padded)
    if encode:
        return base64.b64encode(blob).decode("latin-1")
    return blob
def decrypt(key, source, decode=True):
    """Invert :func:`encrypt`: recover plaintext bytes from ``source``.

    When ``decode`` is true, ``source`` is first base64-decoded from a
    latin-1 string.  The IV is read from the front of the payload and the
    PKCS#7 padding is validated byte-by-byte; a :class:`ValueError` is
    raised if it is inconsistent (wrong key or corrupted data).
    """
    blob = base64.b64decode(source.encode("latin-1")) if decode else source
    aes_key = SHA256.new(key).digest()
    iv, body = blob[:AES.block_size], blob[AES.block_size:]
    plain = AES.new(aes_key, AES.MODE_CBC, iv).decrypt(body)
    pad_len = plain[-1]
    # Every pad byte must equal the pad length, not just the last one.
    if plain[-pad_len:] != bytes([pad_len]) * pad_len:
        raise ValueError("Invalid padding...")
    return plain[:-pad_len]
# Now if you test it as:
my_password = b"secret_AES_key_string_to_encrypt/decrypt_with"
my_data = b"input_string_to_encrypt/decrypt"

print("key: {}".format(my_password))
print("data: {}".format(my_data))
encrypted = encrypt(my_password, my_data)
print("\nenc: {}".format(encrypted))
decrypted = decrypt(my_password, encrypted)
print("dec: {}".format(decrypted))
print("\ndata match: {}".format(my_data == decrypted))
# A second round with the same key and data still round-trips; the fresh
# random IV means the ciphertext itself differs between rounds.
print("\nSecond round....")
encrypted = encrypt(my_password, my_data)
print("\nenc: {}".format(encrypted))
decrypted = decrypt(my_password, encrypted)
print("dec: {}".format(decrypted))
print("\ndata match: {}".format(my_data == decrypted))
from Cryptodome.Cipher import AES
from Cryptodome.Hash import SHA256
from Cryptodome import Random
from Cryptodome.Random import get_random_bytes
import base64
import hashlib
BS = 16
def pad(s):
return s + (BS - len(s) % BS) * chr(BS - len(s) % BS)
def unpad(s):
return s[0:-s[-1]]
class AESCipher:
def __init__(self, key):
self.key = hashlib.sha256(key.encode('utf-8')).digest()
def encrypt(self, raw):
raw = pad(raw)
iv = Random.new().read(AES.block_size)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return base64.b64encode(iv + cipher.encrypt(raw.encode('utf8')))
def decrypt(self, enc):
enc = base64.b64decode(enc)
iv = enc[:16]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return unpad(cipher.decrypt(enc[16:]))
cipher = AESCipher('mysecretpassword')
encrypted = cipher.encrypt('Secret Message A')
decrypted = cipher.decrypt(encrypted)
print(encrypted)
print(decrypted)
crypt(key, source, encode=True):
key = SHA256.new(key).digest() # use SHA-256 over our key to get a proper-sized AES key
IV = Random.new().read(AES.block_size) # generate IV
encryptor = AES.new(key, AES.MODE_CBC, IV)
padding = AES.block_size - len(source) % AES.block_size # calculate needed padding
source += bytes([padding]) * padding # Python 2.x: source += chr(padding) * padding
data = IV + encryptor.encrypt(source) # store the IV at the beginning and encrypt
return base64.b64encode(data).decode("latin-1") if encode else data
def decrypt(key, source, decode=True):
if decode:
source = base64.b64decode(source.encode("latin-1"))
key = SHA256.new(key).digest() # use SHA-256 over our key to get a proper-sized AES key
IV = source[:AES.block_size] # extract the IV from the beginning
decryptor = AES.new(key, AES.MODE_CBC, IV)
data = decryptor.decrypt(source[AES.block_size:]) # decrypt
padding = data[-1] # pick the padding value from the end; Python 2.x: ord(data[-1])
if data[-padding:] != bytes([padding]) * padding: # Python 2.x: chr(padding) * padding
raise ValueError("Invalid padding...")
return data[:-padding] # remove the padding
# Now if you test it as:
my_password = b"secret_AES_key_string_to_encrypt/decrypt_with"
my_data = b"input_string_to_encrypt/decrypt"
print("key: {}".format(my_password))
print("data: {}".format(my_data))
encrypted = encrypt(my_password, my_data)
print("\nenc: {}".format(encrypted))
decrypted = decrypt(my_password, encrypted)
print("dec: {}".format(decrypted))
print("\ndata match: {}".format(my_data == decrypted))
print("\nSecond round....")
encrypted = encrypt(my_password, my_data)
print("\nenc: {}".format(encrypted))
decrypted = decrypt(my_password, encrypted)
print("dec: {}".format(decrypted))
print("\ndata match: {}".format(my_data == decrypted))
| true | true |
1c3209001d306c11f2a53706283e9bff8107721b | 4,496 | py | Python | src/sentry/runner/commands/devserver.py | mitsuhiko/sentry | cddc3b643a13b52ac6d07ff22e4bd5d69ecbad90 | [
"BSD-3-Clause"
] | 4 | 2016-03-16T07:21:36.000Z | 2017-09-04T07:29:56.000Z | src/sentry/runner/commands/devserver.py | mitsuhiko/sentry | cddc3b643a13b52ac6d07ff22e4bd5d69ecbad90 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/runner/commands/devserver.py | mitsuhiko/sentry | cddc3b643a13b52ac6d07ff22e4bd5d69ecbad90 | [
"BSD-3-Clause"
] | null | null | null | """
sentry.runner.commands.devserver
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2016 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import click
from sentry.runner.decorators import configuration, log_options
@click.command()
@click.option('--reload/--no-reload', default=True, help='Autoreloading of python files.')
@click.option('--watchers/--no-watchers', default=True, help='Watch static files and recompile on changes.')
@click.option('--workers/--no-workers', default=False, help='Run asynchronous workers.')
@click.argument('bind', default='127.0.0.1:8000', metavar='ADDRESS')
@log_options()
@configuration
def devserver(reload, watchers, workers, bind):
    "Starts a lightweight web server for development."
    # Parse ADDRESS as "host[:port]"; port stays None when omitted.
    if ':' in bind:
        host, port = bind.split(':', 1)
        port = int(port)
    else:
        host = bind
        port = None

    # Imports are deferred so the CLI stays fast and Django settings are
    # only touched after @configuration has set them up.
    import os
    from django.conf import settings
    from sentry import options
    from sentry.services.http import SentryHTTPServer

    # When the configured URL prefix is https, we front the dev server with
    # the external `https` proxy binary (if available on $PATH).
    url_prefix = options.get('system.url-prefix', '')
    needs_https = url_prefix.startswith('https://')
    has_https = False

    if needs_https:
        from subprocess import check_output

        # Check if a valid https binary is installed
        try:
            check_output(['which', 'https'])
            has_https = True
        except Exception:
            has_https = False
            from sentry.runner.initializer import show_big_error
            show_big_error([
                'missing `https` on your `$PATH`, but https is needed',
                '`$ brew install mattrobenolt/stuff/https`',
            ])

    uwsgi_overrides = {
        # Make sure we don't try and use uwsgi protocol
        'protocol': 'http',
        # Make sure we reload really quickly for local dev in case it
        # doesn't want to shut down nicely on it's own, NO MERCY
        'worker-reload-mercy': 2,
        # We need stdin to support pdb in devserver
        'honour-stdin': True,
    }

    if reload:
        uwsgi_overrides['py-autoreload'] = 1

    # List of (name, argv) subprocesses to run alongside the web server.
    daemons = []

    if watchers:
        daemons += settings.SENTRY_WATCHERS

    if workers:
        # Eager Celery would execute tasks inline, so spawning worker
        # processes would be pointless — fail loudly instead.
        if settings.CELERY_ALWAYS_EAGER:
            raise click.ClickException('Disable CELERY_ALWAYS_EAGER in your settings file to spawn workers.')

        daemons += [
            ('worker', ['sentry', 'run', 'worker', '-c', '1', '--autoreload']),
            ('cron', ['sentry', 'run', 'cron', '--autoreload']),
        ]

    if needs_https and has_https:
        from urlparse import urlparse
        parsed_url = urlparse(url_prefix)
        https_port = str(parsed_url.port or 443)
        https_host = parsed_url.hostname

        # Determine a random port for the backend http server by letting
        # the OS pick a free one; the https proxy then forwards to it.
        import socket
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((host, 0))
        port = s.getsockname()[1]
        s.close()
        bind = '%s:%d' % (host, port)

        daemons += [
            ('https', ['https', '-host', https_host, '-listen', host + ':' + https_port, bind]),
        ]

    # A better log-format for local dev when running through honcho,
    # but if there aren't any other daemons, we don't want to override.
    if daemons:
        uwsgi_overrides['log-format'] = '"%(method) %(uri) %(proto)" %(status) %(size)'
    else:
        uwsgi_overrides['log-format'] = '[%(ltime)] "%(method) %(uri) %(proto)" %(status) %(size)'

    server = SentryHTTPServer(host=host, port=port, workers=1, extra_options=uwsgi_overrides)

    # If we don't need any other daemons, just launch a normal uwsgi webserver
    # and avoid dealing with subprocesses
    if not daemons:
        return server.run()

    # Otherwise multiplex everything (web + watchers/workers/https proxy)
    # through honcho, which interleaves their output.
    import sys
    from subprocess import list2cmdline
    from honcho.manager import Manager

    os.environ['PYTHONUNBUFFERED'] = 'true'

    # Make sure that the environment is prepared before honcho takes over
    # This sets all the appropriate uwsgi env vars, etc
    server.prepare_environment()
    daemons += [
        ('server', ['sentry', 'run', 'web']),
    ]

    cwd = os.path.realpath(os.path.join(settings.PROJECT_ROOT, os.pardir, os.pardir))

    manager = Manager()
    for name, cmd in daemons:
        manager.add_process(
            name, list2cmdline(cmd),
            quiet=False, cwd=cwd,
        )

    manager.loop()
    # Propagate the exit status of the first process to die.
    sys.exit(manager.returncode)
| 33.058824 | 109 | 0.623888 | from __future__ import absolute_import, print_function
import click
from sentry.runner.decorators import configuration, log_options
@click.command()
@click.option('--reload/--no-reload', default=True, help='Autoreloading of python files.')
@click.option('--watchers/--no-watchers', default=True, help='Watch static files and recompile on changes.')
@click.option('--workers/--no-workers', default=False, help='Run asynchronous workers.')
@click.argument('bind', default='127.0.0.1:8000', metavar='ADDRESS')
@log_options()
@configuration
def devserver(reload, watchers, workers, bind):
if ':' in bind:
host, port = bind.split(':', 1)
port = int(port)
else:
host = bind
port = None
import os
from django.conf import settings
from sentry import options
from sentry.services.http import SentryHTTPServer
url_prefix = options.get('system.url-prefix', '')
needs_https = url_prefix.startswith('https://')
has_https = False
if needs_https:
from subprocess import check_output
try:
check_output(['which', 'https'])
has_https = True
except Exception:
has_https = False
from sentry.runner.initializer import show_big_error
show_big_error([
'missing `https` on your `$PATH`, but https is needed',
'`$ brew install mattrobenolt/stuff/https`',
])
uwsgi_overrides = {
'protocol': 'http',
# Make sure we reload really quickly for local dev in case it
# doesn't want to shut down nicely on it's own, NO MERCY
'worker-reload-mercy': 2,
# We need stdin to support pdb in devserver
'honour-stdin': True,
}
if reload:
uwsgi_overrides['py-autoreload'] = 1
daemons = []
if watchers:
daemons += settings.SENTRY_WATCHERS
if workers:
if settings.CELERY_ALWAYS_EAGER:
raise click.ClickException('Disable CELERY_ALWAYS_EAGER in your settings file to spawn workers.')
daemons += [
('worker', ['sentry', 'run', 'worker', '-c', '1', '--autoreload']),
('cron', ['sentry', 'run', 'cron', '--autoreload']),
]
if needs_https and has_https:
from urlparse import urlparse
parsed_url = urlparse(url_prefix)
https_port = str(parsed_url.port or 443)
https_host = parsed_url.hostname
# Determine a random port for the backend http server
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, 0))
port = s.getsockname()[1]
s.close()
bind = '%s:%d' % (host, port)
daemons += [
('https', ['https', '-host', https_host, '-listen', host + ':' + https_port, bind]),
]
# A better log-format for local dev when running through honcho,
# but if there aren't any other daemons, we don't want to override.
if daemons:
uwsgi_overrides['log-format'] = '"%(method) %(uri) %(proto)" %(status) %(size)'
else:
uwsgi_overrides['log-format'] = '[%(ltime)] "%(method) %(uri) %(proto)" %(status) %(size)'
server = SentryHTTPServer(host=host, port=port, workers=1, extra_options=uwsgi_overrides)
# If we don't need any other daemons, just launch a normal uwsgi webserver
if not daemons:
return server.run()
import sys
from subprocess import list2cmdline
from honcho.manager import Manager
os.environ['PYTHONUNBUFFERED'] = 'true'
server.prepare_environment()
daemons += [
('server', ['sentry', 'run', 'web']),
]
cwd = os.path.realpath(os.path.join(settings.PROJECT_ROOT, os.pardir, os.pardir))
manager = Manager()
for name, cmd in daemons:
manager.add_process(
name, list2cmdline(cmd),
quiet=False, cwd=cwd,
)
manager.loop()
sys.exit(manager.returncode)
| true | true |
1c320ae3be5e3bae0e53964b1f5a4e7025074013 | 1,927 | py | Python | demos/anisotropic_distribution.py | nids2001/UncertainSCI | b3105bddc064575477589d7a930c71fa3149ef36 | [
"MIT"
] | 1 | 2021-07-25T17:02:36.000Z | 2021-07-25T17:02:36.000Z | demos/anisotropic_distribution.py | nids2001/UncertainSCI | b3105bddc064575477589d7a930c71fa3149ef36 | [
"MIT"
] | 70 | 2020-04-09T17:38:12.000Z | 2022-03-18T17:06:09.000Z | demos/anisotropic_distribution.py | nids2001/UncertainSCI | b3105bddc064575477589d7a930c71fa3149ef36 | [
"MIT"
] | 7 | 2020-05-28T17:26:05.000Z | 2021-08-13T21:41:10.000Z | # Demonstrates generation of anisotropic distributions. Example is similar to
# quantiles.py demo.
import numpy as np
from matplotlib import pyplot as plt
from UncertainSCI.distributions import BetaDistribution
from UncertainSCI.model_examples import sine_modulation
from UncertainSCI.indexing import TotalDegreeSet
from UncertainSCI.pce import PolynomialChaosExpansion
# Specifies 1D distribution on [0,1] (alpha=beta=1 ---> uniform)
alpha = [1., 2., 3.]
beta = [3., 2., 1.]
dist = BetaDistribution(alpha, beta)
# Indices setup
order = 5 # polynomial degree
index_set = TotalDegreeSet(dim=dist.dim, order=order)
# # The remainder of this is essentially the same as quantiles.py
print('This will query the model {0:d} times'.format(index_set.get_indices().shape[0] + 10))
# Initializes a pce object
pce = PolynomialChaosExpansion(index_set, dist)
# Define model
N = 10 # Number of degrees of freedom of model output
left = -1.
right = 1.
x = np.linspace(left, right, N)
model = sine_modulation(N=N)
# Compute PCE (runs model)
lsq_residuals = pce.build_pce_wafp(model)
Q = 6 # Number of quantile bands to plot
dq = 0.5/(Q+1)
q_lower = np.arange(dq, 0.5-1e-7, dq)[::-1]
q_upper = np.arange(0.5 + dq, 1.0-1e-7, dq)
# Meh, this triple calling is wasteful
median = pce.quantile(0.5, M=int(1e3))[0, :]
quantiles_lower = pce.quantile(q_lower, M=int(1e3))
quantiles_upper = pce.quantile(q_upper, M=int(1e3))
# # Visualization
M = 50 # Generate MC samples
p_phys = dist.MC_samples(M)
output = np.zeros([M, N])
for j in range(M):
output[j, :] = model(p_phys[j, :])
plt.plot(x, output[:M, :].T, 'k', alpha=0.8, linewidth=0.2)
plt.plot(x, median, 'b', label='PCE median')
for ind in range(Q):
alpha = (Q-ind) * 1/Q - (1/(2*Q))
plt.fill_between(x, quantiles_lower[ind, :], quantiles_upper[ind, :], interpolate=True, facecolor='red', alpha=alpha)
plt.xlabel('x')
plt.legend(loc='lower right')
plt.show()
| 27.140845 | 121 | 0.709912 |
import numpy as np
from matplotlib import pyplot as plt
from UncertainSCI.distributions import BetaDistribution
from UncertainSCI.model_examples import sine_modulation
from UncertainSCI.indexing import TotalDegreeSet
from UncertainSCI.pce import PolynomialChaosExpansion
alpha = [1., 2., 3.]
beta = [3., 2., 1.]
dist = BetaDistribution(alpha, beta)
order = 5
index_set = TotalDegreeSet(dim=dist.dim, order=order)
t.get_indices().shape[0] + 10))
pce = PolynomialChaosExpansion(index_set, dist)
N = 10
left = -1.
right = 1.
x = np.linspace(left, right, N)
model = sine_modulation(N=N)
lsq_residuals = pce.build_pce_wafp(model)
Q = 6
dq = 0.5/(Q+1)
q_lower = np.arange(dq, 0.5-1e-7, dq)[::-1]
q_upper = np.arange(0.5 + dq, 1.0-1e-7, dq)
median = pce.quantile(0.5, M=int(1e3))[0, :]
quantiles_lower = pce.quantile(q_lower, M=int(1e3))
quantiles_upper = pce.quantile(q_upper, M=int(1e3))
s = dist.MC_samples(M)
output = np.zeros([M, N])
for j in range(M):
output[j, :] = model(p_phys[j, :])
plt.plot(x, output[:M, :].T, 'k', alpha=0.8, linewidth=0.2)
plt.plot(x, median, 'b', label='PCE median')
for ind in range(Q):
alpha = (Q-ind) * 1/Q - (1/(2*Q))
plt.fill_between(x, quantiles_lower[ind, :], quantiles_upper[ind, :], interpolate=True, facecolor='red', alpha=alpha)
plt.xlabel('x')
plt.legend(loc='lower right')
plt.show()
| true | true |
1c320b1e440389a8b19643a86cd2a8d9b42b3eae | 1,486 | py | Python | src/components/game-server/lib/data/model/tictactoe/tictactoe_factory.py | rorik/UBU-DMS | 7c3fc38823478054499e233dd36b8b4430d3f3d3 | [
"MIT"
] | null | null | null | src/components/game-server/lib/data/model/tictactoe/tictactoe_factory.py | rorik/UBU-DMS | 7c3fc38823478054499e233dd36b8b4430d3f3d3 | [
"MIT"
] | null | null | null | src/components/game-server/lib/data/model/tictactoe/tictactoe_factory.py | rorik/UBU-DMS | 7c3fc38823478054499e233dd36b8b4430d3f3d3 | [
"MIT"
] | 1 | 2020-02-07T11:36:04.000Z | 2020-02-07T11:36:04.000Z | from lib.data.model.shared.abstract_factory import AbstractFactory
from lib.data.model.tictactoe.tictactoe_gamemaster import TicTacToeGameMaster
from lib.data.model.tictactoe.tictactoe_board import TicTacToeBoard
class TicTacToeFactory(AbstractFactory):
def __init__(self):
super().__init__()
def _build(self, size) -> TicTacToeGameMaster:
if size is None:
size = self._get_default_size()
board_size = -1
win_size = -1
if isinstance(size, str):
if size.isdecimal():
board_size = int(size)
win_size = 3
elif ',' in size:
groups = [value.strip() for value in size.split(',')]
if len(groups) == 2 and len([group for group in groups if not group.isdecimal()]) == 0:
board_size = int(groups[0])
win_size = int(groups[1])
elif isinstance(size, list) and len(size) > 0:
board_size = size[0]
win_size = size[1] if len(size) > 1 else 3
if board_size <= 0 or win_size <= 0:
raise AttributeError('size must be a list (board size and optional win_size) or a string' +
'(board size or both attributes in csv format). Both values must be non-zero positive integers.')
board = TicTacToeBoard(board_size)
return TicTacToeGameMaster(board, win_size)
def _get_default_size(self):
return [3, 3]
| 37.15 | 130 | 0.600942 | from lib.data.model.shared.abstract_factory import AbstractFactory
from lib.data.model.tictactoe.tictactoe_gamemaster import TicTacToeGameMaster
from lib.data.model.tictactoe.tictactoe_board import TicTacToeBoard
class TicTacToeFactory(AbstractFactory):
def __init__(self):
super().__init__()
def _build(self, size) -> TicTacToeGameMaster:
if size is None:
size = self._get_default_size()
board_size = -1
win_size = -1
if isinstance(size, str):
if size.isdecimal():
board_size = int(size)
win_size = 3
elif ',' in size:
groups = [value.strip() for value in size.split(',')]
if len(groups) == 2 and len([group for group in groups if not group.isdecimal()]) == 0:
board_size = int(groups[0])
win_size = int(groups[1])
elif isinstance(size, list) and len(size) > 0:
board_size = size[0]
win_size = size[1] if len(size) > 1 else 3
if board_size <= 0 or win_size <= 0:
raise AttributeError('size must be a list (board size and optional win_size) or a string' +
'(board size or both attributes in csv format). Both values must be non-zero positive integers.')
board = TicTacToeBoard(board_size)
return TicTacToeGameMaster(board, win_size)
def _get_default_size(self):
return [3, 3]
| true | true |
1c320b6b9517633e2d1bc6c012f0d1a7f77e9a2b | 2,734 | py | Python | infra/bots/recipes/g3_compile.py | umberto-sonnino/skia | 7ecc54217889025b3e0c512f92fb84d20a26b9f7 | [
"BSD-3-Clause"
] | 1 | 2021-04-09T23:24:57.000Z | 2021-04-09T23:24:57.000Z | infra/bots/recipes/g3_compile.py | umberto-sonnino/skia | 7ecc54217889025b3e0c512f92fb84d20a26b9f7 | [
"BSD-3-Clause"
] | 1 | 2019-11-22T15:25:32.000Z | 2019-11-22T15:25:32.000Z | infra/bots/recipes/g3_compile.py | promoter/skia | bc5ed776134c60ae13d22cabc8e0f6aca0fdd422 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe modules this recipe depends on (resolved by the recipe engine).
DEPS = [
  'recipe_engine/file',
  'recipe_engine/json',
  'recipe_engine/path',
  'recipe_engine/properties',
  'recipe_engine/raw_io',
  'recipe_engine/step',
  'vars',
]
def RunSteps(api):
  """Trigger a g3 compile task for the current CL and wait for its result.

  Only valid on trybots: the triggered task needs a Gerrit issue/patchset.
  On failure, attaches a link to the internal CL (if one was created).
  """
  api.vars.setup()

  if not api.vars.is_trybot:
    raise Exception('%s can only be run as a trybot.' % api.vars.builder_name)

  infrabots_dir = api.path['start_dir'].join('skia', 'infra', 'bots')
  trigger_wait_g3_script = infrabots_dir.join('g3_compile',
                                              'trigger_wait_g3_task.py')

  # The helper script writes task status (including any CL id) to this file.
  output_dir = api.path.mkdtemp('g3_try')
  output_file = output_dir.join('output_file')

  # Trigger a compile task and wait for it to complete.
  cmd = ['python', trigger_wait_g3_script,
         '--issue', api.vars.issue,
         '--patchset', api.vars.patchset,
         '--output_file', output_file,
         ]
  try:
    api.step('Trigger and wait for g3 compile task', cmd=cmd)
  except api.step.StepFailure as e:
    # Add CL link if it exists in the output_file.
    task_json = api.file.read_json(
        'Read task json', output_file, test_data={'cl': 12345})
    if task_json.get('cl'):
      api.step.active_result.presentation.links['CL link'] = (
          'http://cl/%d' % task_json['cl'])
    raise e
def GenTests(api):
  """Simulation tests: trybot success, trybot failure, and non-trybot error."""
  # Happy path: trybot with Gerrit properties set.
  yield(
    api.test('g3_compile_trybot') +
    api.properties.tryserver(
        gerrit_project='skia',
        gerrit_url='https://skia-review.googlesource.com/',
    ) +
    api.properties(
        buildername='Build-Debian9-Clang-TAP-Presubmit-G3_Framework',
        path_config='kitchen',
        swarm_out_dir='[SWARM_OUT_DIR]',
        repository='https://skia.googlesource.com/skia.git',
        revision='abc123',
    )
  )

  # Compile task fails: the step returns a non-zero retcode and the
  # recipe should surface the CL link before re-raising.
  yield(
    api.test('g3_compile_trybot_failure') +
    api.properties.tryserver(
        gerrit_project='skia',
        gerrit_url='https://skia-review.googlesource.com/',
    ) +
    api.properties(
        buildername='Build-Debian9-Clang-TAP-Presubmit-G3_Framework',
        path_config='kitchen',
        swarm_out_dir='[SWARM_OUT_DIR]',
        repository='https://skia.googlesource.com/skia.git',
        revision='abc123',
    ) +
    api.step_data('Trigger and wait for g3 compile task', retcode=1)
  )

  # Non-trybot run: the recipe must raise.
  yield(
    api.test('g3_compile_nontrybot') +
    api.properties(
        buildername='Build-Debian9-Clang-TAP-Presubmit-G3_Framework',
        path_config='kitchen',
        swarm_out_dir='[SWARM_OUT_DIR]',
        repository='https://skia.googlesource.com/skia.git',
        revision='abc123',
    ) +
    api.expect_exception('Exception')
  )
| 30.719101 | 78 | 0.64338 |
# Recipe modules this recipe depends on (resolved by the recipe engine).
DEPS = [
  'recipe_engine/file',
  'recipe_engine/json',
  'recipe_engine/path',
  'recipe_engine/properties',
  'recipe_engine/raw_io',
  'recipe_engine/step',
  'vars',
]
def RunSteps(api):
  """Trigger a g3 compile task for the current CL and wait for its result.

  Only valid on trybots. On failure, attaches a link to the internal CL
  (if one was created) before re-raising.
  """
  api.vars.setup()
  if not api.vars.is_trybot:
    raise Exception('%s can only be run as a trybot.' % api.vars.builder_name)
  infrabots_dir = api.path['start_dir'].join('skia', 'infra', 'bots')
  trigger_wait_g3_script = infrabots_dir.join('g3_compile',
                                              'trigger_wait_g3_task.py')
  # The helper script writes task status (including any CL id) here.
  output_dir = api.path.mkdtemp('g3_try')
  output_file = output_dir.join('output_file')
  # Trigger a compile task and wait for it to complete.
  cmd = ['python', trigger_wait_g3_script,
         '--issue', api.vars.issue,
         '--patchset', api.vars.patchset,
         '--output_file', output_file,
         ]
  try:
    api.step('Trigger and wait for g3 compile task', cmd=cmd)
  except api.step.StepFailure as e:
    # Add the CL link to the failed step if the task recorded one.
    task_json = api.file.read_json(
        'Read task json', output_file, test_data={'cl': 12345})
    if task_json.get('cl'):
      api.step.active_result.presentation.links['CL link'] = (
          'http://cl/%d' % task_json['cl'])
    raise e
def GenTests(api):
  """Simulation tests: trybot success, trybot failure, and non-trybot error."""
  # Happy path: trybot with Gerrit properties set.
  yield(
    api.test('g3_compile_trybot') +
    api.properties.tryserver(
        gerrit_project='skia',
        gerrit_url='https://skia-review.googlesource.com/',
    ) +
    api.properties(
        buildername='Build-Debian9-Clang-TAP-Presubmit-G3_Framework',
        path_config='kitchen',
        swarm_out_dir='[SWARM_OUT_DIR]',
        repository='https://skia.googlesource.com/skia.git',
        revision='abc123',
    )
  )
  # Compile step fails (retcode=1); recipe should surface the CL link.
  yield(
    api.test('g3_compile_trybot_failure') +
    api.properties.tryserver(
        gerrit_project='skia',
        gerrit_url='https://skia-review.googlesource.com/',
    ) +
    api.properties(
        buildername='Build-Debian9-Clang-TAP-Presubmit-G3_Framework',
        path_config='kitchen',
        swarm_out_dir='[SWARM_OUT_DIR]',
        repository='https://skia.googlesource.com/skia.git',
        revision='abc123',
    ) +
    api.step_data('Trigger and wait for g3 compile task', retcode=1)
  )
  # Non-trybot run: the recipe must raise.
  yield(
    api.test('g3_compile_nontrybot') +
    api.properties(
        buildername='Build-Debian9-Clang-TAP-Presubmit-G3_Framework',
        path_config='kitchen',
        swarm_out_dir='[SWARM_OUT_DIR]',
        repository='https://skia.googlesource.com/skia.git',
        revision='abc123',
    ) +
    api.expect_exception('Exception')
  )
| true | true |
1c320be6ba63c28d56b3362c3d113f00860e4ccb | 2,221 | py | Python | learn/prog/01/01.py | git001/milq | d4ca676a72e5d09842bbbc592e54f9b73a05894a | [
"MIT"
] | 2 | 2020-05-20T22:26:34.000Z | 2021-04-24T20:23:32.000Z | learn/prog/01/01.py | VirgoCoachman/milq | ee794087759d4a0cbd8f830bc42976fdf44b5483 | [
"MIT"
] | null | null | null | learn/prog/01/01.py | VirgoCoachman/milq | ee794087759d4a0cbd8f830bc42976fdf44b5483 | [
"MIT"
] | 1 | 2017-09-15T01:52:52.000Z | 2017-09-15T01:52:52.000Z |
# Tutorial script: Python basics (comments, variables, operators, strings,
# printing, and type conversion). Intended to be read top to bottom.

# This is a comment
'''
This is a comment
on multiple lines
'''

# VARIABLES
#
# A variable is a symbol that represents a quantity that may vary.
#
# identifier = value
age = 25  # The value 25 is assigned to variable age

# BASIC DATA TYPES
age = 25  # Integer
temperature = -3.82  # Real number (float)
name = 'Nacho López'  # String
has_car = True  # Boolean (only two values: True or False)

# ARITHMETIC OPERATIONS WITH NUMBERS
x = 5
y = 2
z = x + y  # Addition. Result: 7.
z = x - y  # Subtraction. Result: 3.
z = x * y  # Multiplication. Result: 10.
z = x / y  # Division. Result: 2.5.
z = x % y  # Modulo (remainder of the integer division). Result: 1.
z = z + 1  # Increase the value of z by 1. Result: 2.
z = z - 1  # Decrease the value of z by 1. Result: 1.
z = 50 - x * 6 / -0.5  #
z = (50 - x) * 6 / -0.5  # The order of operations is as in mathematics
z = (50 - x * 6) / -0.5  # (parentheses first, then * / %, then + -)
z = 2 * z + 3  # Remember: the symbol = assigns a value to the variable

# BASIC OPERATIONS WITH STRINGS
a = 'GNU/'
b = 'Linux'
c = a + b  # Concatenation. Result: 'GNU/Linux'.
c = a * 3  # Repetition. Result: 'GNU/GNU/GNU/'.

# PRINT VARIABLES ON SCREEN
print('Hello, world!')  # Prints on screen: Hello, world!
print(x)  # Prints the variable x

# You can print on screen strings and variables together
print('I have bought', x, 'oranges and', y, 'lemons.')

# DATA TYPE CONVERSION
height = '95.4'
print(type(height))  # Prints the current data type
height = float(height)  # Convert a string to a real number
print(type(height))

altitude = -544.432
print(type(altitude))
altitude = str(altitude)  # Convert a real number to string
print(type(altitude))
# Tutorial script: Python basics (this copy had its comments stripped;
# restored annotations below).
age = 25  # variable assignment

# Basic data types
age = 25  # int
temperature = -3.82  # float
name = 'Nacho López'  # str
has_car = True  # bool

# Arithmetic operations
x = 5
y = 2
z = x + y  # 7
z = x - y  # 3
z = x * y  # 10
z = x / y  # 2.5
z = x % y  # 1 (remainder)
z = z + 1  # increment
z = z - 1  # decrement
z = 50 - x * 6 / -0.5  # operator precedence as in mathematics
z = (50 - x) * 6 / -0.5
z = (50 - x * 6) / -0.5
z = 2 * z + 3

# String operations
a = 'GNU/'
b = 'Linux'
c = a + b  # concatenation: 'GNU/Linux'
c = a * 3  # repetition: 'GNU/GNU/GNU/'

# Printing
print('Hello, world!')
print(x)
print('I have bought', x, 'oranges and', y, 'lemons.')

# Type conversion
height = '95.4'
print(type(height))
height = float(height)  # str -> float
print(type(height))
altitude = -544.432
print(type(altitude))
altitude = str(altitude)  # float -> str
print(type(altitude))
| true | true |
1c320bfc84a9615be392b8014ec39ae39c885a21 | 31,893 | py | Python | conference.py | xuemeiwei/Conference-Central_Udacity | e1e7d94f9c18d772cb12da8e943b4e39feeda7c4 | [
"Apache-2.0"
] | null | null | null | conference.py | xuemeiwei/Conference-Central_Udacity | e1e7d94f9c18d772cb12da8e943b4e39feeda7c4 | [
"Apache-2.0"
] | null | null | null | conference.py | xuemeiwei/Conference-Central_Udacity | e1e7d94f9c18d772cb12da8e943b4e39feeda7c4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
import json
from datetime import datetime

import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote

from google.appengine.api import urlfetch
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.ext import ndb

from models import *

from settings import WEB_CLIENT_ID
from settings import ANDROID_CLIENT_ID
from settings import IOS_CLIENT_ID
from settings import ANDROID_AUDIENCE

from utils import getUserId
# OAuth scopes / client ids used to authenticate API callers.
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
# Memcache keys for the announcement and featured-speaker entries.
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
MEMCACHE_FEATURED_SPEAKER_KEY = "FEATURED_SPEAKER"
ANNOUNCEMENT_TPL = ('Last chance to attend! The following conferences '
                    'are nearly sold out: %s')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

# Default field values applied when a new Conference omits them.
DEFAULTS = {
    "city": "Default City",
    "maxAttendees": 0,
    "seatsAvailable": 0,
    "topics": [ "Default", "Topic" ],
}

# Maps the wire-format operator names of ConferenceQueryForm to the
# comparison operators understood by ndb FilterNode.
OPERATORS = {
            'EQ':   '=',
            'GT':   '>',
            'GTEQ': '>=',
            'LT':   '<',
            'LTEQ': '<=',
            'NE':   '!='
            }

# Maps the wire-format field names of ConferenceQueryForm to the
# Conference model property names.
FIELDS =    {
            'CITY': 'city',
            'TOPIC': 'topics',
            'MONTH': 'month',
            'MAX_ATTENDEES': 'maxAttendees',
            }

# Request containers: combine a message body with URL path parameters.
CONF_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1),
)

CONF_POST_REQUEST = endpoints.ResourceContainer(
    ConferenceForm,
    websafeConferenceKey=messages.StringField(1),
)

SESSION_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    sessionKey=messages.StringField(1),
)

SESSION_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    typeOfSession=messages.StringField(1),
    websafeConferenceKey=messages.StringField(2),
)

SESSION_GET_BY_SPEAKER_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    speaker=messages.StringField(1),
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api(name='conference', version='v1', audiences=[ANDROID_AUDIENCE],
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID, ANDROID_CLIENT_ID, IOS_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Conference objects - - - - - - - - - - - - - - - - -
    def _copyConferenceToForm(self, conf, displayName):
        """Copy relevant fields from Conference to ConferenceForm.

        Args:
            conf: Conference ndb entity to copy from.
            displayName: organizer display name to place on the form
                (skipped when falsy).
        Returns:
            A fully initialized ConferenceForm.
        """
        cf = ConferenceForm()
        for field in cf.all_fields():
            if hasattr(conf, field.name):
                # convert Date to date string; just copy others
                if field.name.endswith('Date'):
                    setattr(cf, field.name, str(getattr(conf, field.name)))
                else:
                    setattr(cf, field.name, getattr(conf, field.name))
            elif field.name == "websafeKey":
                # Expose the entity key in its URL-safe string form
                setattr(cf, field.name, conf.key.urlsafe())
        if displayName:
            setattr(cf, 'organizerDisplayName', displayName)
        cf.check_initialized()
        return cf
    def _createConferenceObject(self, request):
        """Create or update Conference object, returning ConferenceForm/request.

        Requires an authenticated user; the new Conference is stored as a
        child of the user's Profile. Raises UnauthorizedException or
        BadRequestException on invalid credentials/input.
        """
        # preload necessary data items
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)

        if not request.name:
            raise endpoints.BadRequestException("Conference 'name' field required")

        # copy ConferenceForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}
        # websafeKey and organizerDisplayName are derived, not stored
        del data['websafeKey']
        del data['organizerDisplayName']

        # add default values for those missing (both data model & outbound Message)
        for df in DEFAULTS:
            if data[df] in (None, []):
                data[df] = DEFAULTS[df]
                setattr(request, df, DEFAULTS[df])

        # convert dates from strings to Date objects; set month based on start_date
        if data['startDate']:
            data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
            data['month'] = data['startDate'].month
        else:
            data['month'] = 0
        if data['endDate']:
            data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()

        # set seatsAvailable to be same as maxAttendees on creation
        if data["maxAttendees"] > 0:
            data["seatsAvailable"] = data["maxAttendees"]
        # generate Profile Key based on user ID and Conference
        # ID based on Profile key get Conference key from ID
        p_key = ndb.Key(Profile, user_id)
        c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
        c_key = ndb.Key(Conference, c_id, parent=p_key)
        data['key'] = c_key
        data['organizerUserId'] = request.organizerUserId = user_id

        # create Conference, send email to organizer confirming
        # creation of Conference & return (modified) ConferenceForm
        Conference(**data).put()
        # Email delivery is deferred to a task queue worker
        taskqueue.add(params={'email': user.email(),
            'conferenceInfo': repr(request)},
            url='/tasks/send_confirmation_email'
        )
        return request
    @ndb.transactional()
    def _updateConferenceObject(self, request):
        """Update an existing Conference in a transaction; owner only.

        Copies only the fields actually supplied in the request onto the
        stored entity, converting date strings to date objects. Returns
        the updated ConferenceForm. Raises UnauthorizedException,
        NotFoundException or ForbiddenException.
        """
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)

        # copy ConferenceForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}

        # update existing conference
        conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
        # check that conference exists
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % request.websafeConferenceKey)

        # check that user is owner
        if user_id != conf.organizerUserId:
            raise endpoints.ForbiddenException(
                'Only the owner can update the conference.')

        # Not getting all the fields, so don't create a new object; just
        # copy relevant fields from ConferenceForm to Conference object
        for field in request.all_fields():
            data = getattr(request, field.name)
            # only copy fields where we get data
            if data not in (None, []):
                # special handling for dates (convert string to Date)
                if field.name in ('startDate', 'endDate'):
                    data = datetime.strptime(data, "%Y-%m-%d").date()
                    if field.name == 'startDate':
                        conf.month = data.month
                # write to Conference object
                setattr(conf, field.name, data)
        conf.put()
        prof = ndb.Key(Profile, user_id).get()
        return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
    @endpoints.method(ConferenceForm, ConferenceForm, path='conference',
            http_method='POST', name='createConference')
    def createConference(self, request):
        """Create new conference (thin endpoint wrapper)."""
        return self._createConferenceObject(request)


    @endpoints.method(CONF_POST_REQUEST, ConferenceForm,
            path='conference/{websafeConferenceKey}',
            http_method='PUT', name='updateConference')
    def updateConference(self, request):
        """Update conference w/provided fields & return w/updated info."""
        return self._updateConferenceObject(request)


    @endpoints.method(CONF_GET_REQUEST, ConferenceForm,
            path='conference/{websafeConferenceKey}',
            http_method='GET', name='getConference')
    def getConference(self, request):
        """Return requested conference (by websafeConferenceKey)."""
        # get Conference object from request; bail if not found
        conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % request.websafeConferenceKey)
        # Parent of a Conference key is the organizer's Profile
        prof = conf.key.parent().get()
        # return ConferenceForm
        return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))


    # NOTE(review): registered with POST although it only reads data
    @endpoints.method(message_types.VoidMessage, ConferenceForms,
            path='getConferencesCreated',
            http_method='POST', name='getConferencesCreated')
    def getConferencesCreated(self, request):
        """Return conferences created by the authenticated user."""
        # make sure user is authed
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)
        # create ancestor query for all key matches for this user
        confs = Conference.query(ancestor=ndb.Key(Profile, user_id))
        prof = ndb.Key(Profile, user_id).get()
        # return set of ConferenceForm objects per Conference
        return ConferenceForms(
            items=[self._copyConferenceToForm(conf, getattr(prof, 'displayName')) for conf in confs]
        )
    def _getQuery(self, request):
        """Return formatted ndb query from the submitted filters.

        Datastore requires the inequality-filtered property to be the
        first sort order, hence the ordering logic below.
        """
        q = Conference.query()
        inequality_filter, filters = self._formatFilters(request.filters)

        # If exists, sort on inequality filter first
        if not inequality_filter:
            q = q.order(Conference.name)
        else:
            q = q.order(ndb.GenericProperty(inequality_filter))
            q = q.order(Conference.name)

        for filtr in filters:
            # These two model properties are integers on the wire
            if filtr["field"] in ["month", "maxAttendees"]:
                filtr["value"] = int(filtr["value"])
            formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"])
            q = q.filter(formatted_query)
        return q
    def _formatFilters(self, filters):
        """Parse, check validity and format user supplied filters.

        Returns (inequality_field, formatted_filters), where
        inequality_field is the single property allowed to carry an
        inequality comparison (Datastore restriction), or None.
        Raises BadRequestException on unknown fields/operators or a
        second inequality field.
        """
        formatted_filters = []
        inequality_field = None

        for f in filters:
            filtr = {field.name: getattr(f, field.name) for field in f.all_fields()}

            try:
                # Translate wire-format names to model property/operator
                filtr["field"] = FIELDS[filtr["field"]]
                filtr["operator"] = OPERATORS[filtr["operator"]]
            except KeyError:
                raise endpoints.BadRequestException("Filter contains invalid field or operator.")

            # Every operation except "=" is an inequality
            if filtr["operator"] != "=":
                # check if inequality operation has been used in previous filters
                # disallow the filter if inequality was performed on a different field before
                # track the field on which the inequality operation is performed
                if inequality_field and inequality_field != filtr["field"]:
                    raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")
                else:
                    inequality_field = filtr["field"]

            formatted_filters.append(filtr)
        return (inequality_field, formatted_filters)
    @endpoints.method(ConferenceQueryForms, ConferenceForms,
            path='queryConferences',
            http_method='POST',
            name='queryConferences')
    def queryConferences(self, request):
        """Query for conferences using the user-supplied filter set."""
        conferences = self._getQuery(request)

        # need to fetch organiser displayName from profiles
        # get all keys and use get_multi for speed
        organisers = [(ndb.Key(Profile, conf.organizerUserId)) for conf in conferences]
        profiles = ndb.get_multi(organisers)

        # put display names in a dict for easier fetching
        names = {}
        for profile in profiles:
            names[profile.key.id()] = profile.displayName

        # return individual ConferenceForm object per Conference
        return ConferenceForms(
                items=[self._copyConferenceToForm(conf, names[conf.organizerUserId]) for conf in \
                conferences]
        )
# - - - Session objects - - - - - - - - - - - - - - - - - - -
    def _copySessionToForm(self, session):
        """Copy relevant fields from Session to SessionForm.

        Date/time fields are rendered as strings; sessionSafeKey carries
        the entity key in URL-safe form.
        """
        sf = SessionForm()
        for field in sf.all_fields():
            if hasattr(session, field.name):
                # convert Date to date string; just copy others
                if field.name.endswith('date') or field.name.endswith('startTime'):
                    setattr(sf, field.name, str(getattr(session, field.name)))
                else:
                    setattr(sf, field.name, getattr(session, field.name))
            elif field.name == "sessionSafeKey":
                setattr(sf, field.name, session.key.urlsafe())
        sf.check_initialized()
        return sf
def _createSessionObject(self, request):
"""Create session object"""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.name:
raise endpoints.BadRequestException('Session name is required')
# Get conference object
c_key = ndb.Key(urlsafe=request.websafeConferenceKey)
conf = c_key.get()
# Check the validity of conference
if not conf:
raise endpoints.NotFoundException('No conference is found')
# Check the validity of user
if conf.organizerUserId != getUserId(endpoints.get_current_user()):
raise endpoints.ForbiddenException('Only the organizer can create a session.')
# Copy SessionForm
data = {field.name:getattr(request, field.name) for field in request.all_fields()}
# Convert date and time from strings to Date objects;
if data['date']:
data['date'] = datetime.strptime(data['date'][:10], "%Y-%m-%d").date()
if data['startTime']:
data['startTime'] = datetime.strptime(data['startTime'][:10], "%H, %M").time()
# Assign each session with Conference as parent
s_id = Session.allocate_ids(size=1, parent=c_key)[0]
s_key = ndb.Key(Session, s_id, parent=c_key)
data['Key'] = s_key
data['websafeConferenceKey'] = request.websafeConferenceKey
del data['sessionSafeKey']
# Save session into database
Session(**data).put()
# Send confirmation email to owner
taskqueue.add(params={'email':user.email(),
'conferenceInfo': repr(request)},
url='/tasks/send_Confirmation_session_email'
)
return request
    @endpoints.method(SessionForm, SessionForm,
        path='createSession',
        http_method='POST',
        name='createSession')
    def createSession(self, request):
        """Create a session in a given conference; open only to the organizer."""
        return self._createSessionObject(request)
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/sessions',
http_method='GET', name='getConferenceSessions')
def getConferenceSessions(self, request):
"""Given a conference return all sessions"""
# Get conference key
wsck = request.websafeConferenceKey
# Fetch conference with target key
conf = ndb.Key(urlsage=wsck).get()
# Check validity of conference
if not conf:
raise endpoints.NotFoundException('No conference is found')
# Create query for this conference
Sessions = Session.query().filter(Session.websafeConferenceKey==wsck)
# Return set of SessionForm objects for each conference
return SessionForms(items=[self._copySessionToForm(session)
for session in Sessions])
@endpoints.method(SESSION_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/sessions/{typeOfSession}',
http_method='GET', name='getConferenceSessionByType')
def getConferenceSessionByType(self, request):
"""Return all sessions of a given type, e.g. lecture, keynote, workshop"""
# Get type of session
typeOfSession = request.typeOfSession
# Fetch conference with target key
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# Check the validity of conference
if not conf:
raise endpoints.NotFoundException('No conference is found')
# Create ancestor query for all key matches
sessions = Session.query().filter(Session.typeOfSession==typeOfSession,
Session.websafeConferenceKey==request.websafeConferenceKey)
# Return set of SessionForm objects
return SessionForms(items=[self._copySessionToForm(session)
for session in Sessions])
@endpoints.method(SESSION_GET_BY_SPEAKER_REQUEST, SessionForms,
path='/sessions/{speaker}',
http_method='GET', name='getSessionBySpeaker')
def getSessionsBySpeaker(self, request):
"""Return all sessions of a given speaker"""
sessions= Session.query().filter(Session.speaker==request.speaker)
# Return set of SessionForm objects
return SessionForms(items=[self._copySessionToForm(session)
for session in Sessions])
    @endpoints.method(CONF_GET_REQUEST, ProfileForms,
        path='/getAttendeesByConference/{websafeConferenceKey}',
        http_method='GET', name='getAttendeesByConference')
    def getAttendeesByConference(self, request):
        """Return all attendees of a given conference.

        NOTE(review): scans every Profile entity and filters in Python
        (O(#profiles)); a filtered datastore query would scale better.
        """
        profiles = Profile.query()
        attendees = []
        for pro in profiles:
            if request.websafeConferenceKey in pro.conferenceKeysToAttend:
                attendees.append(pro)
        return ProfileForms(items=[self._copyProfileToForm(attendee)
                                   for attendee in attendees])
    @endpoints.method(SESSION_REQUEST, ProfileForms,
        path='/getAttendeesBySession/{sessionKey}',
        http_method='GET', name='GetAttendeesBySession')
    def getAttendeesBySession(self, request):
        """Return all attendees (wishlisters) of a given session.

        NOTE(review): scans every Profile entity and filters in Python
        (O(#profiles)); a filtered datastore query would scale better.
        """
        s_Key = request.sessionKey
        profiles = Profile.query()
        attendees = []
        for pro in profiles:
            if s_Key in pro.sessionKeysInWishlist:
                attendees.append(pro)
        return ProfileForms(items=[self._copyProfileToForm(attendee)
                                   for attendee in attendees])
@endpoints.method(SESSION_REQUEST, SessionForm,
path='addSessionToWishlist',
http_method='POST', name='addSessionToWishlist')
def addSessionToWishlist(self, request):
"""Add sessions of interest to wishlist"""
# Get session key
s_Key = request.sessionKey
# Get session object
session = ndb.Key(urlsafe=s_Key).get()
# Check the validity of session
if not session:
raise endpoints.NotFoundException('No session is found')
# Check the validity of user
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
profile = self._getProfileFromUser()
if not profile:
raise endpoints.BadRequestException('Profile does not exist for user')
# Check key and Session
if not type(ndb.Key(urlsafe=sessionKey).get()) == Session:
raise endpoints.NotFoundException('This key is not a Session instance')
# Add session to wishlist
if sessionKey not in profile.sessionKeysInWishlist:
try:
profile.sessionKeysInWishlist.append(sessionKey)
profile.put()
except Exception:
raise endpoints.InternalServerErrorException('Error in storing the wishlist')
return self._copySessionToForm(session)
    @endpoints.method(message_types.VoidMessage, SessionForms,
        path='getSessionsInWishlist', http_method='GET',
        name='getSessionsInWishlist')
    def getSessionsInWishlist(self, request):
        """Return all sessions the current user has put on their wishlist."""
        profile = self._getProfileFromUser()
        if not profile:
            raise endpoints.BadRequestException('Profile does not exist for user')
        # Resolve all stored URL-safe keys and batch-fetch the entities
        sessionkeys = [ndb.Key(urlsafe=sessionkey) for sessionkey in profile.sessionKeysInWishlist]
        sessions = ndb.get_multi(sessionkeys)
        # Return set of SessionForm objects per session
        return SessionForms(items=[self._copySessionToForm(session) for session in sessions])
    @endpoints.method(message_types.VoidMessage, BooleanMessage,
        path='clearData', http_method='GET',
        name='clearData')
    def clearData(self, request):
        """Delete all Sessions/Conferences and empty every profile's lists.

        WARNING: destructive and unauthenticated -- intended for
        development/testing only.
        """
        ndb.delete_multi(Session.query().fetch(keys_only = True))
        ndb.delete_multi(Conference.query().fetch(keys_only = True))
        profiles = Profile.query()
        for profile in profiles:
            profile.conferenceKeysToAttend = []
            profile.sessionKeysInWishlist = []
            profile.put()
        return BooleanMessage(data=True)
@endpoints.method(message_types.VoidMessage, StringMessage,
path='speaker/get_features',
http_method='GET', name='getFeaturedSpeaker')
def getFeaturedSpeaker(self, request):
"""Get all featured speakers and return json data"""
featuredSpeaker = memcache.get(MEMCACHE_FEATURED_SPEAKER_KEY)
if not featuredSpeaker:
featuredSpeaker = ""
# return json data
return StringMessage(data=json.dumps(featuredSpeaker))
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
    def _copyProfileToForm(self, prof):
        """Copy relevant fields from Profile to ProfileForm.

        The stored teeShirtSize string is converted back to its
        TeeShirtSize enum member for the outbound message.
        """
        # copy relevant fields from Profile to ProfileForm
        pf = ProfileForm()
        for field in pf.all_fields():
            if hasattr(prof, field.name):
                # convert t-shirt string to Enum; just copy others
                if field.name == 'teeShirtSize':
                    setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
                else:
                    setattr(pf, field.name, getattr(prof, field.name))
        pf.check_initialized()
        return pf
    def _getProfileFromUser(self):
        """Return user Profile from datastore, creating new one if non-existent.

        Raises UnauthorizedException when no user is signed in.
        """
        # make sure user is authed
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')

        # get Profile from datastore; the Profile key is the user id
        user_id = getUserId(user)
        p_key = ndb.Key(Profile, user_id)
        profile = p_key.get()
        # create new Profile if not there
        if not profile:
            profile = Profile(
                key = p_key,
                displayName = user.nickname(),
                mainEmail= user.email(),
                teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),
            )
            profile.put()

        return profile      # return Profile
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# get user Profile
prof = self._getProfileFromUser()
# if saveProfile(), process user-modifyable fields
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
#if field == 'teeShirtSize':
# setattr(prof, field, str(val).upper())
#else:
# setattr(prof, field, val)
prof.put()
# return ProfileForm
return self._copyProfileToForm(prof)
@endpoints.method(message_types.VoidMessage, ProfileForm,
path='profile', http_method='GET', name='getProfile')
def getProfile(self, request):
"""Return user profile."""
return self._doProfile()
@endpoints.method(ProfileMiniForm, ProfileForm,
path='profile', http_method='POST', name='saveProfile')
def saveProfile(self, request):
"""Update & return user profile."""
return self._doProfile(request)
# - - - Announcements - - - - - - - - - - - - - - - - - - - -
@staticmethod
def _cacheAnnouncement():
"""Create Announcement & assign to memcache; used by
memcache cron job & putAnnouncement().
"""
confs = Conference.query(ndb.AND(
Conference.seatsAvailable <= 5,
Conference.seatsAvailable > 0)
).fetch(projection=[Conference.name])
if confs:
# If there are almost sold out conferences,
# format announcement and set it in memcache
announcement = ANNOUNCEMENT_TPL % (
', '.join(conf.name for conf in confs))
memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
else:
# If there are no sold out conferences,
# delete the memcache announcements entry
announcement = ""
memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
return announcement
@endpoints.method(message_types.VoidMessage, StringMessage,
path='conference/announcement/get',
http_method='GET', name='getAnnouncement')
def getAnnouncement(self, request):
"""Return Announcement from memcache."""
return StringMessage(data=memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or "")
@staticmethod
def _cacheFeaturedSpeaker():
"""Get Featured Speaker & assign to memcache;"""
sessions = Session.query()
speakersCounter = {}
featured_speaker = ""
num = 0
for session in sessions:
if session.speaker:
if session.speaker not in speakersCounter:
speakersCounter[session.speaker] = 1
else:
speakersCounter[session.speaker] += 1
if speakersCounter[session.speaker] > num:
featured_speaker = session.speaker
num = speakersCounter[session.speaker]
memcache.set(MEMCACHE_FEATURED_SPEAKER_KEY, featured_speaker)
return featured_speaker
# - - - Registration - - - - - - - - - - - - - - - - - - - -
@ndb.transactional(xg=True)
def _conferenceRegistration(self, request, reg=True):
"""Register or unregister user for selected conference."""
retval = None
prof = self._getProfileFromUser() # get user Profile
# check if conf exists given websafeConfKey
# get conference; check that it exists
wsck = request.websafeConferenceKey
conf = ndb.Key(urlsafe=wsck).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
# register
if reg:
# check if user already registered otherwise add
if wsck in prof.conferenceKeysToAttend:
raise ConflictException(
"You have already registered for this conference")
# check if seats avail
if conf.seatsAvailable <= 0:
raise ConflictException(
"There are no seats available.")
# register user, take away one seat
prof.conferenceKeysToAttend.append(wsck)
conf.seatsAvailable -= 1
retval = True
# unregister
else:
# check if user already registered
if wsck in prof.conferenceKeysToAttend:
# unregister user, add back one seat
prof.conferenceKeysToAttend.remove(wsck)
conf.seatsAvailable += 1
retval = True
else:
retval = False
# write things back to the datastore & return
prof.put()
conf.put()
return BooleanMessage(data=retval)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/attending',
http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
"""Get list of conferences that user has registered for."""
prof = self._getProfileFromUser() # get user Profile
conf_keys = [ndb.Key(urlsafe=wsck) for wsck in prof.conferenceKeysToAttend]
conferences = ndb.get_multi(conf_keys)
# get organizers
organisers = [ndb.Key(Profile, conf.organizerUserId) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return set of ConferenceForm objects per Conference
return ConferenceForms(items=[self._copyConferenceToForm(conf, names[conf.organizerUserId])\
for conf in conferences]
)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='POST', name='registerForConference')
def registerForConference(self, request):
"""Register user for selected conference."""
return self._conferenceRegistration(request)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='DELETE', name='unregisterFromConference')
def unregisterFromConference(self, request):
"""Unregister user for selected conference."""
return self._conferenceRegistration(request, reg=False)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='filterPlayground',
http_method='GET', name='filterPlayground')
def filterPlayground(self, request):
"""Filter Playground"""
q = Conference.query()
# field = "city"
# operator = "="
# value = "London"
# f = ndb.query.FilterNode(field, operator, value)
# q = q.filter(f)
q = q.filter(Conference.city=="London")
q = q.filter(Conference.topics=="Medical Innovations")
q = q.filter(Conference.month==6)
return ConferenceForms(
items=[self._copyConferenceToForm(conf, "") for conf in q]
)
api = endpoints.api_server([ConferenceApi]) # register API
| 40.268939 | 106 | 0.612924 |
__author__ = 'wesc+api@google.com (Wesley Chun)'
from datetime import datetime
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import urlfetch
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from models import *
from settings import WEB_CLIENT_ID
from settings import ANDROID_CLIENT_ID
from settings import IOS_CLIENT_ID
from settings import ANDROID_AUDIENCE
from utils import getUserId
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
MEMCACHE_FEATURED_SPEAKER_KEY = "FEATURED_SPEAKER"
ANNOUNCEMENT_TPL = ('Last chance to attend! The following conferences '
'are nearly sold out: %s')
DEFAULTS = {
"city": "Default City",
"maxAttendees": 0,
"seatsAvailable": 0,
"topics": [ "Default", "Topic" ],
}
OPERATORS = {
'EQ': '=',
'GT': '>',
'GTEQ': '>=',
'LT': '<',
'LTEQ': '<=',
'NE': '!='
}
FIELDS = {
'CITY': 'city',
'TOPIC': 'topics',
'MONTH': 'month',
'MAX_ATTENDEES': 'maxAttendees',
}
CONF_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
)
CONF_POST_REQUEST = endpoints.ResourceContainer(
ConferenceForm,
websafeConferenceKey=messages.StringField(1),
)
SESSION_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
sessionKey=messages.StringField(1),
)
SESSION_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
typeOfSession=messages.StringField(1),
websafeConferenceKey=messages.StringField(2),
)
SESSION_GET_BY_SPEAKER_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
speaker=messages.StringField(1),
)
@endpoints.api(name='conference', version='v1', audiences=[ANDROID_AUDIENCE],
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID, ANDROID_CLIENT_ID, IOS_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
def _copyConferenceToForm(self, conf, displayName):
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
def _createConferenceObject(self, request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.name:
raise endpoints.BadRequestException("Conference 'name' field required")
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
del data['websafeKey']
del data['organizerDisplayName']
for df in DEFAULTS:
if data[df] in (None, []):
data[df] = DEFAULTS[df]
setattr(request, df, DEFAULTS[df])
if data['startDate']:
data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
data['month'] = data['startDate'].month
else:
data['month'] = 0
if data['endDate']:
data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()
if data["maxAttendees"] > 0:
data["seatsAvailable"] = data["maxAttendees"]
p_key = ndb.Key(Profile, user_id)
c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
c_key = ndb.Key(Conference, c_id, parent=p_key)
data['key'] = c_key
data['organizerUserId'] = request.organizerUserId = user_id
Conference(**data).put()
taskqueue.add(params={'email': user.email(),
'conferenceInfo': repr(request)},
url='/tasks/send_confirmation_email'
)
return request
@ndb.transactional()
def _updateConferenceObject(self, request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# copy relevant fields from ConferenceForm to Conference object
for field in request.all_fields():
data = getattr(request, field.name)
# only copy fields where we get data
if data not in (None, []):
# special handling for dates (convert string to Date)
if field.name in ('startDate', 'endDate'):
data = datetime.strptime(data, "%Y-%m-%d").date()
if field.name == 'startDate':
conf.month = data.month
# write to Conference object
setattr(conf, field.name, data)
conf.put()
prof = ndb.Key(Profile, user_id).get()
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(ConferenceForm, ConferenceForm, path='conference',
http_method='POST', name='createConference')
def createConference(self, request):
return self._createConferenceObject(request)
@endpoints.method(CONF_POST_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='PUT', name='updateConference')
def updateConference(self, request):
return self._updateConferenceObject(request)
@endpoints.method(CONF_GET_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='GET', name='getConference')
def getConference(self, request):
# get Conference object from request; bail if not found
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
prof = conf.key.parent().get()
# return ConferenceForm
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='getConferencesCreated',
http_method='POST', name='getConferencesCreated')
def getConferencesCreated(self, request):
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# create ancestor query for all key matches for this user
confs = Conference.query(ancestor=ndb.Key(Profile, user_id))
prof = ndb.Key(Profile, user_id).get()
# return set of ConferenceForm objects per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, getattr(prof, 'displayName')) for conf in confs]
)
def _getQuery(self, request):
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"])
q = q.filter(formatted_query)
return q
def _formatFilters(self, filters):
formatted_filters = []
inequality_field = None
for f in filters:
filtr = {field.name: getattr(f, field.name) for field in f.all_fields()}
try:
filtr["field"] = FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException("Filter contains invalid field or operator.")
# Every operation except "=" is an inequality
if filtr["operator"] != "=":
# check if inequality operation has been used in previous filters
# disallow the filter if inequality was performed on a different field before
# track the field on which the inequality operation is performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")
else:
inequality_field = filtr["field"]
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
@endpoints.method(ConferenceQueryForms, ConferenceForms,
path='queryConferences',
http_method='POST',
name='queryConferences')
def queryConferences(self, request):
conferences = self._getQuery(request)
# need to fetch organiser displayName from profiles
# get all keys and use get_multi for speed
organisers = [(ndb.Key(Profile, conf.organizerUserId)) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return individual ConferenceForm object per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, names[conf.organizerUserId]) for conf in \
conferences]
)
# - - - Session objects - - - - - - - - - - - - - - - - - - -
def _copySessionToForm(self, session):
sf = SessionForm()
for field in sf.all_fields():
if hasattr(session, field.name):
# convert Date to date string; just copy others
if field.name.endswith('date') or field.name.endswith('startTime'):
setattr(sf, field.name, str(getattr(session, field.name)))
else:
setattr(sf, field.name, getattr(session, field.name))
elif field.name == "sessionSafeKey":
setattr(sf, field.name, session.key.urlsafe())
sf.check_initialized()
return sf
def _createSessionObject(self, request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.name:
raise endpoints.BadRequestException('Session name is required')
# Get conference object
c_key = ndb.Key(urlsafe=request.websafeConferenceKey)
conf = c_key.get()
# Check the validity of conference
if not conf:
raise endpoints.NotFoundException('No conference is found')
# Check the validity of user
if conf.organizerUserId != getUserId(endpoints.get_current_user()):
raise endpoints.ForbiddenException('Only the organizer can create a session.')
# Copy SessionForm
data = {field.name:getattr(request, field.name) for field in request.all_fields()}
# Convert date and time from strings to Date objects;
if data['date']:
data['date'] = datetime.strptime(data['date'][:10], "%Y-%m-%d").date()
if data['startTime']:
data['startTime'] = datetime.strptime(data['startTime'][:10], "%H, %M").time()
# Assign each session with Conference as parent
s_id = Session.allocate_ids(size=1, parent=c_key)[0]
s_key = ndb.Key(Session, s_id, parent=c_key)
data['Key'] = s_key
data['websafeConferenceKey'] = request.websafeConferenceKey
del data['sessionSafeKey']
# Save session into database
Session(**data).put()
# Send confirmation email to owner
taskqueue.add(params={'email':user.email(),
'conferenceInfo': repr(request)},
url='/tasks/send_Confirmation_session_email'
)
return request
@endpoints.method(SessionForm, SessionForm,
path='createSession',
http_method='POST',
name='createSession')
def createSession(self, request):
return self._createSessionObject(request)
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/sessions',
http_method='GET', name='getConferenceSessions')
def getConferenceSessions(self, request):
# Get conference key
wsck = request.websafeConferenceKey
# Fetch conference with target key
conf = ndb.Key(urlsage=wsck).get()
# Check validity of conference
if not conf:
raise endpoints.NotFoundException('No conference is found')
# Create query for this conference
Sessions = Session.query().filter(Session.websafeConferenceKey==wsck)
# Return set of SessionForm objects for each conference
return SessionForms(items=[self._copySessionToForm(session)
for session in Sessions])
@endpoints.method(SESSION_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/sessions/{typeOfSession}',
http_method='GET', name='getConferenceSessionByType')
def getConferenceSessionByType(self, request):
# Get type of session
typeOfSession = request.typeOfSession
# Fetch conference with target key
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# Check the validity of conference
if not conf:
raise endpoints.NotFoundException('No conference is found')
# Create ancestor query for all key matches
sessions = Session.query().filter(Session.typeOfSession==typeOfSession,
Session.websafeConferenceKey==request.websafeConferenceKey)
# Return set of SessionForm objects
return SessionForms(items=[self._copySessionToForm(session)
for session in Sessions])
@endpoints.method(SESSION_GET_BY_SPEAKER_REQUEST, SessionForms,
path='/sessions/{speaker}',
http_method='GET', name='getSessionBySpeaker')
def getSessionsBySpeaker(self, request):
sessions= Session.query().filter(Session.speaker==request.speaker)
# Return set of SessionForm objects
return SessionForms(items=[self._copySessionToForm(session)
for session in Sessions])
@endpoints.method(CONF_GET_REQUEST, ProfileForms,
path='/getAttendeesByConference/{websafeConferenceKey}',
http_method='GET', name='getAttendeesByConference')
def getAttendeesByConference(self, request):
profiles = Profile.query()
attendees = []
for pro in profiles:
if request.websafeConferenceKey in pro.conferenceKeysToAttend:
attendees.append(pro)
return ProfileForms(items=[self._copyProfileToForm(attendee)
for attendee in attendees])
@endpoints.method(SESSION_REQUEST, ProfileForms,
path='/getAttendeesBySession/{sessionKey}',
http_method='GET', name='GetAttendeesBySession')
def getAttendeesBySession(self, request):
s_Key = request.sessionKey
profiles = Profile.query()
attendees = []
for pro in profiles:
if s_Key in pro.sessionKeysInWishlist:
attendees.append(pro)
return ProfileForms(items=[self._copyProfileToForm(attendee)
for attendee in attendees])
@endpoints.method(SESSION_REQUEST, SessionForm,
path='addSessionToWishlist',
http_method='POST', name='addSessionToWishlist')
def addSessionToWishlist(self, request):
# Get session key
s_Key = request.sessionKey
# Get session object
session = ndb.Key(urlsafe=s_Key).get()
# Check the validity of session
if not session:
raise endpoints.NotFoundException('No session is found')
# Check the validity of user
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
profile = self._getProfileFromUser()
if not profile:
raise endpoints.BadRequestException('Profile does not exist for user')
# Check key and Session
if not type(ndb.Key(urlsafe=sessionKey).get()) == Session:
raise endpoints.NotFoundException('This key is not a Session instance')
# Add session to wishlist
if sessionKey not in profile.sessionKeysInWishlist:
try:
profile.sessionKeysInWishlist.append(sessionKey)
profile.put()
except Exception:
raise endpoints.InternalServerErrorException('Error in storing the wishlist')
return self._copySessionToForm(session)
@endpoints.method(message_types.VoidMessage,SessionForms,
path='getSessionsInWishlist', http_method='GET',
name='getSessionsInWishlist')
def getSessionsInWishlist(self,request):
profile = self._getProfileFromUser()
if not profile:
raise endpoints.BadRequestException('Profile does not exist for user')
# Get all session keys
sessionkeys = [ndb.Key(urlsafe=sessionkey) for sessionkey in profile.sessionKeysInWishlist]
sessions = ndb.get_multi(sessionkeys)
# Return set of SessionForm objects per conference
return SessionForms(items=[self._copySessionToForm(session) for session in sessions])
@endpoints.method(message_types.VoidMessage,BooleanMessage,
path='clearData', http_method='GET',
name='clearData')
def clearData(self,request):
ndb.delete_multi(Session.query().fetch(keys_only = True))
ndb.delete_multi(Conference.query().fetch(keys_only = True))
profiles = Profile.query()
for profile in profiles:
profile.conferenceKeysToAttend = []
profile.sessionKeysInWishlist = []
profile.put()
return BooleanMessage(data=True)
@endpoints.method(message_types.VoidMessage, StringMessage,
path='speaker/get_features',
http_method='GET', name='getFeaturedSpeaker')
def getFeaturedSpeaker(self, request):
featuredSpeaker = memcache.get(MEMCACHE_FEATURED_SPEAKER_KEY)
if not featuredSpeaker:
featuredSpeaker = ""
# return json data
return StringMessage(data=json.dumps(featuredSpeaker))
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
# copy relevant fields from Profile to ProfileForm
pf = ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# convert t-shirt string to Enum; just copy others
if field.name == 'teeShirtSize':
setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
def _getProfileFromUser(self):
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get Profile from datastore
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
# create new Profile if not there
if not profile:
profile = Profile(
key = p_key,
displayName = user.nickname(),
mainEmail= user.email(),
teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile # return Profile
def _doProfile(self, save_request=None):
# get user Profile
prof = self._getProfileFromUser()
# if saveProfile(), process user-modifyable fields
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
#if field == 'teeShirtSize':
# setattr(prof, field, str(val).upper())
#else:
# setattr(prof, field, val)
prof.put()
# return ProfileForm
return self._copyProfileToForm(prof)
@endpoints.method(message_types.VoidMessage, ProfileForm,
path='profile', http_method='GET', name='getProfile')
def getProfile(self, request):
return self._doProfile()
@endpoints.method(ProfileMiniForm, ProfileForm,
path='profile', http_method='POST', name='saveProfile')
def saveProfile(self, request):
return self._doProfile(request)
# - - - Announcements - - - - - - - - - - - - - - - - - - - -
@staticmethod
def _cacheAnnouncement():
confs = Conference.query(ndb.AND(
Conference.seatsAvailable <= 5,
Conference.seatsAvailable > 0)
).fetch(projection=[Conference.name])
if confs:
# If there are almost sold out conferences,
# format announcement and set it in memcache
announcement = ANNOUNCEMENT_TPL % (
', '.join(conf.name for conf in confs))
memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
else:
# If there are no sold out conferences,
# delete the memcache announcements entry
announcement = ""
memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
return announcement
@endpoints.method(message_types.VoidMessage, StringMessage,
path='conference/announcement/get',
http_method='GET', name='getAnnouncement')
def getAnnouncement(self, request):
return StringMessage(data=memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or "")
@staticmethod
def _cacheFeaturedSpeaker():
sessions = Session.query()
speakersCounter = {}
featured_speaker = ""
num = 0
for session in sessions:
if session.speaker:
if session.speaker not in speakersCounter:
speakersCounter[session.speaker] = 1
else:
speakersCounter[session.speaker] += 1
if speakersCounter[session.speaker] > num:
featured_speaker = session.speaker
num = speakersCounter[session.speaker]
memcache.set(MEMCACHE_FEATURED_SPEAKER_KEY, featured_speaker)
return featured_speaker
# - - - Registration - - - - - - - - - - - - - - - - - - - -
@ndb.transactional(xg=True)
def _conferenceRegistration(self, request, reg=True):
retval = None
prof = self._getProfileFromUser() # get user Profile
# check if conf exists given websafeConfKey
# get conference; check that it exists
wsck = request.websafeConferenceKey
conf = ndb.Key(urlsafe=wsck).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
# register
if reg:
# check if user already registered otherwise add
if wsck in prof.conferenceKeysToAttend:
raise ConflictException(
"You have already registered for this conference")
# check if seats avail
if conf.seatsAvailable <= 0:
raise ConflictException(
"There are no seats available.")
# register user, take away one seat
prof.conferenceKeysToAttend.append(wsck)
conf.seatsAvailable -= 1
retval = True
# unregister
else:
# check if user already registered
if wsck in prof.conferenceKeysToAttend:
# unregister user, add back one seat
prof.conferenceKeysToAttend.remove(wsck)
conf.seatsAvailable += 1
retval = True
else:
retval = False
# write things back to the datastore & return
prof.put()
conf.put()
return BooleanMessage(data=retval)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/attending',
http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
prof = self._getProfileFromUser() # get user Profile
conf_keys = [ndb.Key(urlsafe=wsck) for wsck in prof.conferenceKeysToAttend]
conferences = ndb.get_multi(conf_keys)
# get organizers
organisers = [ndb.Key(Profile, conf.organizerUserId) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return set of ConferenceForm objects per Conference
return ConferenceForms(items=[self._copyConferenceToForm(conf, names[conf.organizerUserId])\
for conf in conferences]
)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='POST', name='registerForConference')
def registerForConference(self, request):
return self._conferenceRegistration(request)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='DELETE', name='unregisterFromConference')
def unregisterFromConference(self, request):
return self._conferenceRegistration(request, reg=False)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='filterPlayground',
http_method='GET', name='filterPlayground')
def filterPlayground(self, request):
q = Conference.query()
# field = "city"
# operator = "="
# value = "London"
# f = ndb.query.FilterNode(field, operator, value)
# q = q.filter(f)
q = q.filter(Conference.city=="London")
q = q.filter(Conference.topics=="Medical Innovations")
q = q.filter(Conference.month==6)
return ConferenceForms(
items=[self._copyConferenceToForm(conf, "") for conf in q]
)
api = endpoints.api_server([ConferenceApi]) # register API
| true | true |
1c320c87b3d8770c338cf4316ed1452f8194399a | 27 | py | Python | src/access/access_exam.py | monkeychen/python-tutorial | a24785da6b4d857200b819ad4d960885b1ef7a20 | [
"Apache-2.0"
] | null | null | null | src/access/access_exam.py | monkeychen/python-tutorial | a24785da6b4d857200b819ad4d960885b1ef7a20 | [
"Apache-2.0"
] | null | null | null | src/access/access_exam.py | monkeychen/python-tutorial | a24785da6b4d857200b819ad4d960885b1ef7a20 | [
"Apache-2.0"
] | null | null | null | print("hello")
a = "test"
| 6.75 | 14 | 0.555556 | print("hello")
a = "test"
| true | true |
1c320c941caf00a0a7504d386a9d4dfb1ff705f3 | 8,153 | py | Python | code/src/functionality_helpers.py | ZohrehShams/IntegrativeRuleExtractionMethodology | fd7b569d11de540ffe94e0cc588e78305e45689e | [
"MIT"
] | 1 | 2022-03-20T12:33:16.000Z | 2022-03-20T12:33:16.000Z | code/src/functionality_helpers.py | ZohrehShams/IntegrativeRuleExtractionMethodology | fd7b569d11de540ffe94e0cc588e78305e45689e | [
"MIT"
] | null | null | null | code/src/functionality_helpers.py | ZohrehShams/IntegrativeRuleExtractionMethodology | fd7b569d11de540ffe94e0cc588e78305e45689e | [
"MIT"
] | 1 | 2022-03-20T12:33:58.000Z | 2022-03-20T12:33:58.000Z | import numpy as np
import pickle
import dnn_re
from evaluate_rules.predict_explain import predict_explain, print_explanation
from evaluate_rules.overlapping_features import features_recurrence_in_explanation
from src import *
from evaluate_rules.predict_explain import predict_explain, print_explanation
from evaluate_rules.overlapping_features import *
from rule_ranking.rank_rules import rank_rule_scores, rank_rule_scores_fav
from rule_ranking.eliminate_rules import eliminate_rules, eliminate_rules_fav_score
from model.generation.helpers.init_dataset_dir import clean_up, clear_file
# Extract ruleset from the entire dataset (no fold split) and saves them
def validate_rem_d(extract_rules_flag=False):
if extract_rules_flag:
X = np.load(N_FOLD_CV_SPLIT_X_data_FP)
y = np.load(N_FOLD_CV_SPLIT_y_data_FP)
# Extract rules
nn_accuracy, nn_auc, rules, re_time, re_memory= dnn_re.run_whole_dataset(X, y, model_fp)
for rule in rules:
print(len(rule.premise))
# Save rules extracted
print('Saving rules extracted...', end='', flush=True)
with open(rules_fp, 'wb') as rules_file:
pickle.dump(rules, rules_file)
print('done')
# Save rule extraction time and memory usage
print('Saving results...', end='', flush=True)
# Prints explanation for an instance generated by random sampling;
# also prints the frequency of features in the explanation
def explain_prediction_entire_data(flag=False):
if flag:
np.random.seed(110)
instance = np.random.uniform(0, 1, 1004)
with open(rules_fp, 'rb') as rules_file:
rules = pickle.load(rules_file)
prediction, explanation = predict_explain(rules, instance)
print(print_explanation(prediction, explanation))
print(features_recurrence_in_explanation(explanation))
def explain_prediction(flag=False):
if flag:
np.random.seed(114)
instance = np.random.uniform(0, 1, 1004)
fold = np.random.randint(5)
with open(n_fold_rules_fp(fold), 'rb') as rules_file:
rules = pickle.load(rules_file)
prediction, explanation = predict_explain(rules, instance)
print(print_explanation(prediction, explanation))
print(features_recurrence_in_explanation(explanation))
# Prints the top 10 recurring features in the entire ruleset,
# as well as in the ruleset for each class,
# along with the frequency of operator for each of the top features
def compute_top_recurring_features(flag=False):
if flag:
with open(rules_fp, 'rb') as rules_file:
rules = pickle.load(rules_file)
print(features_recurrence(rules, DATA_FP, 10))
print(features_recurrence_per_class(rules, DATA_FP, 10))
print(top_features_operator_frequency_recurrence_per_class(rules, DATA_FP, 10))
def compute_top_recurring_features_across_folds(flag=False):
    """Print the top 50 recurring features across all CV folds, the top 50
    per class, and the operator frequencies of the top features per class.

    Args:
        flag (bool): when False the function does nothing.
    """
    if not flag:
        return
    fold_rulesets = []
    for fold_idx in range(N_FOLDS):
        with open(n_fold_rules_fp(fold_idx), 'rb') as rules_file:
            fold_rulesets.append(pickle.load(rules_file))
    print("features recurrence across folds:")
    features_recurrence_across_folds(fold_rulesets, DATA_FP, 50)
    print('\n')
    print("features recurrence per class across folds %s" % (features_recurrence_per_class_across_folds(fold_rulesets, DATA_FP, 50)))
    print('\n')
    print("top features operator frequency recurrence per class across folds %s" % (top_features_operator_frequency_recurrence_per_class_across_folds(fold_rulesets, DATA_FP, 50)))
def compute_favourite_features_frequency(rule_path, fav_features, flag=False):
    """Return the recurrence frequency of the favourite features within
    the ruleset stored at ``rule_path``.

    Args:
        rule_path: path to a pickled ruleset.
        fav_features: collection of favourite feature names.
        flag (bool): when False the function does nothing and returns None.
    """
    if not flag:
        return
    with open(rule_path, 'rb') as rules_file:
        ruleset = pickle.load(rules_file)
    return fav_features_recurrence(ruleset, DATA_FP, fav_features)
def compute_favourite_features_frequency_across_folds(percentage, fav_features, flag=False):
    """Return the recurrence frequency of the favourite features across the
    remaining (post-elimination) rulesets of all CV folds.

    Args:
        percentage: elimination percentage identifying which remaining
            ruleset files to read.
        fav_features: collection of favourite feature names.
        flag (bool): when False the function does nothing and returns None.
    """
    if not flag:
        return
    fold_rulesets = []
    for fold_idx in range(N_FOLDS):
        remaining_path = n_fold_rules_fp_remaining(N_FOLD_RULES_REMAINING_DP, fold_idx)(percentage)
        with open(remaining_path, 'rb') as rules_file:
            fold_rulesets.append(pickle.load(rules_file))
    return fav_features_recurrence_across_folds(fold_rulesets, DATA_FP, fav_features)
def pick_random_features(n, flag=False):
    """Pick ``n`` features at random from the ruleset extracted from the
    entire dataset.

    Args:
        n (int): number of features to pick.
        flag (bool): when False the function does nothing and returns None.
    """
    if not flag:
        return
    with open(rules_fp, 'rb') as rules_file:
        ruleset = pickle.load(rules_file)
    return random_features_in_rules(ruleset, DATA_FP, n)
def pick_random_features_across_folds(n, flag=False):
    """Pick ``n`` features at random from the rulesets of all CV folds.

    Args:
        n (int): number of features to pick.
        flag (bool): when False the function does nothing and returns None.

    Returns:
        The randomly chosen features, as produced by
        ``random_features_in_rules_across_folds``.
    """
    if not flag:
        return None
    # Bugfix/cleanup: the original also did `pd.read_csv(DATA_FP)` and built
    # `features_name` here, but never used either — that was a pointless full
    # CSV read on every call, so it has been removed.
    list_of_rules = []
    for fold in range(0, N_FOLDS):
        with open(n_fold_rules_fp(fold), 'rb') as rules_file:
            list_of_rules.append(pickle.load(rules_file))
    return random_features_in_rules_across_folds(list_of_rules, DATA_FP, n)
def validate_rem_d_ranking_elimination(rank_rules_flag=False, rule_elimination=False, percentage=0):
    """Rank the rules extracted from the entire dataset and/or eliminate
    the lowest-ranked ones.

    Ranking scores each rule in place (via ``rank_rule_scores``) and
    re-pickles the scored ruleset over the original file. Elimination drops
    the bottom ``percentage`` fraction of rules (``percentage=0.5`` removes
    50%) and saves the survivors to ``rules_fp_remaining(percentage)``.

    Args:
        rank_rules_flag (bool): score and re-save the ruleset.
        rule_elimination (bool): eliminate low-ranked rules and save the rest.
        percentage (float): fraction of rules to eliminate (0..1).
    """
    if rank_rules_flag:
        # Improvement: X and y are only needed for scoring, so they are
        # loaded here instead of unconditionally at function entry — the
        # original paid two np.load calls even when only eliminating.
        X = np.load(N_FOLD_CV_SPLIT_X_data_FP)
        y = np.load(N_FOLD_CV_SPLIT_y_data_FP)
        extracted_rules_file_path = rules_fp
        with open(extracted_rules_file_path, 'rb') as rules_file:
            rules = pickle.load(rules_file)
        for rule in rules:
            rank_rule_scores(rule, X, y, use_rl=True)
        # Truncate the old file before rewriting the scored ruleset.
        clear_file(extracted_rules_file_path)
        print('Saving rules after scoring...', end='', flush=True)
        with open(extracted_rules_file_path, 'wb') as rules_file:
            pickle.dump(rules, rules_file)
    if rule_elimination:
        extracted_rules_file_path = rules_fp
        remaining_rules = eliminate_rules(extracted_rules_file_path, percentage)
        # Save remaining rules
        print('Saving remaining rules ...', end='', flush=True)
        with open(rules_fp_remaining(percentage), 'wb') as rules_file:
            pickle.dump(remaining_rules, rules_file)
        print('done')
def validate_rem_d_fav_ranking_elimination(favourite_features=[], rank_rules_fav_flag=False, rule_elimination=False,
                                           percentage=0):
    """Rank rules by their favourite-feature score and/or eliminate the
    lowest-scored ones.

    Ranking scores each rule in place (via ``rank_rule_scores_fav``) and
    re-pickles the scored ruleset over the original file. Elimination drops
    the bottom ``percentage`` fraction by favourite score and saves the
    survivors to ``rules_fp_remaining(percentage)``.

    Args:
        favourite_features: feature names to favour during scoring.
        rank_rules_fav_flag (bool): score and re-save the ruleset.
        rule_elimination (bool): eliminate low-scored rules and save the rest.
        percentage (float): fraction of rules to eliminate (0..1).
    """
    rules_path = rules_fp
    if rank_rules_fav_flag:
        with open(rules_path, 'rb') as rules_file:
            scored_rules = pickle.load(rules_file)
        feature_names = list(pd.read_csv(DATA_FP).columns)
        for single_rule in scored_rules:
            rank_rule_scores_fav(single_rule, feature_names, favourite_features)
        # Truncate the old file before rewriting the scored ruleset.
        clear_file(rules_path)
        print('Saving rules after scoring...', end='', flush=True)
        with open(rules_path, 'wb') as rules_file:
            pickle.dump(scored_rules, rules_file)
    if rule_elimination:
        surviving_rules = eliminate_rules_fav_score(rules_path, percentage)
        print('Saving remaining rules ...', end='', flush=True)
        with open(rules_fp_remaining(percentage), 'wb') as rules_file:
            pickle.dump(surviving_rules, rules_file)
        print('done')
import pickle
import dnn_re
from evaluate_rules.predict_explain import predict_explain, print_explanation
from evaluate_rules.overlapping_features import features_recurrence_in_explanation
from src import *
from evaluate_rules.predict_explain import predict_explain, print_explanation
from evaluate_rules.overlapping_features import *
from rule_ranking.rank_rules import rank_rule_scores, rank_rule_scores_fav
from rule_ranking.eliminate_rules import eliminate_rules, eliminate_rules_fav_score
from model.generation.helpers.init_dataset_dir import clean_up, clear_file
def validate_rem_d(extract_rules_flag=False):
if extract_rules_flag:
X = np.load(N_FOLD_CV_SPLIT_X_data_FP)
y = np.load(N_FOLD_CV_SPLIT_y_data_FP)
nn_accuracy, nn_auc, rules, re_time, re_memory= dnn_re.run_whole_dataset(X, y, model_fp)
for rule in rules:
print(len(rule.premise))
print('Saving rules extracted...', end='', flush=True)
with open(rules_fp, 'wb') as rules_file:
pickle.dump(rules, rules_file)
print('done')
print('Saving results...', end='', flush=True)
def explain_prediction_entire_data(flag=False):
if flag:
np.random.seed(110)
instance = np.random.uniform(0, 1, 1004)
with open(rules_fp, 'rb') as rules_file:
rules = pickle.load(rules_file)
prediction, explanation = predict_explain(rules, instance)
print(print_explanation(prediction, explanation))
print(features_recurrence_in_explanation(explanation))
def explain_prediction(flag=False):
if flag:
np.random.seed(114)
instance = np.random.uniform(0, 1, 1004)
fold = np.random.randint(5)
with open(n_fold_rules_fp(fold), 'rb') as rules_file:
rules = pickle.load(rules_file)
prediction, explanation = predict_explain(rules, instance)
print(print_explanation(prediction, explanation))
print(features_recurrence_in_explanation(explanation))
def compute_top_recurring_features(flag=False):
if flag:
with open(rules_fp, 'rb') as rules_file:
rules = pickle.load(rules_file)
print(features_recurrence(rules, DATA_FP, 10))
print(features_recurrence_per_class(rules, DATA_FP, 10))
print(top_features_operator_frequency_recurrence_per_class(rules, DATA_FP, 10))
def compute_top_recurring_features_across_folds(flag=False):
if flag:
list_of_rules=[]
for fold in range(0, N_FOLDS):
with open(n_fold_rules_fp(fold), 'rb') as rules_file:
rules = pickle.load(rules_file)
list_of_rules.append(rules)
print("features recurrence across folds:")
features_recurrence_across_folds(list_of_rules, DATA_FP, 50)
print('\n')
print("features recurrence per class across folds %s" %(features_recurrence_per_class_across_folds(list_of_rules, DATA_FP, 50)))
print('\n')
print("top features operator frequency recurrence per class across folds %s" %(top_features_operator_frequency_recurrence_per_class_across_folds(list_of_rules, DATA_FP, 50)))
def compute_favourite_features_frequency(rule_path, fav_features, flag=False):
if flag:
with open(rule_path, 'rb') as rules_file:
rules = pickle.load(rules_file)
fav_freq = fav_features_recurrence(rules, DATA_FP, fav_features)
return fav_freq
def compute_favourite_features_frequency_across_folds(percentage, fav_features, flag=False):
if flag:
list_of_rules = []
for fold in range(0, N_FOLDS):
with open(n_fold_rules_fp_remaining(N_FOLD_RULES_REMAINING_DP, fold)(percentage), 'rb') as rules_file:
rules = pickle.load(rules_file)
list_of_rules.append(rules)
fav_freq = fav_features_recurrence_across_folds(list_of_rules, DATA_FP, fav_features)
return fav_freq
def pick_random_features(n, flag=False):
if flag:
with open(rules_fp, 'rb') as rules_file:
rules = pickle.load(rules_file)
favourite_features = random_features_in_rules(rules, DATA_FP, n)
return favourite_features
def pick_random_features_across_folds(n, flag=False):
if flag:
list_of_rules = []
data_df = pd.read_csv(DATA_FP)
features_name = list(data_df.columns)
for fold in range(0, N_FOLDS):
with open(n_fold_rules_fp(fold), 'rb') as rules_file:
rules = pickle.load(rules_file)
list_of_rules.append(rules)
favourite_features = random_features_in_rules_across_folds(list_of_rules, DATA_FP, n)
return favourite_features
def validate_rem_d_ranking_elimination(rank_rules_flag=False, rule_elimination=False, percentage=0):
X = np.load(N_FOLD_CV_SPLIT_X_data_FP)
y = np.load(N_FOLD_CV_SPLIT_y_data_FP)
if rank_rules_flag:
extracted_rules_file_path = rules_fp
with open(extracted_rules_file_path, 'rb') as rules_file:
rules = pickle.load(rules_file)
for rule in rules:
rank_rule_scores(rule, X, y, use_rl=True)
clear_file(extracted_rules_file_path)
print('Saving rules after scoring...', end='', flush=True)
with open(extracted_rules_file_path, 'wb') as rules_file:
pickle.dump(rules, rules_file)
if rule_elimination:
extracted_rules_file_path = rules_fp
remaining_rules = eliminate_rules(extracted_rules_file_path, percentage)
print('Saving remaining rules ...', end='', flush=True)
with open(rules_fp_remaining(percentage), 'wb') as rules_file:
pickle.dump(remaining_rules, rules_file)
print('done')
def validate_rem_d_fav_ranking_elimination(favourite_features=[], rank_rules_fav_flag=False, rule_elimination=False,
percentage=0):
if rank_rules_fav_flag:
extracted_rules_file_path = rules_fp
with open(extracted_rules_file_path, 'rb') as rules_file:
rules = pickle.load(rules_file)
data_df = pd.read_csv(DATA_FP)
features_name = list(data_df.columns)
for rule in rules:
rank_rule_scores_fav(rule, features_name, favourite_features)
clear_file(extracted_rules_file_path)
print('Saving rules after scoring...', end='', flush=True)
with open(extracted_rules_file_path, 'wb') as rules_file:
pickle.dump(rules, rules_file)
if rule_elimination:
extracted_rules_file_path = rules_fp
remaining_rules = eliminate_rules_fav_score(extracted_rules_file_path, percentage)
print('Saving remaining rules ...', end='', flush=True)
with open(rules_fp_remaining(percentage), 'wb') as rules_file:
pickle.dump(remaining_rules, rules_file)
print('done') | true | true |
1c320cc60c9746c59bab9a2b8976d83777960563 | 13,423 | py | Python | intermediate_source/pruning_tutorial.py | Justin-A/PyTorch-tutorials-kr | 0d8e407523e5e75de0081becf800b82b37eb912f | [
"BSD-3-Clause"
] | 1 | 2021-11-16T05:29:28.000Z | 2021-11-16T05:29:28.000Z | intermediate_source/pruning_tutorial.py | Justin-A/PyTorch-tutorials-kr | 0d8e407523e5e75de0081becf800b82b37eb912f | [
"BSD-3-Clause"
] | null | null | null | intermediate_source/pruning_tutorial.py | Justin-A/PyTorch-tutorials-kr | 0d8e407523e5e75de0081becf800b82b37eb912f | [
"BSD-3-Clause"
] | 1 | 2022-02-27T10:47:39.000Z | 2022-02-27T10:47:39.000Z | # -*- coding: utf-8 -*-
"""
가지치기 기법(Pruning) 튜토리얼
=====================================
**저자**: `Michela Paganini <https://github.com/mickypaganini>`_
**번역** : `안상준 <https://github.com/Justin-A>`_
최첨단 딥러닝 모델들은 굉장히 많은 수의 파라미터값들로 구성되기 때문에, 쉽게 배포되기 어렵습니다.
이와 반대로, 생물학적 신경망들은 효율적으로 희소하게 연결된 것으로 알려져 있습니다.
모델의 정확도가 손상되지 않는 범위에서 메모리, 배터리, 하드웨어 소비량을 줄이고,
기기에 경량화된 모델을 배치하며, 개인이 이용하고 있는 기기에서 프라이버시가 보장되기 위해서는
모델에 포함된 파라미터 수를 줄여 압축하는 최적의 기법을 파악하는 것이 중요합니다.
연구 측면에서는, 가지치기 기법은 굉장히 많은 수의 파라미터값들로 구성된 모델과
굉장히 적은 수의 파라미터값들로 구성된 모델 간 학습 역학 차이를 조사하는데 주로 이용되기도 하며,
하위 신경망 모델과 파라미터값들의 초기화가 운이 좋게 잘 된 케이스를 바탕으로
("`lottery tickets <https://arxiv.org/abs/1803.03635>`_") 신경망 구조를 찾는 기술들에 대해 반대 의견을 제시하기도 합니다.
이번 튜토리얼에서는, ``torch.nn.utils.prune`` 을 이용하여 여러분이 설계한 딥러닝 모델에 대해 가지치기 기법을 적용해보는 것을 배워보고,
심화적으로 여러분의 맞춤형 가지치기 기법을 구현하는 방법에 대해 배워보도록 하겠습니다.
요구사항
------------
``"torch>=1.4"``
"""
import torch
from torch import nn
import torch.nn.utils.prune as prune
import torch.nn.functional as F
######################################################################
# 딥러닝 모델 생성
# -----------------------
# 이번 튜토리얼에서는, 얀 르쿤 교수님의 연구진들이 1998년도에 발표한 ``LeNet
# <http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf>`` 의 모델 구조를 이용합니다.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
# 1개 채널 수의 이미지를 입력값으로 이용하여 6개 채널 수의 출력값을 계산하는 방식
# Convolution 연산을 진행하는 커널(필터)의 크기는 3x3 을 이용
self.conv1 = nn.Conv2d(1, 6, 3)
self.conv2 = nn.Conv2d(6, 16, 3)
self.fc1 = nn.Linear(16 * 5 * 5, 120) # Convolution 연산 결과 5x5 크기의 16 채널 수의 이미지
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), 2)
x = x.view(-1, int(x.nelement() / x.shape[0]))
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
model = LeNet().to(device=device)
######################################################################
# 모듈 점검
# -----------------
#
# 가지치기 기법이 적용되지 않은 LeNet 모델의 ``conv1`` 층을 점검해봅시다.
# 여기에는 2개의 파라미터값들인 ``가중치``값과 ``편향``값을이 포함될 것이며, 버퍼는 존재하지 않을 것입니다.
module = model.conv1
print(list(module.named_parameters()))
######################################################################
print(list(module.named_buffers()))
######################################################################
# 모듈 가지치기 기법 적용 예제
# -----------------------------------
#
# 모듈에 대해 가지치기 기법을 적용하기 위해 (이번 예제에서는, LeNet 모델의 ``conv1`` 층)
# 첫 번째로는, ``torch.nn.utils.prune`` (또는 ``BasePruningMethod`` 의 서브 클래스로 직접 `구현
# <torch-nn-utils-prune>`_ )
# 내 존재하는 가지치기 기법을 선택합니다.
# 그 후, 해당 모듈 내에서 가지치기 기법을 적용하고자 하는 모듈과 파라미터를 지정합니다.
# 마지막으로, 가지치기 기법에 적당한 키워드 인자값을 이용하여 가지치기 매개변수를 지정합니다.
# 이번 예제에서는, ``conv1`` 층의 가중치의 30%값들을 랜덤으로 가지치기 기법을 적용해보겠습니다.
# 모듈은 함수에 대한 첫 번째 인자값으로 전달되며, ``name`` 은 문자열 식별자를 이용하여 해당 모듈 내 매개변수를 구분합니다.
# 그리고, ``amount`` 는 가지치기 기법을 적용하기 위한 대상 가중치값들의 백분율 (0과 1사이의 실수값),
# 혹은 가중치값의 연결의 개수 (음수가 아닌 정수) 를 지정합니다.
prune.random_unstructured(module, name="weight", amount=0.3)
######################################################################
# 가지치기 기법은 가중치값들을 파라미터값들로부터 제거하고 ``weight_orig`` (즉, 초기 가중치 이름에 "_orig"을 붙인) 이라는
# 새로운 파라미터값으로 대체하는 것으로 실행됩니다.
# ``weight_orig`` 은 텐서값에 가지치기 기법이 적용되지 않은 상태를 저장합니다.
# ``bias`` 은 가지치기 기법이 적용되지 않았기 때문에 그대로 남아 있습니다.
print(list(module.named_parameters()))
######################################################################
# 위에서 선택한 가지치기 기법에 의해 생성되는 가지치기 마스크는 초기 파라미터 ``name`` 에 ``weight_mask``
# (즉, 초기 가중치 이름에 "_mask"를 붙인) 이름의 모듈 버퍼로 저장됩니다.
print(list(module.named_buffers()))
######################################################################
# 수정이 되지 않은 상태에서 순전파를 진행하기 위해서는 ``가중치``값 속성이 존재해야 합니다.
# ``torch.nn.utils.prune`` 내 구현된 가지치기 기법은 가지치기 기법이 적용된 가중치값들을 이용하여
# (기존의 가중치값에 가지치기 기법이 적용된) 순전파를 진행하고, ``weight`` 속성값에 가지치기 기법이 적용된 가중치값들을 저장합니다.
# 이제 가중치값들은 ``module`` 의 매개변수가 아니라 하나의 속성값으로 취급되는 점을 주의하세요.
print(module.weight)
######################################################################
# 최종적으로, 가지치기 기법은 파이토치의 ``forward_pre_hooks`` 를 이용하여 각 순전파가 진행되기 전에 가지치기 기법이 적용됩니다.
# 구체적으로, 지금까지 진행한 것 처럼, 모듈이 가지치기 기법이 적용되었을 때,
# 가지치기 기법이 적용된 각 파라미터값들이 ``forward_pre_hook`` 를 얻게됩니다.
# 이러한 경우, ``weight`` 이름인 기존 파라미터값에 대해서만 가지치기 기법을 적용하였기 때문에,
# 훅은 오직 1개만 존재할 것입니다.
print(module._forward_pre_hooks)
######################################################################
# 완결성을 위해, 편향값에 대해서도 가지치기 기법을 적용할 수 있으며,
# 모듈의 파라미터, 버퍼, 훅, 속성값들이 어떻게 변경되는지 확인할 수 있습니다.
# 또 다른 가지치기 기법을 적용해보기 위해, ``l1_unstructured`` 가지치기 함수에서 구현된 내용과 같이,
# L1 Norm 값이 가장 작은 편향값 3개를 가지치기를 시도해봅시다.
prune.l1_unstructured(module, name="bias", amount=3)
######################################################################
# 이전에서 실습한 내용을 토대로, 명명된 파라미터값들이 ``weight_orig``, ``bias_orig`` 2개를 모두 포함할 것이라 예상됩니다.
# 버퍼들은 ``weight_mask``, ``bias_mask`` 2개를 포함할 것입니다.
# 가지치기 기법이 적용된 2개의 텐서값들은 모듈의 속성값으로 존재할 것이며, 모듈은 2개의 ``forward_pre_hooks`` 을 갖게 될 것입니다.
print(list(module.named_parameters()))
######################################################################
print(list(module.named_buffers()))
######################################################################
print(module.bias)
######################################################################
print(module._forward_pre_hooks)
######################################################################
# 가지치기 기법 반복 적용
# ------------------------------------
#
# 모듈 내 같은 파라미터값에 대해 가지치기 기법이 여러번 적용될 수 있으며, 다양한 가지치기 기법의 조합이 적용된 것과 동일하게 적용될 수 있습니다.
# 새로운 마스크와 이전의 마스크의 결합은 ``PruningContainer`` 의 ``compute_mask`` 메소드를 통해 처리할 수 있습니다.
#
# 예를 들어, 만약 ``module.weight`` 값에 가지치기 기법을 적용하고 싶을 때, 텐서의 0번째 축의 L2 norm값을 기준으로 구조화된 가지치기 기법을 적용합니다.
# (여기서 0번째 축이란, 합성곱 연산을 통해 계산된 출력값에 대해 각 채널별로 적용된다는 것을 의미합니다.)
# 이 방식은 ``ln_structured`` 함수와 ``n=2`` 와 ``dim=0`` 의 인자값을 바탕으로 구현될 수 있습니다.
prune.ln_structured(module, name="weight", amount=0.5, n=2, dim=0)
############################################################################
# 우리가 확인할 수 있듯이, 이전 마스크의 작용을 유지하면서 채널의 50% (6개 중 3개) 에 해당되는 모든 연결을 0으로 변경합니다.
print(module.weight)
############################################################################
# 이에 해당하는 훅은 ``torch.nn.utils.prune.PruningContainer`` 형태로 존재하며, 가중치에 적용된 가지치기 기법의 이력을 저장합니다.
for hook in module._forward_pre_hooks.values():
if hook._tensor_name == "weight": # 가중치에 해당하는 훅을 선택
break
print(list(hook)) # 컨테이너 내 가지치기 기법의 이력
######################################################################
# 가지치기 기법이 적용된 모델의 직렬화
# ---------------------------------------------
# 마스크 버퍼들과 가지치기 기법이 적용된 텐서 계산에 사용된 기존의 파라미터를 포함하여 관련된 모든 텐서값들은
# 필요한 경우 모델의 ``state_dict`` 에 저장되기 떄문에, 쉽게 직렬화하여 저장할 수 있다.
print(model.state_dict().keys())
######################################################################
# 가지치기 기법의 재-파라미터화 제거
# -----------------------------------------
#
# 가지치기 기법이 적용된 것을 영구적으로 만들기 위해서, 재-파라미터화 관점의
# ``weight_orig`` 와 ``weight_mask`` 값을 제거하고, ``forward_pre_hook`` 값을 제거합니다.
# 제거하기 위해 ``torch.nn.utils.prune`` 내 ``remove`` 함수를 이용할 수 있습니다.
# 가지치기 기법이 적용되지 않은 것처럼 실행되는 것이 아닌 점을 주의하세요.
# 이는 단지 가지치기 기법이 적용된 상태에서 가중치 파라미터값을 모델 파라미터값으로 재할당하는 것을 통해 영구적으로 만드는 것일 뿐입니다.
######################################################################
# 재-파라미터화를 제거하기 전 상태
print(list(module.named_parameters()))
######################################################################
print(list(module.named_buffers()))
######################################################################
print(module.weight)
######################################################################
# 재-파라미터를 제거한 후 상태
prune.remove(module, 'weight')
print(list(module.named_parameters()))
######################################################################
print(list(module.named_buffers()))
######################################################################
# 모델 내 여러 파라미터값들에 대하여 가지치기 기법 적용
# --------------------------------------
#
# 가지치기 기법을 적용하고 싶은 파라미터값들을 지정함으로써, 이번 예제에서 볼 수 있는 것 처럼,
# 신경망 모델 내 여러 텐서값들에 대해서 쉽게 가지치기 기법을 적용할 수 있습니다.
new_model = LeNet()
for name, module in new_model.named_modules():
# 모든 2D-conv 층의 20% 연결에 대해 가지치기 기법을 적용
if isinstance(module, torch.nn.Conv2d):
prune.l1_unstructured(module, name='weight', amount=0.2)
# 모든 선형 층의 40% 연결에 대해 가지치기 기법을 적용
elif isinstance(module, torch.nn.Linear):
prune.l1_unstructured(module, name='weight', amount=0.4)
print(dict(new_model.named_buffers()).keys()) # 존재하는 모든 마스크들을 확인
######################################################################
# 전역 범위에 대한 가지치기 기법 적용
# ----------------------------------------------
#
# 지금까지, "지역 변수" 에 대해서만 가지치기 기법을 적용하는 방법을 살펴보았습니다.
# (즉, 가중치 규모, 활성화 정도, 경사값 등의 각 항목의 통계량을 바탕으로 모델 내 텐서값 하나씩 가지치기 기법을 적용하는 방식)
# 그러나, 범용적이고 아마 더 강력한 방법은 각 층에서 가장 낮은 20%의 연결을 제거하는것 대신에, 전체 모델에 대해서 가장 낮은 20% 연결을 한번에 제거하는 것입니다.
# 이것은 각 층에 대해서 가지치기 기법을 적용하는 연결의 백분율값을 다르게 만들 가능성이 있습니다.
# ``torch.nn.utils.prune`` 내 ``global_unstructured`` 을 이용하여 어떻게 전역 범위에 대한 가지치기 기법을 적용하는지 살펴봅시다.
model = LeNet()
parameters_to_prune = (
(model.conv1, 'weight'),
(model.conv2, 'weight'),
(model.fc1, 'weight'),
(model.fc2, 'weight'),
(model.fc3, 'weight'),
)
prune.global_unstructured(
parameters_to_prune,
pruning_method=prune.L1Unstructured,
amount=0.2,
)
######################################################################
# 이제 각 층에 존재하는 연결들에 가지치기 기법이 적용된 정도가 20%가 아닌 것을 확인할 수 있습니다.
# 그러나, 전체 가지치기 적용 범위는 약 20%가 될 것입니다.
print(
"Sparsity in conv1.weight: {:.2f}%".format(
100. * float(torch.sum(model.conv1.weight == 0))
/ float(model.conv1.weight.nelement())
)
)
print(
"Sparsity in conv2.weight: {:.2f}%".format(
100. * float(torch.sum(model.conv2.weight == 0))
/ float(model.conv2.weight.nelement())
)
)
print(
"Sparsity in fc1.weight: {:.2f}%".format(
100. * float(torch.sum(model.fc1.weight == 0))
/ float(model.fc1.weight.nelement())
)
)
print(
"Sparsity in fc2.weight: {:.2f}%".format(
100. * float(torch.sum(model.fc2.weight == 0))
/ float(model.fc2.weight.nelement())
)
)
print(
"Sparsity in fc3.weight: {:.2f}%".format(
100. * float(torch.sum(model.fc3.weight == 0))
/ float(model.fc3.weight.nelement())
)
)
print(
"Global sparsity: {:.2f}%".format(
100. * float(
torch.sum(model.conv1.weight == 0)
+ torch.sum(model.conv2.weight == 0)
+ torch.sum(model.fc1.weight == 0)
+ torch.sum(model.fc2.weight == 0)
+ torch.sum(model.fc3.weight == 0)
)
/ float(
model.conv1.weight.nelement()
+ model.conv2.weight.nelement()
+ model.fc1.weight.nelement()
+ model.fc2.weight.nelement()
+ model.fc3.weight.nelement()
)
)
)
######################################################################
# ``torch.nn.utils.prune`` 에서 확장된 맞춤형 가지치기 기법
# ------------------------------------------------------------------
# 맞춤형 가지치기 기법은, 다른 가지치기 기법을 적용하는 것과 같은 방식으로,
# ``BasePruningMethod`` 의 기본 클래스인 ``nn.utils.prune`` 모듈을 활용하여 구현할 수 있습니다.
# 기본 클래스는 ``__call__``, ``apply_mask``, ``apply``, ``prune``, ``remove`` 메소드들을 내포하고 있습니다.
# 특별한 케이스가 아닌 경우, 기본적으로 구성된 메소드들을 재구성할 필요가 없습니다.
# 그러나, ``__init__`` (구성요소), ``compute_mask``
# (가지치기 기법의 논리에 따라 주어진 텐서값에 마스크를 적용하는 방법) 을 고려하여 구성해야 합니다.
# 게다가, 가지치기 기법을 어떠한 방식으로 적용하는지 명확하게 구성해야 합니다.
# (지원되는 옵션은 ``global``, ``structured``, ``unstructured`` 입니다.)
# 이러한 방식은, 가지치기 기법을 반복적으로 적용해야 하는 경우 마스크를 결합하는 방법을 결정하기 위해 필요합니다.
# 즉, 이미 가지치기 기법이 적용된 모델에 대해서 가지치기 기법을 적용할 때,
# 기존의 가지치기 기법이 적용되지 않은 파라미터 값에 대해 가지치기 기법이 영향을 미칠 것으로 예상됩니다.
# ``PRUNING_TYPE``을 지정한다면, 가지치기 기법을 적용하기 위해 파라미터 값을 올바르게 제거하는
# ``PruningContainer`` (마스크 가지치기 기법을 반복적으로 적용하는 것을 처리하는)를 가능하게 합니다.
# 예를 들어, 다른 모든 항목이 존재하는 텐서를 가지치기 기법을 구현하고 싶을 때,
# (또는, 텐서가 이전에 가지치기 기법에 의해 제거되었거나 남아있는 텐서에 대해)
# 한 층의 개별 연결에 작용하며 전체 유닛/채널 (``'structured'``), 또는 다른 파라미터 간
# (``'global'``) 연결에는 작용하지 않기 때문에 ``PRUNING_TYPE='unstructured'`` 방식으로 진행됩니다.
class FooBarPruningMethod(prune.BasePruningMethod):
"""
텐서 내 다른 항목들에 대해 가지치기 기법을 적용
"""
PRUNING_TYPE = 'unstructured'
def compute_mask(self, t, default_mask):
mask = default_mask.clone()
mask.view(-1)[::2] = 0
return mask
######################################################################
# ``nn.Module`` 의 매개변수에 적용하기 위해 인스턴스화하고 적용하는 간단한 기능을 구현해봅니다.
def foobar_unstructured(module, name):
"""
텐서 내 다른 모든 항목들을 제거하여 `module` 에서 `name` 이라는 파라미터에 대해 가자치기 기법을 적용
다음 내용에 따라 모듈을 수정 (또는 수정된 모듈을 반환):
1) 가지치기 기법에 의해 매개변수 `name` 에 적용된 이진 마스크에 해당하는 명명된 버퍼 `name+'_mask'` 를 추가합니다.
`name` 파라미터는 가지치기 기법이 적용된 것으로 대체되며, 가지치기 기법이 적용되지 않은
기존의 파라미터는 `name+'_orig'` 라는 이름의 새로운 매개변수에 저장됩니다.
인자값:
module (nn.Module): 가지치기 기법을 적용해야하는 텐서를 포함하는 모듈
name (string): 모듈 내 가지치기 기법이 적용될 파라미터의 이름
반환값:
module (nn.Module): 입력 모듈에 대해서 가지치기 기법이 적용된 모듈
예시:
>>> m = nn.Linear(3, 4)
>>> foobar_unstructured(m, name='bias')
"""
FooBarPruningMethod.apply(module, name)
return module
######################################################################
# 한번 해봅시다!
model = LeNet()
foobar_unstructured(model.fc3, name='bias')
print(model.fc3.bias_mask)
| 37.494413 | 99 | 0.53088 |
import torch
from torch import nn
import torch.nn.utils.prune as prune
import torch.nn.functional as F
| true | true |
1c320dffcec86724926d3d7d3a725d71e7aef05a | 1,141 | py | Python | hata/ext/slash/menus/closer.py | albertopoljak/hata | 96d0b3182eb4f5291eaf36bd23d521787c6b01f1 | [
"0BSD"
] | null | null | null | hata/ext/slash/menus/closer.py | albertopoljak/hata | 96d0b3182eb4f5291eaf36bd23d521787c6b01f1 | [
"0BSD"
] | null | null | null | hata/ext/slash/menus/closer.py | albertopoljak/hata | 96d0b3182eb4f5291eaf36bd23d521787c6b01f1 | [
"0BSD"
] | 1 | 2020-09-17T20:10:15.000Z | 2020-09-17T20:10:15.000Z | __all__ = ('Closer', )
from scarletio import CancelledError
from ....discord.interaction import ComponentButton, ComponentRow
from .menu import Menu
from .helpers import EMOJI_CANCEL, top_level_check, top_level_get_timeout, CUSTOM_ID_CANCEL, get_auto_check
class Closer(Menu):
BUTTON_CANCEL = ComponentButton(emoji=EMOJI_CANCEL, custom_id=CUSTOM_ID_CANCEL)
BUTTONS = ComponentRow(BUTTON_CANCEL,)
__slots__ = ('page', 'timeout', 'user_check')
def __init__(self, client, event, page, *, check=..., timeout=-1.0):
if check is ...:
check = get_auto_check(event)
self.page = page
self.timeout = timeout
self.user_check = check
check = top_level_check
get_timeout = top_level_get_timeout
async def initial_invoke(self):
self.content = self.page
self.components = self.BUTTONS
self.allowed_mentions = None
async def invoke(self, event):
interaction = event.interaction
if interaction == self.BUTTON_CANCEL:
self.cancel(CancelledError())
return False
| 27.829268 | 107 | 0.655565 | __all__ = ('Closer', )
from scarletio import CancelledError
from ....discord.interaction import ComponentButton, ComponentRow
from .menu import Menu
from .helpers import EMOJI_CANCEL, top_level_check, top_level_get_timeout, CUSTOM_ID_CANCEL, get_auto_check
class Closer(Menu):
BUTTON_CANCEL = ComponentButton(emoji=EMOJI_CANCEL, custom_id=CUSTOM_ID_CANCEL)
BUTTONS = ComponentRow(BUTTON_CANCEL,)
__slots__ = ('page', 'timeout', 'user_check')
def __init__(self, client, event, page, *, check=..., timeout=-1.0):
if check is ...:
check = get_auto_check(event)
self.page = page
self.timeout = timeout
self.user_check = check
check = top_level_check
get_timeout = top_level_get_timeout
async def initial_invoke(self):
self.content = self.page
self.components = self.BUTTONS
self.allowed_mentions = None
async def invoke(self, event):
interaction = event.interaction
if interaction == self.BUTTON_CANCEL:
self.cancel(CancelledError())
return False
| true | true |
1c321021a8d46291c5590afb59264fb3d1935edc | 956 | py | Python | expert_python/src/socket_http.py | MaiXiaochai/Droplet | 6d7fed9ca76678768a3752fa8df86a021acc3509 | [
"MIT"
] | null | null | null | expert_python/src/socket_http.py | MaiXiaochai/Droplet | 6d7fed9ca76678768a3752fa8df86a021acc3509 | [
"MIT"
] | null | null | null | expert_python/src/socket_http.py | MaiXiaochai/Droplet | 6d7fed9ca76678768a3752fa8df86a021acc3509 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# @File : socket_http.py
# @Time : 2019/3/4 23:26
# @Author : MaiXiaochai
# @Site : https://github.com/MaiXiaochai
import socket
from urllib.parse import urlparse
def get_url(url):
# 通过socket请求html
url = urlparse(url)
host = url.netloc
path = url.path
if path == "":
# http的一种请求方式
path = '/'
# 建立socket连接
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# 注意这里是80端口
client.connect((host, 80))
# 注意数据格式
client.send("GET {} HTTP/1.1\r\nHost:{}\r\nConnection:close\r\n\r\n".format(path, host).encode('utf8'))
# 注意这里如何接收所有数据
data = b""
while True:
d = client.recv(1024)
if d:
data += d
else:
break
# 这里注意编码不一定是utf8,视网站而定
data = data.decode('utf8')
print(data)
client.close()
if __name__ == "__main__":
url = 'http://www.baidu.com'
get_url(url)
| 19.916667 | 107 | 0.574268 |
import socket
from urllib.parse import urlparse
def get_url(url):
url = urlparse(url)
host = url.netloc
path = url.path
if path == "":
path = '/'
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((host, 80))
client.send("GET {} HTTP/1.1\r\nHost:{}\r\nConnection:close\r\n\r\n".format(path, host).encode('utf8'))
data = b""
while True:
d = client.recv(1024)
if d:
data += d
else:
break
data = data.decode('utf8')
print(data)
client.close()
if __name__ == "__main__":
url = 'http://www.baidu.com'
get_url(url)
| true | true |
1c321066b129f999453f696b189573231cce56ca | 601 | py | Python | AGONS/AGONS/test.py | CWSmith022/yigit-lab | 8ec1f7d0242d36351ef92bc6698358c9431f4c34 | [
"MIT"
] | null | null | null | AGONS/AGONS/test.py | CWSmith022/yigit-lab | 8ec1f7d0242d36351ef92bc6698358c9431f4c34 | [
"MIT"
] | null | null | null | AGONS/AGONS/test.py | CWSmith022/yigit-lab | 8ec1f7d0242d36351ef92bc6698358c9431f4c34 | [
"MIT"
] | null | null | null | # %%
"""Test how custom functions work with sklearn package."""
import numpy as np
from sklearn.preprocessing import FunctionTransformer
from sklearn.preprocessing import StandardScaler, MinMaxScaler
x = np.array([[1,2,3], [6,5,4], [8,7,9]])
print(x)
def SSRow(X):
X_ = X.copy()
X_t = StandardScaler().fit_transform(X_.T).T
return X_t
def MMRow(X):
X_ = X.copy()
X_t = MinMaxScaler().fit_transform(X_.T).T
return X_t
d = FunctionTransformer(SSRow)
print(d.fit_transform(x))
e = FunctionTransformer(MMRow)
print(e.fit_transform(x))
# %%
"""Testing AGONS with Iris Dataset"""
| 25.041667 | 62 | 0.698835 |
import numpy as np
from sklearn.preprocessing import FunctionTransformer
from sklearn.preprocessing import StandardScaler, MinMaxScaler
x = np.array([[1,2,3], [6,5,4], [8,7,9]])
print(x)
def SSRow(X):
X_ = X.copy()
X_t = StandardScaler().fit_transform(X_.T).T
return X_t
def MMRow(X):
X_ = X.copy()
X_t = MinMaxScaler().fit_transform(X_.T).T
return X_t
d = FunctionTransformer(SSRow)
print(d.fit_transform(x))
e = FunctionTransformer(MMRow)
print(e.fit_transform(x))
| true | true |
1c32122e73533e7f7ceb0fa50188f83c9951b7fd | 2,283 | py | Python | prepare-data.py | waytrue17/dynamic-training-with-apache-mxnet-on-aws | d6289f4002e4a3886f97a799a68bb653fea12672 | [
"Apache-2.0"
] | 54 | 2018-11-27T06:00:52.000Z | 2022-03-24T09:41:01.000Z | prepare-data.py | waytrue17/dynamic-training-with-apache-mxnet-on-aws | d6289f4002e4a3886f97a799a68bb653fea12672 | [
"Apache-2.0"
] | 3 | 2018-11-27T16:45:44.000Z | 2020-10-21T00:15:02.000Z | prepare-data.py | waytrue17/dynamic-training-with-apache-mxnet-on-aws | d6289f4002e4a3886f97a799a68bb653fea12672 | [
"Apache-2.0"
] | 18 | 2018-11-29T21:18:38.000Z | 2022-03-17T22:18:43.000Z | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#!/usr/bin/python
import subprocess
import os
import errno
def download_file(url, local_fname=None, force_write=False):
# requests is not default installed
import requests
if local_fname is None:
local_fname = url.split('/')[-1]
if not force_write and os.path.exists(local_fname):
return local_fname
dir_name = os.path.dirname(local_fname)
if dir_name != "":
if not os.path.exists(dir_name):
try: # try to create the directory if it doesn't exists
os.makedirs(dir_name)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
r = requests.get(url, stream=True)
assert r.status_code == 200, "failed to open %s" % url
with open(local_fname, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
return local_fname
def download_cifar10():
data_dir="data"
fnames = (os.path.join(data_dir, "cifar10_train.rec"),
os.path.join(data_dir, "cifar10_val.rec"))
download_file('http://data.mxnet.io/data/cifar10/cifar10_val.rec', fnames[1])
download_file('http://data.mxnet.io/data/cifar10/cifar10_train.rec', fnames[0])
return fnames
download_cifar10()
| 40.052632 | 87 | 0.697766 |
import subprocess
import os
import errno
def download_file(url, local_fname=None, force_write=False):
import requests
if local_fname is None:
local_fname = url.split('/')[-1]
if not force_write and os.path.exists(local_fname):
return local_fname
dir_name = os.path.dirname(local_fname)
if dir_name != "":
if not os.path.exists(dir_name):
try:
os.makedirs(dir_name)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
r = requests.get(url, stream=True)
assert r.status_code == 200, "failed to open %s" % url
with open(local_fname, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
return local_fname
def download_cifar10():
data_dir="data"
fnames = (os.path.join(data_dir, "cifar10_train.rec"),
os.path.join(data_dir, "cifar10_val.rec"))
download_file('http://data.mxnet.io/data/cifar10/cifar10_val.rec', fnames[1])
download_file('http://data.mxnet.io/data/cifar10/cifar10_train.rec', fnames[0])
return fnames
download_cifar10()
| true | true |
1c3213af9141a47d8c41a7dc75aec7b0dc6fc928 | 6,351 | py | Python | test/unit/module/config/test_config_mixin.py | Adam-sHub/cfn-lint | 4c501d01f87ec0ef9432dc407c5a9ac0025f00b6 | [
"MIT-0"
] | 1,134 | 2019-03-02T14:58:34.000Z | 2021-05-15T00:57:16.000Z | test/unit/module/config/test_config_mixin.py | Adam-sHub/cfn-lint | 4c501d01f87ec0ef9432dc407c5a9ac0025f00b6 | [
"MIT-0"
] | 1,122 | 2019-03-03T04:27:15.000Z | 2021-05-14T20:51:16.000Z | test/unit/module/config/test_config_mixin.py | Adam-sHub/cfn-lint | 4c501d01f87ec0ef9432dc407c5a9ac0025f00b6 | [
"MIT-0"
] | 297 | 2019-03-11T09:56:57.000Z | 2021-05-14T16:41:19.000Z | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import logging
import os
from test.testlib.testcase import BaseTestCase
from mock import patch
import cfnlint.config # pylint: disable=E0401
from cfnlint.helpers import REGIONS
LOGGER = logging.getLogger('cfnlint')
class TestConfigMixIn(BaseTestCase):
"""Test ConfigParser Arguments """
def tearDown(self):
"""Setup"""
for handler in LOGGER.handlers:
LOGGER.removeHandler(handler)
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_mix_in(self, yaml_mock):
""" Test mix in """
yaml_mock.side_effect = [
{"include_checks": ["I", "I1111"], "regions": ["us-west-2"]},
{}
]
config = cfnlint.config.ConfigMixIn(['--regions', 'us-west-1'])
self.assertEqual(config.regions, ['us-west-1'])
self.assertEqual(config.include_checks, ['W', 'E', 'I', 'I1111'])
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_precedence(self, yaml_mock):
""" Test precedence in """
yaml_mock.side_effect = [
{"include_checks": ["I"], "ignore_checks": ["E3001"], "regions": ["us-west-2"]},
{}
]
config = cfnlint.config.ConfigMixIn(['--include-checks', 'I1234', 'I4321'])
config.template_args = {
'Metadata': {
'cfn-lint': {
'config': {
'include_checks': ['I9876'],
'ignore_checks': ['W3001']
}
}
}
}
# config files wins
self.assertEqual(config.regions, ['us-west-2'])
# CLI should win
self.assertEqual(config.include_checks, ['W', 'E', 'I1234', 'I4321'])
# template file wins over config file
self.assertEqual(config.ignore_checks, ['W3001'])
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_file_output(self, yaml_mock):
""" Test precedence in """
yaml_mock.side_effect = [
{
"output_file": "test_output.txt"
},
{}
]
config = cfnlint.config.ConfigMixIn([])
# Config file wins
self.assertEqual(config.output_file, 'test_output.txt')
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_file_output_mixin(self, yaml_mock):
""" Test precedence in """
yaml_mock.side_effect = [
{
"output_file": "test_output.txt"
},
{}
]
config = cfnlint.config.ConfigMixIn(['--output-file', 'test_output_2.txt'])
# CLI args win
self.assertEqual(config.output_file, 'test_output_2.txt')
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_default_region(self, yaml_mock):
""" Test precedence in """
yaml_mock.side_effect = [
{},
{}
]
config = cfnlint.config.ConfigMixIn([])
# test defaults
self.assertEqual(config.regions, ['us-east-1'])
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_all_regions(self, yaml_mock):
""" Test precedence in """
yaml_mock.side_effect = [
{'regions': ['ALL_REGIONS']},
{}
]
config = cfnlint.config.ConfigMixIn([])
# test defaults
self.assertEqual(config.regions, REGIONS)
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_expand_paths(self, yaml_mock):
""" Test precedence in """
yaml_mock.side_effect = [
{'templates': ['test/fixtures/templates/public/*.yaml']},
{}
]
config = cfnlint.config.ConfigMixIn([])
# test defaults
self.assertEqual(config.templates, [
'test/fixtures/templates/public' + os.path.sep + 'lambda-poller.yaml',
'test/fixtures/templates/public' + os.path.sep + 'rds-cluster.yaml'])
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_expand_paths_failure(self, yaml_mock):
""" Test precedence in """
yaml_mock.side_effect = [
{'templates': ['test/fixtures/templates/badpath/*.yaml']},
{}
]
config = cfnlint.config.ConfigMixIn([])
# test defaults
self.assertEqual(config.templates, ['test/fixtures/templates/badpath/*.yaml'])
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_expand_ignore_templates(self, yaml_mock):
""" Test ignore templates """
yaml_mock.side_effect = [
{
'templates': ['test/fixtures/templates/bad/resources/iam/*.yaml'],
'ignore_templates': ['test/fixtures/templates/bad/resources/iam/resource_*.yaml']},
{}
]
config = cfnlint.config.ConfigMixIn([])
# test defaults
self.assertNotIn(
'test/fixtures/templates/bad/resources/iam/resource_policy.yaml', config.templates)
self.assertEqual(len(config.templates), 5)
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_merge(self, yaml_mock):
""" Test merging lists """
yaml_mock.side_effect = [
{"include_checks": ["I"], "ignore_checks": ["E3001"], "regions": ["us-west-2"]},
{}
]
config = cfnlint.config.ConfigMixIn(['--include-checks', 'I1234', 'I4321', '--merge-configs'])
config.template_args = {
'Metadata': {
'cfn-lint': {
'config': {
'include_checks': ['I9876'],
'ignore_checks': ['W3001']
}
}
}
}
# config files wins
self.assertEqual(config.regions, ['us-west-2'])
# CLI should win
self.assertEqual(config.include_checks, ['W', 'E', 'I1234', 'I4321', 'I9876', 'I'])
# template file wins over config file
self.assertEqual(config.ignore_checks, ['W3001', 'E3001'])
| 33.781915 | 102 | 0.572981 | import logging
import os
from test.testlib.testcase import BaseTestCase
from mock import patch
import cfnlint.config
from cfnlint.helpers import REGIONS
LOGGER = logging.getLogger('cfnlint')
class TestConfigMixIn(BaseTestCase):
def tearDown(self):
for handler in LOGGER.handlers:
LOGGER.removeHandler(handler)
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_mix_in(self, yaml_mock):
yaml_mock.side_effect = [
{"include_checks": ["I", "I1111"], "regions": ["us-west-2"]},
{}
]
config = cfnlint.config.ConfigMixIn(['--regions', 'us-west-1'])
self.assertEqual(config.regions, ['us-west-1'])
self.assertEqual(config.include_checks, ['W', 'E', 'I', 'I1111'])
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_precedence(self, yaml_mock):
yaml_mock.side_effect = [
{"include_checks": ["I"], "ignore_checks": ["E3001"], "regions": ["us-west-2"]},
{}
]
config = cfnlint.config.ConfigMixIn(['--include-checks', 'I1234', 'I4321'])
config.template_args = {
'Metadata': {
'cfn-lint': {
'config': {
'include_checks': ['I9876'],
'ignore_checks': ['W3001']
}
}
}
}
self.assertEqual(config.regions, ['us-west-2'])
self.assertEqual(config.include_checks, ['W', 'E', 'I1234', 'I4321'])
self.assertEqual(config.ignore_checks, ['W3001'])
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_file_output(self, yaml_mock):
yaml_mock.side_effect = [
{
"output_file": "test_output.txt"
},
{}
]
config = cfnlint.config.ConfigMixIn([])
self.assertEqual(config.output_file, 'test_output.txt')
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_file_output_mixin(self, yaml_mock):
yaml_mock.side_effect = [
{
"output_file": "test_output.txt"
},
{}
]
config = cfnlint.config.ConfigMixIn(['--output-file', 'test_output_2.txt'])
self.assertEqual(config.output_file, 'test_output_2.txt')
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_default_region(self, yaml_mock):
yaml_mock.side_effect = [
{},
{}
]
config = cfnlint.config.ConfigMixIn([])
self.assertEqual(config.regions, ['us-east-1'])
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_all_regions(self, yaml_mock):
yaml_mock.side_effect = [
{'regions': ['ALL_REGIONS']},
{}
]
config = cfnlint.config.ConfigMixIn([])
self.assertEqual(config.regions, REGIONS)
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_expand_paths(self, yaml_mock):
yaml_mock.side_effect = [
{'templates': ['test/fixtures/templates/public/*.yaml']},
{}
]
config = cfnlint.config.ConfigMixIn([])
self.assertEqual(config.templates, [
'test/fixtures/templates/public' + os.path.sep + 'lambda-poller.yaml',
'test/fixtures/templates/public' + os.path.sep + 'rds-cluster.yaml'])
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_expand_paths_failure(self, yaml_mock):
yaml_mock.side_effect = [
{'templates': ['test/fixtures/templates/badpath/*.yaml']},
{}
]
config = cfnlint.config.ConfigMixIn([])
self.assertEqual(config.templates, ['test/fixtures/templates/badpath/*.yaml'])
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_expand_ignore_templates(self, yaml_mock):
yaml_mock.side_effect = [
{
'templates': ['test/fixtures/templates/bad/resources/iam/*.yaml'],
'ignore_templates': ['test/fixtures/templates/bad/resources/iam/resource_*.yaml']},
{}
]
config = cfnlint.config.ConfigMixIn([])
self.assertNotIn(
'test/fixtures/templates/bad/resources/iam/resource_policy.yaml', config.templates)
self.assertEqual(len(config.templates), 5)
@patch('cfnlint.config.ConfigFileArgs._read_config', create=True)
def test_config_merge(self, yaml_mock):
yaml_mock.side_effect = [
{"include_checks": ["I"], "ignore_checks": ["E3001"], "regions": ["us-west-2"]},
{}
]
config = cfnlint.config.ConfigMixIn(['--include-checks', 'I1234', 'I4321', '--merge-configs'])
config.template_args = {
'Metadata': {
'cfn-lint': {
'config': {
'include_checks': ['I9876'],
'ignore_checks': ['W3001']
}
}
}
}
self.assertEqual(config.regions, ['us-west-2'])
self.assertEqual(config.include_checks, ['W', 'E', 'I1234', 'I4321', 'I9876', 'I'])
self.assertEqual(config.ignore_checks, ['W3001', 'E3001'])
| true | true |
1c32142d0236311c8aff644fbcf2d183aa3841e8 | 1,544 | py | Python | pip_services3_messaging/test/TestMessageReceiver.py | pip-services-python/pip-services-messaging-python | edaca5cd620a51e9d9f713811e64bb0f532851ce | [
"MIT"
] | null | null | null | pip_services3_messaging/test/TestMessageReceiver.py | pip-services-python/pip-services-messaging-python | edaca5cd620a51e9d9f713811e64bb0f532851ce | [
"MIT"
] | null | null | null | pip_services3_messaging/test/TestMessageReceiver.py | pip-services-python/pip-services-messaging-python | edaca5cd620a51e9d9f713811e64bb0f532851ce | [
"MIT"
] | 1 | 2020-03-19T22:19:30.000Z | 2020-03-19T22:19:30.000Z | # -*- coding: utf-8 -*-
import threading
from typing import List, Optional
from pip_services3_commons.run import ICleanable
from pip_services3_messaging.queues import IMessageReceiver, MessageEnvelope, IMessageQueue
class TestMessageReceiver(IMessageReceiver, ICleanable):
"""
TODO add description
"""
def __init__(self):
self.__messages: List[MessageEnvelope] = []
self.__lock = threading.Lock()
@property
def messages(self) -> List[MessageEnvelope]:
"""
Gets the list of received messages.
"""
return self.__messages
@property
def message_count(self) -> int:
"""
Gets the received message count.
"""
return len(self.__messages)
def receive_message(self, message: MessageEnvelope, queue: IMessageQueue):
"""
Receives incoming message from the queue.
:param message: an incoming message
:param queue: a queue where the message comes from
See :class:`MessageEnvelope <pip_services3_messaging.queues.MessageEnvelope.MessageEnvelope>`,
class:`IMessageQueue <pip_services3_messaging.queues.IMessageQueue.IMessageQueue>`
"""
with self.__lock:
self.__messages.append(message)
def clear(self, correlation_id: Optional[str]):
"""
Clears all received messagers.
:param correlation_id: (optional) transaction id to trace execution through call chain.
"""
with self.__lock:
self.__messages = []
| 29.132075 | 102 | 0.658031 |
import threading
from typing import List, Optional
from pip_services3_commons.run import ICleanable
from pip_services3_messaging.queues import IMessageReceiver, MessageEnvelope, IMessageQueue
class TestMessageReceiver(IMessageReceiver, ICleanable):
def __init__(self):
self.__messages: List[MessageEnvelope] = []
self.__lock = threading.Lock()
@property
def messages(self) -> List[MessageEnvelope]:
return self.__messages
@property
def message_count(self) -> int:
return len(self.__messages)
def receive_message(self, message: MessageEnvelope, queue: IMessageQueue):
with self.__lock:
self.__messages.append(message)
def clear(self, correlation_id: Optional[str]):
with self.__lock:
self.__messages = []
| true | true |
1c32155b38f939a3e5358977f470abd59cc365fb | 2,354 | py | Python | tflite_handtrack/handtrack.py | slothkong/handtrack | e4825535f858f83c15dc611fd80953313177f835 | [
"Apache-2.0"
] | null | null | null | tflite_handtrack/handtrack.py | slothkong/handtrack | e4825535f858f83c15dc611fd80953313177f835 | [
"Apache-2.0"
] | 5 | 2020-02-07T20:38:13.000Z | 2022-02-10T00:38:26.000Z | tflite_handtrack/handtrack.py | slothkong/handtrack | e4825535f858f83c15dc611fd80953313177f835 | [
"Apache-2.0"
] | null | null | null | """
Custom script to perform hand tracking and optionally cropping and saving the bounding boxes content. I created it using
the following libraries/resources:
1. Video capture uses Opencv to stream from a webcam.
2. The detection utilizes a pre-trained Palm Detector model developed by Google AI Research,
which was converted to a .tflite format for deployment on mobile devices. The model is available at:
https://github.com/google/mediapipe/blob/master/mediapipe/docs/hand_detection_mobile_gpu.md
3. The handling of the Tensorflow Lite model is based on examples available at:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/
"""
import os
import time
import argparse
import numpy as np
import cv2
from utils import preprocess_image, rescale_bbox
from detector import Detector
def parse_arguments():
"""
Parse command line arguments
:return: Parsed arguments
"""
# Define and parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument("--modeldir",
help="Folder the .tflite file is located.",
default="../tflite_model/")
parser.add_argument("--graph",
help="Name of the .tflite file.",
default="palm_detection_without_custom_op.tflite")
parser.add_argument("--labels",
help="Name of the labelmap file.",
default="palm_detection_labelmap.txt")
parser.add_argument("--min_conf",
help="Minimum confidence threshold for displaying detected hand palm.",
type=float,
default=0.7)
parser.add_argument("--input_filename",
help="Full filename of input file to process. Support formats: mp4, mp3, jpg, png",
required=True)
parsed_args = parser.parse_args()
return parsed_args
def main():
args = parse_arguments()
input_filename = args.input_filename
if os.splittext(input_filename)[1] in ["mp4", "mp3"]:
pass
elif os.splittext(input_filename)[1] in ["jpg", "png"]:
pass
else:
raise RuntimeError("Format of input file is not supported")
if __name__ == "__main__":
raise NotImplementedError("Implementation pending")
| 32.694444 | 120 | 0.657604 |
import os
import time
import argparse
import numpy as np
import cv2
from utils import preprocess_image, rescale_bbox
from detector import Detector
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--modeldir",
help="Folder the .tflite file is located.",
default="../tflite_model/")
parser.add_argument("--graph",
help="Name of the .tflite file.",
default="palm_detection_without_custom_op.tflite")
parser.add_argument("--labels",
help="Name of the labelmap file.",
default="palm_detection_labelmap.txt")
parser.add_argument("--min_conf",
help="Minimum confidence threshold for displaying detected hand palm.",
type=float,
default=0.7)
parser.add_argument("--input_filename",
help="Full filename of input file to process. Support formats: mp4, mp3, jpg, png",
required=True)
parsed_args = parser.parse_args()
return parsed_args
def main():
args = parse_arguments()
input_filename = args.input_filename
if os.splittext(input_filename)[1] in ["mp4", "mp3"]:
pass
elif os.splittext(input_filename)[1] in ["jpg", "png"]:
pass
else:
raise RuntimeError("Format of input file is not supported")
if __name__ == "__main__":
raise NotImplementedError("Implementation pending")
| true | true |
1c3215928d8c8c04642d33f7563762d1827f09b1 | 2,711 | py | Python | theano/scan_module/tests/test_scan_checkpoints.py | gundun/theano | 09d17fff10487dca7149e34601b8c6efdc572a19 | [
"BSD-3-Clause"
] | null | null | null | theano/scan_module/tests/test_scan_checkpoints.py | gundun/theano | 09d17fff10487dca7149e34601b8c6efdc572a19 | [
"BSD-3-Clause"
] | null | null | null | theano/scan_module/tests/test_scan_checkpoints.py | gundun/theano | 09d17fff10487dca7149e34601b8c6efdc572a19 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, print_function, division
import numpy
import unittest
import theano
import theano.tensor as T
try:
from pygpu.gpuarray import GpuArrayException
PYGPU_AVAILABLE = True
except ImportError:
PYGPU_AVAILABLE = False
class TestScanCheckpoint(unittest.TestCase):
def setUp(self):
self.k = T.iscalar("k")
self.A = T.vector("A")
result, _ = theano.scan(
fn=lambda prior_result, A: prior_result * A,
outputs_info=T.ones_like(self.A),
non_sequences=self.A,
n_steps=self.k)
result_check, _ = theano.scan_checkpoints(
fn=lambda prior_result, A: prior_result * A,
outputs_info=T.ones_like(self.A),
non_sequences=self.A,
n_steps=self.k,
save_every_N=100)
self.result = result[-1]
self.result_check = result_check[-1]
self.grad_A = T.grad(self.result.sum(), self.A)
self.grad_A_check = T.grad(self.result_check.sum(), self.A)
def test_forward_pass(self):
"""Test forward computation of A**k."""
f = theano.function(inputs=[self.A, self.k],
outputs=[self.result, self.result_check])
out, out_check = f(range(10), 100)
assert numpy.allclose(out, out_check)
def test_backward_pass(self):
"""Test gradient computation of A**k."""
f = theano.function(inputs=[self.A, self.k],
outputs=[self.grad_A, self.grad_A_check])
out, out_check = f(range(10), 100)
assert numpy.allclose(out, out_check)
@unittest.skipUnless(PYGPU_AVAILABLE, 'Requires pygpu.')
def test_memory(self):
"""Test that scan_checkpoint reduces memory usage."""
if None not in theano.gpuarray.type.list_contexts():
return unittest.SkipTest('Requires gpuarray backend.')
f = theano.function(inputs=[self.A, self.k],
outputs=self.grad_A)
f_check = theano.function(inputs=[self.A, self.k],
outputs=self.grad_A_check)
free_gmem = theano.gpuarray.type._context_reg[None].free_gmem
data = numpy.ones(free_gmem / 3000, dtype=numpy.float32)
# Check that it works with the checkpoints
f_check(data, 1000)
# Check that the basic scan fails in that case
self.assertRaises(GpuArrayException, f, data, 1000)
def test_taps_error(self):
"""Test that an error rises if we use taps in outputs_info."""
self.assertRaises(RuntimeError, theano.scan_checkpoints,
lambda: None, [], {'initial': self.A, 'taps': [-2]})
| 38.183099 | 78 | 0.61564 | from __future__ import absolute_import, print_function, division
import numpy
import unittest
import theano
import theano.tensor as T
try:
from pygpu.gpuarray import GpuArrayException
PYGPU_AVAILABLE = True
except ImportError:
PYGPU_AVAILABLE = False
class TestScanCheckpoint(unittest.TestCase):
def setUp(self):
self.k = T.iscalar("k")
self.A = T.vector("A")
result, _ = theano.scan(
fn=lambda prior_result, A: prior_result * A,
outputs_info=T.ones_like(self.A),
non_sequences=self.A,
n_steps=self.k)
result_check, _ = theano.scan_checkpoints(
fn=lambda prior_result, A: prior_result * A,
outputs_info=T.ones_like(self.A),
non_sequences=self.A,
n_steps=self.k,
save_every_N=100)
self.result = result[-1]
self.result_check = result_check[-1]
self.grad_A = T.grad(self.result.sum(), self.A)
self.grad_A_check = T.grad(self.result_check.sum(), self.A)
def test_forward_pass(self):
f = theano.function(inputs=[self.A, self.k],
outputs=[self.result, self.result_check])
out, out_check = f(range(10), 100)
assert numpy.allclose(out, out_check)
def test_backward_pass(self):
f = theano.function(inputs=[self.A, self.k],
outputs=[self.grad_A, self.grad_A_check])
out, out_check = f(range(10), 100)
assert numpy.allclose(out, out_check)
@unittest.skipUnless(PYGPU_AVAILABLE, 'Requires pygpu.')
def test_memory(self):
if None not in theano.gpuarray.type.list_contexts():
return unittest.SkipTest('Requires gpuarray backend.')
f = theano.function(inputs=[self.A, self.k],
outputs=self.grad_A)
f_check = theano.function(inputs=[self.A, self.k],
outputs=self.grad_A_check)
free_gmem = theano.gpuarray.type._context_reg[None].free_gmem
data = numpy.ones(free_gmem / 3000, dtype=numpy.float32)
f_check(data, 1000)
self.assertRaises(GpuArrayException, f, data, 1000)
def test_taps_error(self):
self.assertRaises(RuntimeError, theano.scan_checkpoints,
lambda: None, [], {'initial': self.A, 'taps': [-2]})
| true | true |
1c32165cf5ae8a05b1774be9a26bf5f4f47899aa | 64 | py | Python | network/__init__.py | laerreal/librfunc | 5f46e75d52966481c19ca19081892ff9b2c17990 | [
"BSD-3-Clause"
] | null | null | null | network/__init__.py | laerreal/librfunc | 5f46e75d52966481c19ca19081892ff9b2c17990 | [
"BSD-3-Clause"
] | null | null | null | network/__init__.py | laerreal/librfunc | 5f46e75d52966481c19ca19081892ff9b2c17990 | [
"BSD-3-Clause"
] | null | null | null | from ..importall import gen_this
gen_this()
from .this import *
| 16 | 32 | 0.765625 | from ..importall import gen_this
gen_this()
from .this import *
| true | true |
1c3216d667f35002ba73f3edc96d6e94321b667a | 20,079 | py | Python | python/services/bigquery/beta/routine.py | trodge/declarative-resource-client-library | 2cb7718a5074776b3113cc18a7483b54022238f3 | [
"Apache-2.0"
] | null | null | null | python/services/bigquery/beta/routine.py | trodge/declarative-resource-client-library | 2cb7718a5074776b3113cc18a7483b54022238f3 | [
"Apache-2.0"
] | null | null | null | python/services/bigquery/beta/routine.py | trodge/declarative-resource-client-library | 2cb7718a5074776b3113cc18a7483b54022238f3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.bigquery import routine_pb2
from google3.cloud.graphite.mmv2.services.google.bigquery import routine_pb2_grpc
from typing import List
class Routine(object):
def __init__(
self,
etag: str = None,
name: str = None,
project: str = None,
dataset: str = None,
routine_type: str = None,
creation_time: int = None,
last_modified_time: int = None,
language: str = None,
arguments: list = None,
return_type: dict = None,
imported_libraries: list = None,
definition_body: str = None,
description: str = None,
determinism_level: str = None,
strict_mode: bool = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.project = project
self.dataset = dataset
self.routine_type = routine_type
self.language = language
self.arguments = arguments
self.return_type = return_type
self.imported_libraries = imported_libraries
self.definition_body = definition_body
self.description = description
self.determinism_level = determinism_level
self.strict_mode = strict_mode
self.service_account_file = service_account_file
def apply(self):
stub = routine_pb2_grpc.BigqueryBetaRoutineServiceStub(channel.Channel())
request = routine_pb2.ApplyBigqueryBetaRoutineRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.dataset):
request.resource.dataset = Primitive.to_proto(self.dataset)
if RoutineRoutineTypeEnum.to_proto(self.routine_type):
request.resource.routine_type = RoutineRoutineTypeEnum.to_proto(
self.routine_type
)
if RoutineLanguageEnum.to_proto(self.language):
request.resource.language = RoutineLanguageEnum.to_proto(self.language)
if RoutineArgumentsArray.to_proto(self.arguments):
request.resource.arguments.extend(
RoutineArgumentsArray.to_proto(self.arguments)
)
if RoutineArgumentsDataType.to_proto(self.return_type):
request.resource.return_type.CopyFrom(
RoutineArgumentsDataType.to_proto(self.return_type)
)
else:
request.resource.ClearField("return_type")
if Primitive.to_proto(self.imported_libraries):
request.resource.imported_libraries.extend(
Primitive.to_proto(self.imported_libraries)
)
if Primitive.to_proto(self.definition_body):
request.resource.definition_body = Primitive.to_proto(self.definition_body)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if RoutineDeterminismLevelEnum.to_proto(self.determinism_level):
request.resource.determinism_level = RoutineDeterminismLevelEnum.to_proto(
self.determinism_level
)
if Primitive.to_proto(self.strict_mode):
request.resource.strict_mode = Primitive.to_proto(self.strict_mode)
request.service_account_file = self.service_account_file
response = stub.ApplyBigqueryBetaRoutine(request)
self.etag = Primitive.from_proto(response.etag)
self.name = Primitive.from_proto(response.name)
self.project = Primitive.from_proto(response.project)
self.dataset = Primitive.from_proto(response.dataset)
self.routine_type = RoutineRoutineTypeEnum.from_proto(response.routine_type)
self.creation_time = Primitive.from_proto(response.creation_time)
self.last_modified_time = Primitive.from_proto(response.last_modified_time)
self.language = RoutineLanguageEnum.from_proto(response.language)
self.arguments = RoutineArgumentsArray.from_proto(response.arguments)
self.return_type = RoutineArgumentsDataType.from_proto(response.return_type)
self.imported_libraries = Primitive.from_proto(response.imported_libraries)
self.definition_body = Primitive.from_proto(response.definition_body)
self.description = Primitive.from_proto(response.description)
self.determinism_level = RoutineDeterminismLevelEnum.from_proto(
response.determinism_level
)
self.strict_mode = Primitive.from_proto(response.strict_mode)
def delete(self):
stub = routine_pb2_grpc.BigqueryBetaRoutineServiceStub(channel.Channel())
request = routine_pb2.DeleteBigqueryBetaRoutineRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.dataset):
request.resource.dataset = Primitive.to_proto(self.dataset)
if RoutineRoutineTypeEnum.to_proto(self.routine_type):
request.resource.routine_type = RoutineRoutineTypeEnum.to_proto(
self.routine_type
)
if RoutineLanguageEnum.to_proto(self.language):
request.resource.language = RoutineLanguageEnum.to_proto(self.language)
if RoutineArgumentsArray.to_proto(self.arguments):
request.resource.arguments.extend(
RoutineArgumentsArray.to_proto(self.arguments)
)
if RoutineArgumentsDataType.to_proto(self.return_type):
request.resource.return_type.CopyFrom(
RoutineArgumentsDataType.to_proto(self.return_type)
)
else:
request.resource.ClearField("return_type")
if Primitive.to_proto(self.imported_libraries):
request.resource.imported_libraries.extend(
Primitive.to_proto(self.imported_libraries)
)
if Primitive.to_proto(self.definition_body):
request.resource.definition_body = Primitive.to_proto(self.definition_body)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if RoutineDeterminismLevelEnum.to_proto(self.determinism_level):
request.resource.determinism_level = RoutineDeterminismLevelEnum.to_proto(
self.determinism_level
)
if Primitive.to_proto(self.strict_mode):
request.resource.strict_mode = Primitive.to_proto(self.strict_mode)
response = stub.DeleteBigqueryBetaRoutine(request)
@classmethod
def list(self, project, dataset, service_account_file=""):
stub = routine_pb2_grpc.BigqueryBetaRoutineServiceStub(channel.Channel())
request = routine_pb2.ListBigqueryBetaRoutineRequest()
request.service_account_file = service_account_file
request.Project = project
request.Dataset = dataset
return stub.ListBigqueryBetaRoutine(request).items
def to_proto(self):
resource = routine_pb2.BigqueryBetaRoutine()
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.dataset):
resource.dataset = Primitive.to_proto(self.dataset)
if RoutineRoutineTypeEnum.to_proto(self.routine_type):
resource.routine_type = RoutineRoutineTypeEnum.to_proto(self.routine_type)
if RoutineLanguageEnum.to_proto(self.language):
resource.language = RoutineLanguageEnum.to_proto(self.language)
if RoutineArgumentsArray.to_proto(self.arguments):
resource.arguments.extend(RoutineArgumentsArray.to_proto(self.arguments))
if RoutineArgumentsDataType.to_proto(self.return_type):
resource.return_type.CopyFrom(
RoutineArgumentsDataType.to_proto(self.return_type)
)
else:
resource.ClearField("return_type")
if Primitive.to_proto(self.imported_libraries):
resource.imported_libraries.extend(
Primitive.to_proto(self.imported_libraries)
)
if Primitive.to_proto(self.definition_body):
resource.definition_body = Primitive.to_proto(self.definition_body)
if Primitive.to_proto(self.description):
resource.description = Primitive.to_proto(self.description)
if RoutineDeterminismLevelEnum.to_proto(self.determinism_level):
resource.determinism_level = RoutineDeterminismLevelEnum.to_proto(
self.determinism_level
)
if Primitive.to_proto(self.strict_mode):
resource.strict_mode = Primitive.to_proto(self.strict_mode)
return resource
class RoutineArguments(object):
def __init__(
self,
name: str = None,
argument_kind: str = None,
mode: str = None,
data_type: dict = None,
):
self.name = name
self.argument_kind = argument_kind
self.mode = mode
self.data_type = data_type
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = routine_pb2.BigqueryBetaRoutineArguments()
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if RoutineArgumentsArgumentKindEnum.to_proto(resource.argument_kind):
res.argument_kind = RoutineArgumentsArgumentKindEnum.to_proto(
resource.argument_kind
)
if RoutineArgumentsModeEnum.to_proto(resource.mode):
res.mode = RoutineArgumentsModeEnum.to_proto(resource.mode)
if RoutineArgumentsDataType.to_proto(resource.data_type):
res.data_type.CopyFrom(
RoutineArgumentsDataType.to_proto(resource.data_type)
)
else:
res.ClearField("data_type")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return RoutineArguments(
name=Primitive.from_proto(resource.name),
argument_kind=RoutineArgumentsArgumentKindEnum.from_proto(
resource.argument_kind
),
mode=RoutineArgumentsModeEnum.from_proto(resource.mode),
data_type=RoutineArgumentsDataType.from_proto(resource.data_type),
)
class RoutineArgumentsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [RoutineArguments.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [RoutineArguments.from_proto(i) for i in resources]
class RoutineArgumentsDataType(object):
def __init__(
self,
type_kind: str = None,
array_element_type: dict = None,
struct_type: dict = None,
):
self.type_kind = type_kind
self.array_element_type = array_element_type
self.struct_type = struct_type
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = routine_pb2.BigqueryBetaRoutineArgumentsDataType()
if RoutineArgumentsDataTypeTypeKindEnum.to_proto(resource.type_kind):
res.type_kind = RoutineArgumentsDataTypeTypeKindEnum.to_proto(
resource.type_kind
)
if RoutineArgumentsDataType.to_proto(resource.array_element_type):
res.array_element_type.CopyFrom(
RoutineArgumentsDataType.to_proto(resource.array_element_type)
)
else:
res.ClearField("array_element_type")
if RoutineArgumentsDataTypeStructType.to_proto(resource.struct_type):
res.struct_type.CopyFrom(
RoutineArgumentsDataTypeStructType.to_proto(resource.struct_type)
)
else:
res.ClearField("struct_type")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return RoutineArgumentsDataType(
type_kind=RoutineArgumentsDataTypeTypeKindEnum.from_proto(
resource.type_kind
),
array_element_type=RoutineArgumentsDataType.from_proto(
resource.array_element_type
),
struct_type=RoutineArgumentsDataTypeStructType.from_proto(
resource.struct_type
),
)
class RoutineArgumentsDataTypeArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [RoutineArgumentsDataType.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [RoutineArgumentsDataType.from_proto(i) for i in resources]
class RoutineArgumentsDataTypeStructType(object):
def __init__(self, fields: list = None):
self.fields = fields
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = routine_pb2.BigqueryBetaRoutineArgumentsDataTypeStructType()
if RoutineArgumentsDataTypeStructTypeFieldsArray.to_proto(resource.fields):
res.fields.extend(
RoutineArgumentsDataTypeStructTypeFieldsArray.to_proto(resource.fields)
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return RoutineArgumentsDataTypeStructType(
fields=RoutineArgumentsDataTypeStructTypeFieldsArray.from_proto(
resource.fields
),
)
class RoutineArgumentsDataTypeStructTypeArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [RoutineArgumentsDataTypeStructType.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [RoutineArgumentsDataTypeStructType.from_proto(i) for i in resources]
class RoutineArgumentsDataTypeStructTypeFields(object):
def __init__(self, name: str = None, type: dict = None):
self.name = name
self.type = type
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = routine_pb2.BigqueryBetaRoutineArgumentsDataTypeStructTypeFields()
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if RoutineArgumentsDataType.to_proto(resource.type):
res.type.CopyFrom(RoutineArgumentsDataType.to_proto(resource.type))
else:
res.ClearField("type")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return RoutineArgumentsDataTypeStructTypeFields(
name=Primitive.from_proto(resource.name),
type=RoutineArgumentsDataType.from_proto(resource.type),
)
class RoutineArgumentsDataTypeStructTypeFieldsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [RoutineArgumentsDataTypeStructTypeFields.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [
RoutineArgumentsDataTypeStructTypeFields.from_proto(i) for i in resources
]
class RoutineRoutineTypeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineRoutineTypeEnum.Value(
"BigqueryBetaRoutineRoutineTypeEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineRoutineTypeEnum.Name(resource)[
len("BigqueryBetaRoutineRoutineTypeEnum") :
]
class RoutineLanguageEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineLanguageEnum.Value(
"BigqueryBetaRoutineLanguageEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineLanguageEnum.Name(resource)[
len("BigqueryBetaRoutineLanguageEnum") :
]
class RoutineArgumentsArgumentKindEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineArgumentsArgumentKindEnum.Value(
"BigqueryBetaRoutineArgumentsArgumentKindEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineArgumentsArgumentKindEnum.Name(resource)[
len("BigqueryBetaRoutineArgumentsArgumentKindEnum") :
]
class RoutineArgumentsModeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineArgumentsModeEnum.Value(
"BigqueryBetaRoutineArgumentsModeEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineArgumentsModeEnum.Name(resource)[
len("BigqueryBetaRoutineArgumentsModeEnum") :
]
class RoutineArgumentsDataTypeTypeKindEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineArgumentsDataTypeTypeKindEnum.Value(
"BigqueryBetaRoutineArgumentsDataTypeTypeKindEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineArgumentsDataTypeTypeKindEnum.Name(
resource
)[len("BigqueryBetaRoutineArgumentsDataTypeTypeKindEnum") :]
class RoutineDeterminismLevelEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineDeterminismLevelEnum.Value(
"BigqueryBetaRoutineDeterminismLevelEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineDeterminismLevelEnum.Name(resource)[
len("BigqueryBetaRoutineDeterminismLevelEnum") :
]
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
| 36.243682 | 88 | 0.675581 |
from connector import channel
from google3.cloud.graphite.mmv2.services.google.bigquery import routine_pb2
from google3.cloud.graphite.mmv2.services.google.bigquery import routine_pb2_grpc
from typing import List
class Routine(object):
def __init__(
self,
etag: str = None,
name: str = None,
project: str = None,
dataset: str = None,
routine_type: str = None,
creation_time: int = None,
last_modified_time: int = None,
language: str = None,
arguments: list = None,
return_type: dict = None,
imported_libraries: list = None,
definition_body: str = None,
description: str = None,
determinism_level: str = None,
strict_mode: bool = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.project = project
self.dataset = dataset
self.routine_type = routine_type
self.language = language
self.arguments = arguments
self.return_type = return_type
self.imported_libraries = imported_libraries
self.definition_body = definition_body
self.description = description
self.determinism_level = determinism_level
self.strict_mode = strict_mode
self.service_account_file = service_account_file
def apply(self):
stub = routine_pb2_grpc.BigqueryBetaRoutineServiceStub(channel.Channel())
request = routine_pb2.ApplyBigqueryBetaRoutineRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.dataset):
request.resource.dataset = Primitive.to_proto(self.dataset)
if RoutineRoutineTypeEnum.to_proto(self.routine_type):
request.resource.routine_type = RoutineRoutineTypeEnum.to_proto(
self.routine_type
)
if RoutineLanguageEnum.to_proto(self.language):
request.resource.language = RoutineLanguageEnum.to_proto(self.language)
if RoutineArgumentsArray.to_proto(self.arguments):
request.resource.arguments.extend(
RoutineArgumentsArray.to_proto(self.arguments)
)
if RoutineArgumentsDataType.to_proto(self.return_type):
request.resource.return_type.CopyFrom(
RoutineArgumentsDataType.to_proto(self.return_type)
)
else:
request.resource.ClearField("return_type")
if Primitive.to_proto(self.imported_libraries):
request.resource.imported_libraries.extend(
Primitive.to_proto(self.imported_libraries)
)
if Primitive.to_proto(self.definition_body):
request.resource.definition_body = Primitive.to_proto(self.definition_body)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if RoutineDeterminismLevelEnum.to_proto(self.determinism_level):
request.resource.determinism_level = RoutineDeterminismLevelEnum.to_proto(
self.determinism_level
)
if Primitive.to_proto(self.strict_mode):
request.resource.strict_mode = Primitive.to_proto(self.strict_mode)
request.service_account_file = self.service_account_file
response = stub.ApplyBigqueryBetaRoutine(request)
self.etag = Primitive.from_proto(response.etag)
self.name = Primitive.from_proto(response.name)
self.project = Primitive.from_proto(response.project)
self.dataset = Primitive.from_proto(response.dataset)
self.routine_type = RoutineRoutineTypeEnum.from_proto(response.routine_type)
self.creation_time = Primitive.from_proto(response.creation_time)
self.last_modified_time = Primitive.from_proto(response.last_modified_time)
self.language = RoutineLanguageEnum.from_proto(response.language)
self.arguments = RoutineArgumentsArray.from_proto(response.arguments)
self.return_type = RoutineArgumentsDataType.from_proto(response.return_type)
self.imported_libraries = Primitive.from_proto(response.imported_libraries)
self.definition_body = Primitive.from_proto(response.definition_body)
self.description = Primitive.from_proto(response.description)
self.determinism_level = RoutineDeterminismLevelEnum.from_proto(
response.determinism_level
)
self.strict_mode = Primitive.from_proto(response.strict_mode)
def delete(self):
stub = routine_pb2_grpc.BigqueryBetaRoutineServiceStub(channel.Channel())
request = routine_pb2.DeleteBigqueryBetaRoutineRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.dataset):
request.resource.dataset = Primitive.to_proto(self.dataset)
if RoutineRoutineTypeEnum.to_proto(self.routine_type):
request.resource.routine_type = RoutineRoutineTypeEnum.to_proto(
self.routine_type
)
if RoutineLanguageEnum.to_proto(self.language):
request.resource.language = RoutineLanguageEnum.to_proto(self.language)
if RoutineArgumentsArray.to_proto(self.arguments):
request.resource.arguments.extend(
RoutineArgumentsArray.to_proto(self.arguments)
)
if RoutineArgumentsDataType.to_proto(self.return_type):
request.resource.return_type.CopyFrom(
RoutineArgumentsDataType.to_proto(self.return_type)
)
else:
request.resource.ClearField("return_type")
if Primitive.to_proto(self.imported_libraries):
request.resource.imported_libraries.extend(
Primitive.to_proto(self.imported_libraries)
)
if Primitive.to_proto(self.definition_body):
request.resource.definition_body = Primitive.to_proto(self.definition_body)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if RoutineDeterminismLevelEnum.to_proto(self.determinism_level):
request.resource.determinism_level = RoutineDeterminismLevelEnum.to_proto(
self.determinism_level
)
if Primitive.to_proto(self.strict_mode):
request.resource.strict_mode = Primitive.to_proto(self.strict_mode)
response = stub.DeleteBigqueryBetaRoutine(request)
@classmethod
def list(self, project, dataset, service_account_file=""):
stub = routine_pb2_grpc.BigqueryBetaRoutineServiceStub(channel.Channel())
request = routine_pb2.ListBigqueryBetaRoutineRequest()
request.service_account_file = service_account_file
request.Project = project
request.Dataset = dataset
return stub.ListBigqueryBetaRoutine(request).items
def to_proto(self):
resource = routine_pb2.BigqueryBetaRoutine()
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.dataset):
resource.dataset = Primitive.to_proto(self.dataset)
if RoutineRoutineTypeEnum.to_proto(self.routine_type):
resource.routine_type = RoutineRoutineTypeEnum.to_proto(self.routine_type)
if RoutineLanguageEnum.to_proto(self.language):
resource.language = RoutineLanguageEnum.to_proto(self.language)
if RoutineArgumentsArray.to_proto(self.arguments):
resource.arguments.extend(RoutineArgumentsArray.to_proto(self.arguments))
if RoutineArgumentsDataType.to_proto(self.return_type):
resource.return_type.CopyFrom(
RoutineArgumentsDataType.to_proto(self.return_type)
)
else:
resource.ClearField("return_type")
if Primitive.to_proto(self.imported_libraries):
resource.imported_libraries.extend(
Primitive.to_proto(self.imported_libraries)
)
if Primitive.to_proto(self.definition_body):
resource.definition_body = Primitive.to_proto(self.definition_body)
if Primitive.to_proto(self.description):
resource.description = Primitive.to_proto(self.description)
if RoutineDeterminismLevelEnum.to_proto(self.determinism_level):
resource.determinism_level = RoutineDeterminismLevelEnum.to_proto(
self.determinism_level
)
if Primitive.to_proto(self.strict_mode):
resource.strict_mode = Primitive.to_proto(self.strict_mode)
return resource
class RoutineArguments(object):
def __init__(
self,
name: str = None,
argument_kind: str = None,
mode: str = None,
data_type: dict = None,
):
self.name = name
self.argument_kind = argument_kind
self.mode = mode
self.data_type = data_type
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = routine_pb2.BigqueryBetaRoutineArguments()
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if RoutineArgumentsArgumentKindEnum.to_proto(resource.argument_kind):
res.argument_kind = RoutineArgumentsArgumentKindEnum.to_proto(
resource.argument_kind
)
if RoutineArgumentsModeEnum.to_proto(resource.mode):
res.mode = RoutineArgumentsModeEnum.to_proto(resource.mode)
if RoutineArgumentsDataType.to_proto(resource.data_type):
res.data_type.CopyFrom(
RoutineArgumentsDataType.to_proto(resource.data_type)
)
else:
res.ClearField("data_type")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return RoutineArguments(
name=Primitive.from_proto(resource.name),
argument_kind=RoutineArgumentsArgumentKindEnum.from_proto(
resource.argument_kind
),
mode=RoutineArgumentsModeEnum.from_proto(resource.mode),
data_type=RoutineArgumentsDataType.from_proto(resource.data_type),
)
class RoutineArgumentsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [RoutineArguments.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [RoutineArguments.from_proto(i) for i in resources]
class RoutineArgumentsDataType(object):
def __init__(
self,
type_kind: str = None,
array_element_type: dict = None,
struct_type: dict = None,
):
self.type_kind = type_kind
self.array_element_type = array_element_type
self.struct_type = struct_type
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = routine_pb2.BigqueryBetaRoutineArgumentsDataType()
if RoutineArgumentsDataTypeTypeKindEnum.to_proto(resource.type_kind):
res.type_kind = RoutineArgumentsDataTypeTypeKindEnum.to_proto(
resource.type_kind
)
if RoutineArgumentsDataType.to_proto(resource.array_element_type):
res.array_element_type.CopyFrom(
RoutineArgumentsDataType.to_proto(resource.array_element_type)
)
else:
res.ClearField("array_element_type")
if RoutineArgumentsDataTypeStructType.to_proto(resource.struct_type):
res.struct_type.CopyFrom(
RoutineArgumentsDataTypeStructType.to_proto(resource.struct_type)
)
else:
res.ClearField("struct_type")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return RoutineArgumentsDataType(
type_kind=RoutineArgumentsDataTypeTypeKindEnum.from_proto(
resource.type_kind
),
array_element_type=RoutineArgumentsDataType.from_proto(
resource.array_element_type
),
struct_type=RoutineArgumentsDataTypeStructType.from_proto(
resource.struct_type
),
)
class RoutineArgumentsDataTypeArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [RoutineArgumentsDataType.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [RoutineArgumentsDataType.from_proto(i) for i in resources]
class RoutineArgumentsDataTypeStructType(object):
def __init__(self, fields: list = None):
self.fields = fields
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = routine_pb2.BigqueryBetaRoutineArgumentsDataTypeStructType()
if RoutineArgumentsDataTypeStructTypeFieldsArray.to_proto(resource.fields):
res.fields.extend(
RoutineArgumentsDataTypeStructTypeFieldsArray.to_proto(resource.fields)
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return RoutineArgumentsDataTypeStructType(
fields=RoutineArgumentsDataTypeStructTypeFieldsArray.from_proto(
resource.fields
),
)
class RoutineArgumentsDataTypeStructTypeArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [RoutineArgumentsDataTypeStructType.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [RoutineArgumentsDataTypeStructType.from_proto(i) for i in resources]
class RoutineArgumentsDataTypeStructTypeFields(object):
def __init__(self, name: str = None, type: dict = None):
self.name = name
self.type = type
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = routine_pb2.BigqueryBetaRoutineArgumentsDataTypeStructTypeFields()
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if RoutineArgumentsDataType.to_proto(resource.type):
res.type.CopyFrom(RoutineArgumentsDataType.to_proto(resource.type))
else:
res.ClearField("type")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return RoutineArgumentsDataTypeStructTypeFields(
name=Primitive.from_proto(resource.name),
type=RoutineArgumentsDataType.from_proto(resource.type),
)
class RoutineArgumentsDataTypeStructTypeFieldsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [RoutineArgumentsDataTypeStructTypeFields.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [
RoutineArgumentsDataTypeStructTypeFields.from_proto(i) for i in resources
]
class RoutineRoutineTypeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineRoutineTypeEnum.Value(
"BigqueryBetaRoutineRoutineTypeEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineRoutineTypeEnum.Name(resource)[
len("BigqueryBetaRoutineRoutineTypeEnum") :
]
class RoutineLanguageEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineLanguageEnum.Value(
"BigqueryBetaRoutineLanguageEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineLanguageEnum.Name(resource)[
len("BigqueryBetaRoutineLanguageEnum") :
]
class RoutineArgumentsArgumentKindEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineArgumentsArgumentKindEnum.Value(
"BigqueryBetaRoutineArgumentsArgumentKindEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineArgumentsArgumentKindEnum.Name(resource)[
len("BigqueryBetaRoutineArgumentsArgumentKindEnum") :
]
class RoutineArgumentsModeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineArgumentsModeEnum.Value(
"BigqueryBetaRoutineArgumentsModeEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineArgumentsModeEnum.Name(resource)[
len("BigqueryBetaRoutineArgumentsModeEnum") :
]
class RoutineArgumentsDataTypeTypeKindEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineArgumentsDataTypeTypeKindEnum.Value(
"BigqueryBetaRoutineArgumentsDataTypeTypeKindEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineArgumentsDataTypeTypeKindEnum.Name(
resource
)[len("BigqueryBetaRoutineArgumentsDataTypeTypeKindEnum") :]
class RoutineDeterminismLevelEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineDeterminismLevelEnum.Value(
"BigqueryBetaRoutineDeterminismLevelEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return routine_pb2.BigqueryBetaRoutineDeterminismLevelEnum.Name(resource)[
len("BigqueryBetaRoutineDeterminismLevelEnum") :
]
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
| true | true |
1c3218445516a8e8e61d1a56cba390d2063fdbb0 | 7,575 | py | Python | Network.py | qingqinl/Movie_recommendation_system | 8896813b56a02e80c30dec845cc9ffb9d946426a | [
"MIT"
] | 4 | 2019-05-07T13:57:44.000Z | 2021-05-04T10:00:20.000Z | Network.py | qingqinl/Movie_recommendation_system | 8896813b56a02e80c30dec845cc9ffb9d946426a | [
"MIT"
] | null | null | null | Network.py | qingqinl/Movie_recommendation_system | 8896813b56a02e80c30dec845cc9ffb9d946426a | [
"MIT"
] | 5 | 2018-04-21T08:02:11.000Z | 2019-05-07T13:58:44.000Z | from model_Init import *
## 定义User的嵌入矩阵
def get_user_embedding(uid, user_gender, user_age, user_job):
with tf.name_scope("user_embedding"):
uid_embed_matrix = tf.Variable(tf.random_uniform([uid_max, embed_dim], -1, 1), name = "uid_embed_matrix")
uid_embed_layer = tf.nn.embedding_lookup(uid_embed_matrix, uid, name = "uid_embed_layer")
gender_embed_matrix = tf.Variable(tf.random_uniform([gender_max, embed_dim // 2], -1, 1), name= "gender_embed_matrix")
gender_embed_layer = tf.nn.embedding_lookup(gender_embed_matrix, user_gender, name = "gender_embed_layer")
age_embed_matrix = tf.Variable(tf.random_uniform([age_max, embed_dim // 2], -1, 1), name="age_embed_matrix")
age_embed_layer = tf.nn.embedding_lookup(age_embed_matrix, user_age, name="age_embed_layer")
job_embed_matrix = tf.Variable(tf.random_uniform([job_max, embed_dim // 2], -1, 1), name = "job_embed_matrix")
job_embed_layer = tf.nn.embedding_lookup(job_embed_matrix, user_job, name = "job_embed_layer")
return uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer
## 将user嵌入矩阵全连接生成特征
def get_user_feature_layer(uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer):
with tf.name_scope("user_fc"):
#第一层全连接
uid_fc_layer = tf.layers.dense(uid_embed_layer, embed_dim, name = "uid_fc_layer", activation=tf.nn.relu)
gender_fc_layer = tf.layers.dense(gender_embed_layer, embed_dim, name = "gender_fc_layer", activation=tf.nn.relu)
age_fc_layer = tf.layers.dense(age_embed_layer, embed_dim, name ="age_fc_layer", activation=tf.nn.relu)
job_fc_layer = tf.layers.dense(job_embed_layer, embed_dim, name = "job_fc_layer", activation=tf.nn.relu)
#第二层全连接
user_combine_layer = tf.concat([uid_fc_layer, gender_fc_layer, age_fc_layer, job_fc_layer], 2) #(?, 1, 128)
user_combine_layer = tf.contrib.layers.fully_connected(user_combine_layer, 200, tf.tanh) #(?, 1, 200)
user_combine_layer_flat = tf.reshape(user_combine_layer, [-1, 200])
return user_combine_layer, user_combine_layer_flat
## 定义movie ID的嵌入矩阵
def get_movie_id_embed_layer(movie_id):
with tf.name_scope("movie_embedding"):
movie_id_embed_matrix = tf.Variable(tf.random_uniform([movie_id_max, embed_dim], -1, 1), name = "movie_id_embed_matrix")
movie_id_embed_layer = tf.nn.embedding_lookup(movie_id_embed_matrix, movie_id, name = "movie_id_embed_layer")
return movie_id_embed_layer
## 对电影类型的多个嵌入向量做加和
def get_movie_categories_layers(movie_categories):
with tf.name_scope("movie_categories_layers"):
movie_categories_embed_matrix = tf.Variable(tf.random_uniform([movie_categories_max, embed_dim], -1, 1), name = "movie_categories_embed_matrix")
movie_categories_embed_layer = tf.nn.embedding_lookup(movie_categories_embed_matrix, movie_categories, name = "movie_categories_embed_layer")
if combiner == "sum":
movie_categories_embed_layer = tf.reduce_sum(movie_categories_embed_layer, axis=1, keep_dims=True)
return movie_categories_embed_layer
## movies title的文本卷积网络实现
def get_movie_cnn_layer(movie_titles):
#从嵌入矩阵中得到电影名对应的各个单词的嵌入向量
with tf.name_scope("movie_embedding"):
movie_title_embed_matrix = tf.Variable(tf.random_uniform([movie_title_max, embed_dim], -1, 1), name = "movie_title_embed_matrix")
movie_title_embed_layer = tf.nn.embedding_lookup(movie_title_embed_matrix, movie_titles, name = "movie_title_embed_layer")
movie_title_embed_layer_expand = tf.expand_dims(movie_title_embed_layer, -1)
#对文本嵌入层使用不同尺寸的卷积核做卷积和最大池化
pool_layer_lst = []
for window_size in window_sizes:
with tf.name_scope("movie_txt_conv_maxpool_{}".format(window_size)):
filter_weights = tf.Variable(tf.truncated_normal([window_size, embed_dim, 1, filter_num],stddev=0.1),name = "filter_weights")
filter_bias = tf.Variable(tf.constant(0.1, shape=[filter_num]), name="filter_bias")
conv_layer = tf.nn.conv2d(movie_title_embed_layer_expand, filter_weights, [1,1,1,1], padding="VALID", name="conv_layer")
relu_layer = tf.nn.relu(tf.nn.bias_add(conv_layer,filter_bias), name ="relu_layer")
maxpool_layer = tf.nn.max_pool(relu_layer, [1,sentences_size - window_size + 1 ,1,1], [1,1,1,1], padding="VALID", name="maxpool_layer")
pool_layer_lst.append(maxpool_layer)
#Dropout层
with tf.name_scope("pool_dropout"):
pool_layer = tf.concat(pool_layer_lst, 3, name ="pool_layer")
max_num = len(window_sizes) * filter_num
pool_layer_flat = tf.reshape(pool_layer , [-1, 1, max_num], name = "pool_layer_flat")
dropout_layer = tf.nn.dropout(pool_layer_flat, dropout_keep_prob, name = "dropout_layer")
return pool_layer_flat, dropout_layer
## 将movie的各个层做全连接
def get_movie_feature_layer(movie_id_embed_layer, movie_categories_embed_layer, dropout_layer):
with tf.name_scope("movie_fc"):
#第一层全连接
movie_id_fc_layer = tf.layers.dense(movie_id_embed_layer, embed_dim, name = "movie_id_fc_layer", activation=tf.nn.relu)
movie_categories_fc_layer = tf.layers.dense(movie_categories_embed_layer, embed_dim, name = "movie_categories_fc_layer", activation=tf.nn.relu)
#第二层全连接
movie_combine_layer = tf.concat([movie_id_fc_layer, movie_categories_fc_layer, dropout_layer], 2) #(?, 1, 96)
movie_combine_layer = tf.contrib.layers.fully_connected(movie_combine_layer, 200, tf.tanh) #(?, 1, 200)
movie_combine_layer_flat = tf.reshape(movie_combine_layer, [-1, 200])
return movie_combine_layer, movie_combine_layer_flat
## 构建计算图
#def calcGraph():
tf.reset_default_graph()
train_graph = tf.Graph()
with train_graph.as_default():
#获取输入占位符
uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob = get_inputs()
#获取User的4个嵌入向量
uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer = get_user_embedding(uid, user_gender, user_age, user_job)
#得到用户特征
user_combine_layer, user_combine_layer_flat = get_user_feature_layer(uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer)
#获取电影ID的嵌入向量
movie_id_embed_layer = get_movie_id_embed_layer(movie_id)
#获取电影类型的嵌入向量
movie_categories_embed_layer = get_movie_categories_layers(movie_categories)
#获取电影名的特征向量
pool_layer_flat, dropout_layer = get_movie_cnn_layer(movie_titles)
#得到电影特征
movie_combine_layer, movie_combine_layer_flat = get_movie_feature_layer(movie_id_embed_layer,
movie_categories_embed_layer,
dropout_layer)
#计算出评分,要注意两个不同的方案,inference的名字(name值)是不一样的,后面做推荐时要根据name取得tensor
with tf.name_scope("inference"):
#将用户特征和电影特征作为输入,经过全连接,输出一个值的方案
#inference_layer = tf.concat([user_combine_layer_flat, movie_combine_layer_flat], 1) #(?, 200)
# inference = tf.layers.dense(inference_layer, 1,
# kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
# kernel_regularizer=tf.nn.l2_loss, name="inference")
#简单的将用户特征和电影特征做矩阵乘法得到一个预测评分
# inference = tf.matmul(user_combine_layer_flat, tf.transpose(movie_combine_layer_flat))
inference = tf.reduce_sum(user_combine_layer_flat * movie_combine_layer_flat, axis=1)
inference = tf.expand_dims(inference, axis=1)
with tf.name_scope("loss"):
# MSE损失,将计算值回归到评分
cost = tf.losses.mean_squared_error(targets, inference )
loss = tf.reduce_mean(cost)
# 优化损失
#train_op = tf.train.AdamOptimizer(lr).minimize(loss) #cost
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(lr)
gradients = optimizer.compute_gradients(loss) #cost
train_op = optimizer.apply_gradients(gradients, global_step=global_step)
print(inference)
| 45.63253 | 146 | 0.777954 | from model_Init import *
_embedding(uid, user_gender, user_age, user_job):
with tf.name_scope("user_embedding"):
uid_embed_matrix = tf.Variable(tf.random_uniform([uid_max, embed_dim], -1, 1), name = "uid_embed_matrix")
uid_embed_layer = tf.nn.embedding_lookup(uid_embed_matrix, uid, name = "uid_embed_layer")
gender_embed_matrix = tf.Variable(tf.random_uniform([gender_max, embed_dim // 2], -1, 1), name= "gender_embed_matrix")
gender_embed_layer = tf.nn.embedding_lookup(gender_embed_matrix, user_gender, name = "gender_embed_layer")
age_embed_matrix = tf.Variable(tf.random_uniform([age_max, embed_dim // 2], -1, 1), name="age_embed_matrix")
age_embed_layer = tf.nn.embedding_lookup(age_embed_matrix, user_age, name="age_embed_layer")
job_embed_matrix = tf.Variable(tf.random_uniform([job_max, embed_dim // 2], -1, 1), name = "job_embed_matrix")
job_embed_layer = tf.nn.embedding_lookup(job_embed_matrix, user_job, name = "job_embed_layer")
return uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer
ure_layer(uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer):
with tf.name_scope("user_fc"):
uid_fc_layer = tf.layers.dense(uid_embed_layer, embed_dim, name = "uid_fc_layer", activation=tf.nn.relu)
gender_fc_layer = tf.layers.dense(gender_embed_layer, embed_dim, name = "gender_fc_layer", activation=tf.nn.relu)
age_fc_layer = tf.layers.dense(age_embed_layer, embed_dim, name ="age_fc_layer", activation=tf.nn.relu)
job_fc_layer = tf.layers.dense(job_embed_layer, embed_dim, name = "job_fc_layer", activation=tf.nn.relu)
user_combine_layer = tf.concat([uid_fc_layer, gender_fc_layer, age_fc_layer, job_fc_layer], 2)
user_combine_layer = tf.contrib.layers.fully_connected(user_combine_layer, 200, tf.tanh)
user_combine_layer_flat = tf.reshape(user_combine_layer, [-1, 200])
return user_combine_layer, user_combine_layer_flat
_embed_layer(movie_id):
with tf.name_scope("movie_embedding"):
movie_id_embed_matrix = tf.Variable(tf.random_uniform([movie_id_max, embed_dim], -1, 1), name = "movie_id_embed_matrix")
movie_id_embed_layer = tf.nn.embedding_lookup(movie_id_embed_matrix, movie_id, name = "movie_id_embed_layer")
return movie_id_embed_layer
tegories_layers(movie_categories):
with tf.name_scope("movie_categories_layers"):
movie_categories_embed_matrix = tf.Variable(tf.random_uniform([movie_categories_max, embed_dim], -1, 1), name = "movie_categories_embed_matrix")
movie_categories_embed_layer = tf.nn.embedding_lookup(movie_categories_embed_matrix, movie_categories, name = "movie_categories_embed_layer")
if combiner == "sum":
movie_categories_embed_layer = tf.reduce_sum(movie_categories_embed_layer, axis=1, keep_dims=True)
return movie_categories_embed_layer
r(movie_titles):
with tf.name_scope("movie_embedding"):
movie_title_embed_matrix = tf.Variable(tf.random_uniform([movie_title_max, embed_dim], -1, 1), name = "movie_title_embed_matrix")
movie_title_embed_layer = tf.nn.embedding_lookup(movie_title_embed_matrix, movie_titles, name = "movie_title_embed_layer")
movie_title_embed_layer_expand = tf.expand_dims(movie_title_embed_layer, -1)
pool_layer_lst = []
for window_size in window_sizes:
with tf.name_scope("movie_txt_conv_maxpool_{}".format(window_size)):
filter_weights = tf.Variable(tf.truncated_normal([window_size, embed_dim, 1, filter_num],stddev=0.1),name = "filter_weights")
filter_bias = tf.Variable(tf.constant(0.1, shape=[filter_num]), name="filter_bias")
conv_layer = tf.nn.conv2d(movie_title_embed_layer_expand, filter_weights, [1,1,1,1], padding="VALID", name="conv_layer")
relu_layer = tf.nn.relu(tf.nn.bias_add(conv_layer,filter_bias), name ="relu_layer")
maxpool_layer = tf.nn.max_pool(relu_layer, [1,sentences_size - window_size + 1 ,1,1], [1,1,1,1], padding="VALID", name="maxpool_layer")
pool_layer_lst.append(maxpool_layer)
with tf.name_scope("pool_dropout"):
pool_layer = tf.concat(pool_layer_lst, 3, name ="pool_layer")
max_num = len(window_sizes) * filter_num
pool_layer_flat = tf.reshape(pool_layer , [-1, 1, max_num], name = "pool_layer_flat")
dropout_layer = tf.nn.dropout(pool_layer_flat, dropout_keep_prob, name = "dropout_layer")
return pool_layer_flat, dropout_layer
eature_layer(movie_id_embed_layer, movie_categories_embed_layer, dropout_layer):
with tf.name_scope("movie_fc"):
movie_id_fc_layer = tf.layers.dense(movie_id_embed_layer, embed_dim, name = "movie_id_fc_layer", activation=tf.nn.relu)
movie_categories_fc_layer = tf.layers.dense(movie_categories_embed_layer, embed_dim, name = "movie_categories_fc_layer", activation=tf.nn.relu)
movie_combine_layer = tf.concat([movie_id_fc_layer, movie_categories_fc_layer, dropout_layer], 2)
movie_combine_layer = tf.contrib.layers.fully_connected(movie_combine_layer, 200, tf.tanh)
movie_combine_layer_flat = tf.reshape(movie_combine_layer, [-1, 200])
return movie_combine_layer, movie_combine_layer_flat
.reset_default_graph()
train_graph = tf.Graph()
with train_graph.as_default():
uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob = get_inputs()
uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer = get_user_embedding(uid, user_gender, user_age, user_job)
user_combine_layer, user_combine_layer_flat = get_user_feature_layer(uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer)
movie_id_embed_layer = get_movie_id_embed_layer(movie_id)
movie_categories_embed_layer = get_movie_categories_layers(movie_categories)
pool_layer_flat, dropout_layer = get_movie_cnn_layer(movie_titles)
movie_combine_layer, movie_combine_layer_flat = get_movie_feature_layer(movie_id_embed_layer,
movie_categories_embed_layer,
dropout_layer)
with tf.name_scope("inference"):
inference = tf.reduce_sum(user_combine_layer_flat * movie_combine_layer_flat, axis=1)
inference = tf.expand_dims(inference, axis=1)
with tf.name_scope("loss"):
cost = tf.losses.mean_squared_error(targets, inference )
loss = tf.reduce_mean(cost)
bal_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(lr)
gradients = optimizer.compute_gradients(loss)
train_op = optimizer.apply_gradients(gradients, global_step=global_step)
print(inference)
| true | true |
1c3218e98425eccd4ffcd85d939ce07c12783071 | 4,990 | py | Python | xpython/byteop/byteop26.py | rocky/xpython | ce4ed4329cee2af0aab94254276f5a5687dd25f9 | [
"MIT"
] | 1 | 2020-04-28T13:18:13.000Z | 2020-04-28T13:18:13.000Z | xpython/byteop/byteop26.py | rocky/xbyterun | fde8f8a31ffd3e3c4545d76b4b1edf4b7e0191d9 | [
"MIT"
] | null | null | null | xpython/byteop/byteop26.py | rocky/xbyterun | fde8f8a31ffd3e3c4545d76b4b1edf4b7e0191d9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Bytecode Interpreter operations for Python 2.6
Note: this is subclassed so later versions may use operations from here.
"""
import os
import sys
import xpython.stdlib
from xdis.version_info import PYTHON_VERSION_TRIPLE
try:
import importlib
except ImportError:
importlib = None
from xpython.byteop.byteop import fmt_binary_op
from xpython.byteop.byteop24 import fmt_make_function, Version_info
from xpython.byteop.byteop25 import ByteOp25
from xpython.pyobj import Function
class ByteOp26(ByteOp25):
def __init__(self, vm):
super(ByteOp26, self).__init__(vm)
self.stack_fmt["IMPORT_NAME"] = fmt_binary_op
self.stack_fmt["MAKE_CLOSURE"] = fmt_make_function
# Fake up version information
self.hexversion = 0x20609F0
self.version = "2.6.9 (default, Oct 27 1955, 00:00:00)\n[x-python]"
self.version_info = Version_info(2, 6, 9, "final", 0)
# Right now 2.6 is largely the same as 2.5 here. How nice!
def IMPORT_NAME(self, name):
"""
Imports the module co_names[namei]. TOS and TOS1 are popped and
provide the fromlist and level arguments of __import__(). The
module object is pushed onto the stack. The current namespace
is not affected: for a proper import statement, a subsequent
STORE_FAST instruction modifies the namespace.
Note: name = co_names[namei] set in parse_byte_and_args()
"""
level, fromlist = self.vm.popn(2)
frame = self.vm.frame
# Should we replace import "name" with a compatabliity version?
if name in xpython.stdlib.__all__:
name = f"xpython.stdlib.{name}"
# if importlib is not None:
# module_spec = importlib.util.find_spec(name)
# module = importlib.util.module_from_spec(module_spec)
# load_module = (
# module_spec.loader.exec_module
# if hasattr(module_spec.loader, "exec_module")
# else module_spec.loader.load_module
# )
# load_module(module)
# elif PYTHON_VERSION_TRIPLE >= (3, 0):
# # This should make a *copy* of the module so we keep interpreter and
# # interpreted programs separate.
# # See below for how we handle "sys" import
# # FIXME: should split on ".". Doesn't work for, say, os.path
# if level < 0:
# level = 0
# module = importlib.__import__(
# name, frame.f_globals, frame.f_locals, fromlist, level
# )
# else:
# module = __import__(name, frame.f_globals, frame.f_locals, fromlist, level)
# INVESTIGATE: the above doesn't work for things like "import os.path as osp"
# The module it finds ins os.posixpath which doesn't have a "path" attribute
# while the below finds "os" which does have a "path" attribute.
#
assert level >= -1, f"Invalid Level number {level} on IMPORT_NAME"
module = None
if level == -1:
# In Python 2.6 added the level parameter and it was -1 by default until but not including 3.0.
# -1 means try relative imports before absolute imports.
if PYTHON_VERSION_TRIPLE >= (3, 0):
# FIXME: give warning that we can't handle absolute import. Or fix up code to handle possible absolute import.
level = 0
else:
module = __import__(
"." + os.sep + name,
frame.f_globals,
frame.f_locals,
fromlist,
level,
)
if module is None:
module = __import__(name, frame.f_globals, frame.f_locals, fromlist, level)
# FIXME: generalize this
if name in sys.builtin_module_names:
# FIXME: do more here.
if PYTHON_VERSION_TRIPLE[:2] != self.version_info[:2]:
if name == "sys":
module.version_info = self.version_info
module.version = self.version
pass
pass
self.vm.push(module)
def MAKE_CLOSURE(self, argc: int):
"""
Creates a new function object, sets its func_closure slot, and
pushes it on the stack. TOS is the code associated with the
function. If the code object has N free variables, the next N
items on the stack are the cells for these variables. The
function also has argc default parameters, where are found
before the cells.
"""
if self.version_info[:2] >= (3, 3):
name = self.vm.pop()
else:
name = None
closure, code = self.vm.popn(2)
defaults = self.vm.popn(argc)
globs = self.vm.frame.f_globals
fn = Function(name, code, globs, defaults, closure, self.vm)
self.vm.push(fn)
| 38.091603 | 126 | 0.597996 |
import os
import sys
import xpython.stdlib
from xdis.version_info import PYTHON_VERSION_TRIPLE
try:
import importlib
except ImportError:
importlib = None
from xpython.byteop.byteop import fmt_binary_op
from xpython.byteop.byteop24 import fmt_make_function, Version_info
from xpython.byteop.byteop25 import ByteOp25
from xpython.pyobj import Function
class ByteOp26(ByteOp25):
def __init__(self, vm):
super(ByteOp26, self).__init__(vm)
self.stack_fmt["IMPORT_NAME"] = fmt_binary_op
self.stack_fmt["MAKE_CLOSURE"] = fmt_make_function
self.hexversion = 0x20609F0
self.version = "2.6.9 (default, Oct 27 1955, 00:00:00)\n[x-python]"
self.version_info = Version_info(2, 6, 9, "final", 0)
def IMPORT_NAME(self, name):
level, fromlist = self.vm.popn(2)
frame = self.vm.frame
if name in xpython.stdlib.__all__:
name = f"xpython.stdlib.{name}"
# )
# else:
# module = __import__(name, frame.f_globals, frame.f_locals, fromlist, level)
# INVESTIGATE: the above doesn't work for things like "import os.path as osp"
# while the below finds "os" which does have a "path" attribute.
#
assert level >= -1, f"Invalid Level number {level} on IMPORT_NAME"
module = None
if level == -1:
# In Python 2.6 added the level parameter and it was -1 by default until but not including 3.0.
# -1 means try relative imports before absolute imports.
if PYTHON_VERSION_TRIPLE >= (3, 0):
# FIXME: give warning that we can't handle absolute import. Or fix up code to handle possible absolute import.
level = 0
else:
module = __import__(
"." + os.sep + name,
frame.f_globals,
frame.f_locals,
fromlist,
level,
)
if module is None:
module = __import__(name, frame.f_globals, frame.f_locals, fromlist, level)
if name in sys.builtin_module_names:
if PYTHON_VERSION_TRIPLE[:2] != self.version_info[:2]:
if name == "sys":
module.version_info = self.version_info
module.version = self.version
pass
pass
self.vm.push(module)
def MAKE_CLOSURE(self, argc: int):
if self.version_info[:2] >= (3, 3):
name = self.vm.pop()
else:
name = None
closure, code = self.vm.popn(2)
defaults = self.vm.popn(argc)
globs = self.vm.frame.f_globals
fn = Function(name, code, globs, defaults, closure, self.vm)
self.vm.push(fn)
| true | true |
1c32191e5989ca9ffe4e2cced12fb25f432a9b47 | 45 | py | Python | CodingBat/Warmup-2/string_times.py | arthxvr/coding--python | 1e91707be6cb8fef816dad0c1a65f2cc3327357e | [
"MIT"
] | null | null | null | CodingBat/Warmup-2/string_times.py | arthxvr/coding--python | 1e91707be6cb8fef816dad0c1a65f2cc3327357e | [
"MIT"
] | null | null | null | CodingBat/Warmup-2/string_times.py | arthxvr/coding--python | 1e91707be6cb8fef816dad0c1a65f2cc3327357e | [
"MIT"
] | null | null | null | def string_times(str, n):
return str * n
| 15 | 25 | 0.644444 | def string_times(str, n):
return str * n
| true | true |
1c3219f35ecf247590dfae96ca5c7f08de0a65c4 | 563 | py | Python | locust_demo/example2.py | jmkhael/locust_demo | d5c82bc56cd3c5d30a8944d796f88f093e058182 | [
"MIT"
] | null | null | null | locust_demo/example2.py | jmkhael/locust_demo | d5c82bc56cd3c5d30a8944d796f88f093e058182 | [
"MIT"
] | 1 | 2021-03-24T21:44:45.000Z | 2021-03-24T21:44:45.000Z | locust_demo/example2.py | jmkhael/locust_demo | d5c82bc56cd3c5d30a8944d796f88f093e058182 | [
"MIT"
] | null | null | null | import random
from locust import HttpUser, task, between
class MyTaskSet(HttpUser):
wait_time = between(5, 9)
def on_start(self):
res = self.client.post('login',
{
"username": 'admin',
"password": 'default'
})
res.raise_for_status()
@task(1)
def index(self):
self.client.get("/")
@task(1)
def entry(self):
entry = random.randint(1, 6)
self.client.get(f"/entry/{entry}", name="Entry")
| 21.653846 | 56 | 0.48135 | import random
from locust import HttpUser, task, between
class MyTaskSet(HttpUser):
wait_time = between(5, 9)
def on_start(self):
res = self.client.post('login',
{
"username": 'admin',
"password": 'default'
})
res.raise_for_status()
@task(1)
def index(self):
self.client.get("/")
@task(1)
def entry(self):
entry = random.randint(1, 6)
self.client.get(f"/entry/{entry}", name="Entry")
| true | true |
1c321c8b04ced57168a44d64a949a88e3dc7091a | 623 | py | Python | test.py | bitscuit/Text-Deduplication | 7f9921ea7ca01a56557b4145daede7f59258f02e | [
"MIT"
] | null | null | null | test.py | bitscuit/Text-Deduplication | 7f9921ea7ca01a56557b4145daede7f59258f02e | [
"MIT"
] | null | null | null | test.py | bitscuit/Text-Deduplication | 7f9921ea7ca01a56557b4145daede7f59258f02e | [
"MIT"
] | null | null | null | import keras
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
string1 = 'this is a sample sentence to test similarity'
string2 = 'this is a sample sentence to test similarity too'
t = Tokenizer()
t.fit_on_texts([string1])
t.fit_on_texts([string2])
sequence1 = t.texts_to_sequences([string1])
sequence2 = t.texts_to_sequences([string2])
padded1 = pad_sequences(sequence1, maxlen=10)
padded2 = pad_sequences(sequence2, maxlen=10)
model = keras.models.load_model('./data/SiameseLSTM.h5', custom_objects={'ManDist': ManDist})
y_pred = model.predict([padded1, padded2])
| 31.15 | 93 | 0.784912 | import keras
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
string1 = 'this is a sample sentence to test similarity'
string2 = 'this is a sample sentence to test similarity too'
t = Tokenizer()
t.fit_on_texts([string1])
t.fit_on_texts([string2])
sequence1 = t.texts_to_sequences([string1])
sequence2 = t.texts_to_sequences([string2])
padded1 = pad_sequences(sequence1, maxlen=10)
padded2 = pad_sequences(sequence2, maxlen=10)
model = keras.models.load_model('./data/SiameseLSTM.h5', custom_objects={'ManDist': ManDist})
y_pred = model.predict([padded1, padded2])
| true | true |
1c321d2118d7f632e4969aca96520b34012f982a | 3,427 | py | Python | imperative/python/megengine/optimizer/adadelta.py | chenls/MegEngine | 3f783aba4b81ab628ad911d0c66a49d163a8aaf6 | [
"Apache-2.0"
] | 3 | 2021-08-08T12:55:53.000Z | 2021-12-10T06:01:04.000Z | imperative/python/megengine/optimizer/adadelta.py | MediosZ/MegEngine | 5c775d02dd0b8f20b5acc6b400cf722e92f2e86b | [
"Apache-2.0"
] | 6 | 2020-04-24T08:52:06.000Z | 2021-08-16T06:38:23.000Z | imperative/python/megengine/optimizer/adadelta.py | MediosZ/MegEngine | 5c775d02dd0b8f20b5acc6b400cf722e92f2e86b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from typing import Iterable, Union
import numpy as np
from ..tensor import Parameter, tensor
from .optimizer import Optimizer
class Adadelta(Optimizer):
r"""
Implements Adadelta algorithm.
It has been proposed in `"ADADELTA: An Adaptive Learning Rate Method" <https://arxiv.org/abs/1212.5701>`_.
:param params: iterable of parameters to optimize or dicts defining
parameter groups.
:param lr: coefficient that scales delta before it is applied
to the parameters. Default: 1.0
:param rho: coefficient used for computing a running average
of squared gradients. Default: 0.9
:param eps: term added to the denominator to improve
numerical stability. Default: 1e-6
:param weight_decay: weight decay (L2 penalty). Default: 0
"""
def __init__(
self,
params: Union[Iterable[Parameter], dict],
lr: float = 1.0,
rho: float = 0.9,
eps: float = 1e-6,
weight_decay: float = 0.0,
):
assert lr >= 0.0, "Invalid learning rate: {}".format(lr)
assert rho >= 0.0 and rho <= 1.0, "Invalid rho value: {}".format(rho)
assert eps >= 0.0, "Invalid epsilon value: {}".format(eps)
assert weight_decay >= 0.0, "Invalid weight_decay value: {}".format(
weight_decay
)
defaults = dict(lr=lr, rho=rho, eps=eps, weight_decay=weight_decay)
super().__init__(params, defaults)
def _create_state(self, param_group):
for param in param_group["params"]:
self._add_state(param, "square_avg")
self._add_state(param, "acc_delta")
self._add_state(param, "step", initializer=0.0)
def _updates(self, param_group):
lr = param_group["lr"]
weight_decay = param_group["weight_decay"]
rho = param_group["rho"]
eps = param_group["eps"]
def make_scalar(val):
return tensor(val)
# since `conver_inputs` is disabled for param updates,
# scalar should be explicitly tansforred to tensor
_lr = make_scalar(lr)
_weight_decay = make_scalar(weight_decay)
_rho = make_scalar(rho)
_eps = make_scalar(eps)
c1, c2, c05 = map(make_scalar, (1.0, 2.0, 0.5))
for param in param_group["params"]:
if param.grad is None:
continue
states = self._state[param]
step = states["step"]
step += c1
grad = param.grad
if weight_decay != 0.0:
grad = grad + param * _weight_decay
square_avg = states["square_avg"]
acc_delta = states["acc_delta"]
square_avg = _rho * square_avg + (c1 - _rho) * grad ** c2
std = (square_avg + _eps) ** c05
delta = (acc_delta + _eps) ** c05 / std * grad
param -= _lr * delta
acc_delta = _rho * acc_delta + (c1 - _rho) * delta ** c2
states["square_avg"]._reset(square_avg)
states["acc_delta"]._reset(acc_delta)
| 34.969388 | 110 | 0.610155 |
from typing import Iterable, Union
import numpy as np
from ..tensor import Parameter, tensor
from .optimizer import Optimizer
class Adadelta(Optimizer):
def __init__(
self,
params: Union[Iterable[Parameter], dict],
lr: float = 1.0,
rho: float = 0.9,
eps: float = 1e-6,
weight_decay: float = 0.0,
):
assert lr >= 0.0, "Invalid learning rate: {}".format(lr)
assert rho >= 0.0 and rho <= 1.0, "Invalid rho value: {}".format(rho)
assert eps >= 0.0, "Invalid epsilon value: {}".format(eps)
assert weight_decay >= 0.0, "Invalid weight_decay value: {}".format(
weight_decay
)
defaults = dict(lr=lr, rho=rho, eps=eps, weight_decay=weight_decay)
super().__init__(params, defaults)
def _create_state(self, param_group):
for param in param_group["params"]:
self._add_state(param, "square_avg")
self._add_state(param, "acc_delta")
self._add_state(param, "step", initializer=0.0)
def _updates(self, param_group):
lr = param_group["lr"]
weight_decay = param_group["weight_decay"]
rho = param_group["rho"]
eps = param_group["eps"]
def make_scalar(val):
return tensor(val)
_lr = make_scalar(lr)
_weight_decay = make_scalar(weight_decay)
_rho = make_scalar(rho)
_eps = make_scalar(eps)
c1, c2, c05 = map(make_scalar, (1.0, 2.0, 0.5))
for param in param_group["params"]:
if param.grad is None:
continue
states = self._state[param]
step = states["step"]
step += c1
grad = param.grad
if weight_decay != 0.0:
grad = grad + param * _weight_decay
square_avg = states["square_avg"]
acc_delta = states["acc_delta"]
square_avg = _rho * square_avg + (c1 - _rho) * grad ** c2
std = (square_avg + _eps) ** c05
delta = (acc_delta + _eps) ** c05 / std * grad
param -= _lr * delta
acc_delta = _rho * acc_delta + (c1 - _rho) * delta ** c2
states["square_avg"]._reset(square_avg)
states["acc_delta"]._reset(acc_delta)
| true | true |
1c321e0d1e6c72eea8bae445367a295f62188a6b | 4,613 | py | Python | projects_oss/detr/detr/d2/dataset_mapper.py | Pandinosaurus/d2go | fd79c680749184509efb2017d478d8c00656bbe2 | [
"Apache-2.0"
] | null | null | null | projects_oss/detr/detr/d2/dataset_mapper.py | Pandinosaurus/d2go | fd79c680749184509efb2017d478d8c00656bbe2 | [
"Apache-2.0"
] | null | null | null | projects_oss/detr/detr/d2/dataset_mapper.py | Pandinosaurus/d2go | fd79c680749184509efb2017d478d8c00656bbe2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
import torch
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
__all__ = ["DetrDatasetMapper"]
def build_transform_gen(cfg, is_train):
"""
Create a list of :class:`TransformGen` from config.
Returns:
list[TransformGen]
"""
if is_train:
min_size = cfg.INPUT.MIN_SIZE_TRAIN
max_size = cfg.INPUT.MAX_SIZE_TRAIN
sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
else:
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
sample_style = "choice"
if sample_style == "range":
assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size))
logger = logging.getLogger(__name__)
tfm_gens = []
if is_train:
tfm_gens.append(T.RandomFlip())
tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
if is_train:
logger.info("TransformGens used in training: " + str(tfm_gens))
return tfm_gens
class DetrDatasetMapper:
"""
A callable which takes a dataset dict in Detectron2 Dataset format,
and map it into a format used by DETR.
The callable currently does the following:
1. Read the image from "file_name"
2. Applies geometric transforms to the image and annotation
3. Find and applies suitable cropping to the image and annotation
4. Prepare image and annotation to Tensors
"""
def __init__(self, cfg, is_train=True):
if cfg.INPUT.CROP.ENABLED and is_train:
self.crop_gen = [
T.ResizeShortestEdge([400, 500, 600], sample_style="choice"),
T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE),
]
else:
self.crop_gen = None
self.mask_on = cfg.MODEL.MASK_ON
self.tfm_gens = build_transform_gen(cfg, is_train)
logging.getLogger(__name__).info(
"Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen))
)
self.img_format = cfg.INPUT.FORMAT
self.is_train = is_train
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
utils.check_image_size(dataset_dict, image)
if self.crop_gen is None:
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
else:
if np.random.rand() > 0.5:
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
else:
image, transforms = T.apply_transform_gens(
self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image
)
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if not self.is_train:
# USER: Modify this if you want to keep them for some reason.
dataset_dict.pop("annotations", None)
return dataset_dict
if "annotations" in dataset_dict:
# USER: Modify this if you want to keep them for some reason.
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
anno.pop("keypoints", None)
# USER: Implement additional transformations if you have other types of data
annos = [
utils.transform_instance_annotations(obj, transforms, image_shape)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = utils.annotations_to_instances(annos, image_shape)
dataset_dict["instances"] = utils.filter_empty_instances(instances)
return dataset_dict
| 36.904 | 111 | 0.638413 |
import copy
import logging
import numpy as np
import torch
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
__all__ = ["DetrDatasetMapper"]
def build_transform_gen(cfg, is_train):
if is_train:
min_size = cfg.INPUT.MIN_SIZE_TRAIN
max_size = cfg.INPUT.MAX_SIZE_TRAIN
sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
else:
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
sample_style = "choice"
if sample_style == "range":
assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size))
logger = logging.getLogger(__name__)
tfm_gens = []
if is_train:
tfm_gens.append(T.RandomFlip())
tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
if is_train:
logger.info("TransformGens used in training: " + str(tfm_gens))
return tfm_gens
class DetrDatasetMapper:
def __init__(self, cfg, is_train=True):
if cfg.INPUT.CROP.ENABLED and is_train:
self.crop_gen = [
T.ResizeShortestEdge([400, 500, 600], sample_style="choice"),
T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE),
]
else:
self.crop_gen = None
self.mask_on = cfg.MODEL.MASK_ON
self.tfm_gens = build_transform_gen(cfg, is_train)
logging.getLogger(__name__).info(
"Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen))
)
self.img_format = cfg.INPUT.FORMAT
self.is_train = is_train
def __call__(self, dataset_dict):
dataset_dict = copy.deepcopy(dataset_dict)
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
utils.check_image_size(dataset_dict, image)
if self.crop_gen is None:
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
else:
if np.random.rand() > 0.5:
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
else:
image, transforms = T.apply_transform_gens(
self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image
)
image_shape = image.shape[:2]
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if not self.is_train:
dataset_dict.pop("annotations", None)
return dataset_dict
if "annotations" in dataset_dict:
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
anno.pop("keypoints", None)
annos = [
utils.transform_instance_annotations(obj, transforms, image_shape)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = utils.annotations_to_instances(annos, image_shape)
dataset_dict["instances"] = utils.filter_empty_instances(instances)
return dataset_dict
| true | true |
1c321e9f6ae93086f8db8edb4eaff14d6eb11ecc | 523 | py | Python | odoo-13.0/addons/website_event_sale/__manifest__.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/website_event_sale/__manifest__.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/website_event_sale/__manifest__.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
{
'name': "Online Event Ticketing",
'category': 'Website/Website',
'summary': "Sell event tickets online",
'description': """
Sell event tickets through eCommerce app.
""",
'depends': ['website_event', 'event_sale', 'website_sale'],
'data': [
'data/event_data.xml',
'views/event_templates.xml',
'views/event_views.xml',
'security/ir.model.access.csv',
'security/website_event_sale_security.xml',
],
'auto_install': True
}
| 26.15 | 63 | 0.600382 |
{
'name': "Online Event Ticketing",
'category': 'Website/Website',
'summary': "Sell event tickets online",
'description': """
Sell event tickets through eCommerce app.
""",
'depends': ['website_event', 'event_sale', 'website_sale'],
'data': [
'data/event_data.xml',
'views/event_templates.xml',
'views/event_views.xml',
'security/ir.model.access.csv',
'security/website_event_sale_security.xml',
],
'auto_install': True
}
| true | true |
1c321f14440a1f1ca4249e457c5620d7f377ce0f | 13,615 | py | Python | cvat/apps/tf_annotation/views.py | lravindr/cvat | b025acea43fbb55c7ea7eac7b12007f0eb6d3f45 | [
"MIT"
] | 1 | 2020-07-19T08:15:20.000Z | 2020-07-19T08:15:20.000Z | cvat/apps/tf_annotation/views.py | lravindr/cvat | b025acea43fbb55c7ea7eac7b12007f0eb6d3f45 | [
"MIT"
] | 17 | 2020-11-13T18:58:43.000Z | 2022-02-27T08:06:04.000Z | cvat/apps/tf_annotation/views.py | lravindr/cvat | b025acea43fbb55c7ea7eac7b12007f0eb6d3f45 | [
"MIT"
] | 4 | 2021-09-03T13:13:40.000Z | 2022-03-04T18:19:38.000Z |
# Copyright (C) 2018-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
from django.http import HttpResponse, JsonResponse, HttpResponseBadRequest
from rest_framework.decorators import api_view
from rules.contrib.views import permission_required, objectgetter
from cvat.apps.authentication.decorators import login_required
from cvat.apps.dataset_manager.task import put_task_data
from cvat.apps.engine.models import Task as TaskModel
from cvat.apps.engine.serializers import LabeledDataSerializer
from cvat.apps.engine.frame_provider import FrameProvider
import django_rq
import os
import rq
import tensorflow as tf
import numpy as np
from PIL import Image
from cvat.apps.engine.log import slogger
def load_image_into_numpy(image):
    """Return the pixel data of *image* as a (height, width, 3) uint8 array.

    *image* must expose the PIL.Image interface used here: a ``size``
    attribute ``(width, height)`` and a ``getdata()`` method yielding the
    pixels in row-major order. Assumes 3 channels (RGB) — TODO confirm
    callers never pass grayscale/RGBA images.
    """
    width, height = image.size
    flat_pixels = np.array(image.getdata())
    return flat_pixels.reshape((height, width, 3)).astype(np.uint8)
def run_inference_engine_annotation(image_list, labels_mapping, treshold):
    """Run object detection over a list of images with an OpenVINO model.

    Executed inside an RQ worker: progress and cancellation are exchanged
    through the current job's ``meta`` dict.

    Args:
        image_list: iterable of image file paths to annotate.
        labels_mapping: dict mapping the model's integer class id to a task
            label name; detections whose class is not a key are dropped.
        treshold: minimum confidence score for a detection to be kept.
            (Name kept as-is — ``treshold`` — for interface compatibility.)

    Returns:
        dict mapping label name -> list of ``[image_num, xmin, ymin, xmax, ymax]``
        boxes in original-image pixel coordinates, or ``None`` when the job
        was cancelled via ``job.meta['cancel']``.

    Raises:
        OSError: if the ``TF_ANNOTATION_MODEL_PATH`` environment variable is
            not set.
    """
    from cvat.apps.auto_annotation.inference_engine import make_plugin_or_core, make_network

    def _normalize_box(box, w, h, dw, dh):
        # Map normalized [0..1] coords produced on the 600x600 model input
        # back to pixel coords of the original (w, h) image, clamped to its
        # bounds. dw/dh undo the thumbnail downscaling per axis.
        xmin = min(int(box[0] * dw * w), w)
        ymin = min(int(box[1] * dh * h), h)
        xmax = min(int(box[2] * dw * w), w)
        ymax = min(int(box[3] * dh * h), h)
        return xmin, ymin, xmax, ymax

    result = {}
    MODEL_PATH = os.environ.get('TF_ANNOTATION_MODEL_PATH')
    if MODEL_PATH is None:
        raise OSError('Model path env not found in the system.')

    core_or_plugin = make_plugin_or_core()
    network = make_network('{}.xml'.format(MODEL_PATH), '{}.bin'.format(MODEL_PATH))
    input_blob_name = next(iter(network.inputs))
    output_blob_name = next(iter(network.outputs))
    # Newer OpenVINO exposes IECore.load_network(); older releases load via
    # the plugin object instead.
    if getattr(core_or_plugin, 'load_network', False):
        executable_network = core_or_plugin.load_network(network, 'CPU')
    else:
        executable_network = core_or_plugin.load(network=network)
    job = rq.get_current_job()
    del network

    try:
        for image_num, im_name in enumerate(image_list):
            # Cooperative cancellation: the web app sets meta['cancel'] on the job.
            job.refresh()
            if 'cancel' in job.meta:
                del job.meta['cancel']
                job.save()
                return None
            job.meta['progress'] = image_num * 100 / len(image_list)
            job.save_meta()

            image = Image.open(im_name)
            width, height = image.size
            # Downscale (aspect-preserving) to fit 600x600, then crop/pad to
            # exactly 600x600 — the fixed spatial size the model expects.
            image.thumbnail((600, 600), Image.ANTIALIAS)
            dwidth, dheight = 600 / image.size[0], 600 / image.size[1]
            image = image.crop((0, 0, 600, 600))
            image_np = load_image_into_numpy(image)
            # HWC -> CHW, then add a batch axis for the network input.
            image_np = np.transpose(image_np, (2, 0, 1))
            prediction = executable_network.infer(inputs={input_blob_name: image_np[np.newaxis, ...]})[output_blob_name][0][0]
            # Each detection row: [image_id, class_id, confidence, xmin, ymin, xmax, ymax].
            for obj in prediction:
                obj_class = int(obj[1])
                obj_value = obj[2]
                if obj_class and obj_class in labels_mapping and obj_value >= treshold:
                    label = labels_mapping[obj_class]
                    if label not in result:
                        result[label] = []
                    xmin, ymin, xmax, ymax = _normalize_box(obj[3:7], width, height, dwidth, dheight)
                    result[label].append([image_num, xmin, ymin, xmax, ymax])
    finally:
        del executable_network
        # BUG FIX: was `del plugin`, but no such variable exists in this scope
        # (it is named `core_or_plugin`), so every call raised NameError from
        # the finally clause, clobbering the return value or original exception.
        del core_or_plugin

    return result
def run_tensorflow_annotation(frame_provider, labels_mapping, treshold):
    """Run the frozen TensorFlow object-detection model over a task's frames.

    frame_provider: CVAT FrameProvider yielding the task's images.
    labels_mapping: {tf class id -> CVAT label id}; classes not in the
        mapping are skipped.
    treshold: minimum detection score to keep a box.

    Returns {label_id: [[frame_index, xmin, ymin, xmax, ymax], ...]},
    or None if the rq job was cancelled through its meta flag.
    """
    def _normalize_box(box, w, h):
        # TF detection boxes are [ymin, xmin, ymax, xmax] normalized to
        # [0, 1]; convert to absolute pixel (xmin, ymin, xmax, ymax).
        xmin = int(box[1] * w)
        ymin = int(box[0] * h)
        xmax = int(box[3] * w)
        ymax = int(box[2] * h)
        return xmin, ymin, xmax, ymax

    result = {}
    model_path = os.environ.get('TF_ANNOTATION_MODEL_PATH')
    if model_path is None:
        raise OSError('Model path env not found in the system.')
    job = rq.get_current_job()

    # Load the frozen inference graph (TensorFlow 1.x API).
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(model_path + '.pb', 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

    try:
        config = tf.ConfigProto()
        # Grow GPU memory on demand instead of grabbing it all up front.
        config.gpu_options.allow_growth = True
        sess = tf.Session(graph=detection_graph, config=config)
        frames = frame_provider.get_frames(frame_provider.Quality.ORIGINAL)
        for image_num, (image, _) in enumerate(frames):
            # Honor a cancellation flag set by the `cancel` view.
            job.refresh()
            if 'cancel' in job.meta:
                del job.meta['cancel']
                job.save()
                return None
            job.meta['progress'] = image_num * 100 / len(frame_provider)
            job.save_meta()

            image = Image.open(image)
            width, height = image.size
            # Downscale very large frames to cut inference cost; boxes are
            # normalized, so they still map back via the original
            # width/height captured above.
            if width > 1920 or height > 1080:
                image = image.resize((width // 2, height // 2), Image.ANTIALIAS)
            image_np = load_image_into_numpy(image)
            image_np_expanded = np.expand_dims(image_np, axis=0)

            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            scores = detection_graph.get_tensor_by_name('detection_scores:0')
            classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            (boxes, scores, classes, num_detections) = sess.run([boxes, scores, classes, num_detections], feed_dict={image_tensor: image_np_expanded})
            for i in range(len(classes[0])):
                if classes[0][i] in labels_mapping.keys():
                    if scores[0][i] >= treshold:
                        xmin, ymin, xmax, ymax = _normalize_box(boxes[0][i], width, height)
                        label = labels_mapping[classes[0][i]]
                        if label not in result:
                            result[label] = []
                        result[label].append([image_num, xmin, ymin, xmax, ymax])
    finally:
        sess.close()
        del sess

    return result
def convert_to_cvat_format(data):
    """Translate detections into the CVAT annotation-upload structure.

    data: {label_id: [[frame, xmin, ymin, xmax, ymax], ...]}.
    Returns a dict with empty "tracks"/"tags", version 0, and one
    rectangle shape per detection box.
    """
    shapes = [
        {
            "type": "rectangle",
            "label_id": label,
            "frame": box[0],
            "points": [box[1], box[2], box[3], box[4]],
            "z_order": 0,
            "group": None,
            "occluded": False,
            "attributes": [],
        }
        for label, boxes in data.items()
        for box in boxes
    ]
    return {"tracks": [], "shapes": shapes, "tags": [], "version": 0}
def create_thread(tid, labels_mapping, user):
    """Background rq worker: run tf annotation on task *tid* and save results.

    labels_mapping: {tf class id -> CVAT label id}.
    user: the requesting user (unused in the body; kept for the enqueue
        call signature).
    """
    try:
        TRESHOLD = 0.5  # minimum detection confidence to keep
        # Init rq job
        job = rq.get_current_job()
        job.meta['progress'] = 0
        job.save_meta()
        # Get job indexes and segment length
        db_task = TaskModel.objects.get(pk=tid)
        # Get image list
        image_list = FrameProvider(db_task.data)
        # Run auto annotation by tf
        result = None
        slogger.glob.info("tf annotation with tensorflow framework for task {}".format(tid))
        result = run_tensorflow_annotation(image_list, labels_mapping, TRESHOLD)
        # run_tensorflow_annotation returns None when the user cancelled.
        if result is None:
            slogger.glob.info('tf annotation for task {} canceled by user'.format(tid))
            return
        # Modify data format and save
        result = convert_to_cvat_format(result)
        serializer = LabeledDataSerializer(data = result)
        if serializer.is_valid(raise_exception=True):
            put_task_data(tid, result)
        slogger.glob.info('tf annotation for task {} done'.format(tid))
    except Exception as ex:
        try:
            slogger.task[tid].exception('exception was occured during tf annotation of the task', exc_info=True)
        except:
            # Per-task logger may be unavailable (e.g. task deleted);
            # fall back to the global logger.
            slogger.glob.exception('exception was occured during tf annotation of the task {}'.format(tid), exc_info=True)
        raise ex
@api_view(['POST'])
@login_required
def get_meta_info(request):
    """Return tf-annotation job status for a list of task ids.

    Request body: JSON list of task ids. Response: {tid: {"active",
    "success"}}; tasks with no known job are omitted.
    """
    try:
        queue = django_rq.get_queue('low')
        tids = request.data
        result = {}
        for tid in tids:
            job = queue.fetch_job('tf_annotation.create/{}'.format(tid))
            if job is not None:
                result[tid] = {
                    "active": job.is_queued or job.is_started,
                    "success": not job.is_failed
                }
        return JsonResponse(result)
    except Exception as ex:
        slogger.glob.exception('exception was occured during tf meta request', exc_info=True)
        return HttpResponseBadRequest(str(ex))
@login_required
@permission_required(perm=['engine.task.change'],
    fn=objectgetter(TaskModel, 'tid'), raise_exception=True)
def create(request, tid):
    """Enqueue a background tf-annotation job for task *tid*.

    Maps the task's label names onto the detection model's class ids and
    rejects the request if a job is already running or no label matches.
    """
    slogger.glob.info('tf annotation create request for task {}'.format(tid))
    try:
        db_task = TaskModel.objects.get(pk=tid)
        queue = django_rq.get_queue('low')
        job = queue.fetch_job('tf_annotation.create/{}'.format(tid))
        if job is not None and (job.is_started or job.is_queued):
            raise Exception("The process is already running")
        db_labels = db_task.label_set.prefetch_related('attributespec_set').all()
        db_labels = {db_label.id:db_label.name for db_label in db_labels}
        # Class ids understood by the detection model (COCO-style label
        # map; gaps in the numbering are intentional).
        tf_annotation_labels = {
            "person": 1, "bicycle": 2, "car": 3, "motorcycle": 4, "airplane": 5,
            "bus": 6, "train": 7, "truck": 8, "boat": 9, "traffic_light": 10,
            "fire_hydrant": 11, "stop_sign": 13, "parking_meter": 14, "bench": 15,
            "bird": 16, "cat": 17, "dog": 18, "horse": 19, "sheep": 20, "cow": 21,
            "elephant": 22, "bear": 23, "zebra": 24, "giraffe": 25, "backpack": 27,
            "umbrella": 28, "handbag": 31, "tie": 32, "suitcase": 33, "frisbee": 34,
            "skis": 35, "snowboard": 36, "sports_ball": 37, "kite": 38, "baseball_bat": 39,
            "baseball_glove": 40, "skateboard": 41, "surfboard": 42, "tennis_racket": 43,
            "bottle": 44, "wine_glass": 46, "cup": 47, "fork": 48, "knife": 49, "spoon": 50,
            "bowl": 51, "banana": 52, "apple": 53, "sandwich": 54, "orange": 55, "broccoli": 56,
            "carrot": 57, "hot_dog": 58, "pizza": 59, "donut": 60, "cake": 61, "chair": 62,
            "couch": 63, "potted_plant": 64, "bed": 65, "dining_table": 67, "toilet": 70,
            "tv": 72, "laptop": 73, "mouse": 74, "remote": 75, "keyboard": 76, "cell_phone": 77,
            "microwave": 78, "oven": 79, "toaster": 80, "sink": 81, "refrigerator": 83,
            "book": 84, "clock": 85, "vase": 86, "scissors": 87, "teddy_bear": 88, "hair_drier": 89,
            "toothbrush": 90
        }
        # {model class id -> CVAT label id} for labels present in the task.
        labels_mapping = {}
        for key, labels in db_labels.items():
            if labels in tf_annotation_labels.keys():
                labels_mapping[tf_annotation_labels[labels]] = key
        if not len(labels_mapping.values()):
            raise Exception('No labels found for tf annotation')
        # Run tf annotation job
        queue.enqueue_call(func=create_thread,
                           args=(tid, labels_mapping, request.user),
                           job_id='tf_annotation.create/{}'.format(tid),
                           timeout=604800)  # 7 days
        slogger.task[tid].info('tensorflow annotation job enqueued with labels {}'.format(labels_mapping))
    except Exception as ex:
        try:
            slogger.task[tid].exception("exception was occured during tensorflow annotation request", exc_info=True)
        except:
            pass
        return HttpResponseBadRequest(str(ex))
    return HttpResponse()
@login_required
@permission_required(perm=['engine.task.access'],
    fn=objectgetter(TaskModel, 'tid'), raise_exception=True)
def check(request, tid):
    """Report the tf-annotation job status for task *tid*.

    Returns JSON with "status" in {unknown, queued, started, finished,
    failed}; adds "progress" while started and "stderr" on failure.
    """
    # BUG FIX: `data` must exist before the try block. The original
    # created it inside `try`, so an exception raised before that point
    # (e.g. from get_queue/fetch_job) made the except handler itself
    # crash with NameError on `data['status']`.
    data = {}
    try:
        queue = django_rq.get_queue('low')
        job = queue.fetch_job('tf_annotation.create/{}'.format(tid))
        if job is not None and 'cancel' in job.meta:
            # Cancellation requested but not yet consumed by the worker.
            return JsonResponse({'status': 'finished'})
        if job is None:
            data['status'] = 'unknown'
        elif job.is_queued:
            data['status'] = 'queued'
        elif job.is_started:
            data['status'] = 'started'
            data['progress'] = job.meta['progress']
        elif job.is_finished:
            data['status'] = 'finished'
            job.delete()
        else:
            data['status'] = 'failed'
            data['stderr'] = job.exc_info
            job.delete()
    except Exception:
        data['status'] = 'unknown'
    return JsonResponse(data)
@login_required
@permission_required(perm=['engine.task.change'],
    fn=objectgetter(TaskModel, 'tid'), raise_exception=True)
def cancel(request, tid):
    """Request cancellation of a running tf-annotation job for task *tid*.

    Sets a 'cancel' flag in the rq job meta; the worker polls and honors
    it between frames.
    """
    try:
        queue = django_rq.get_queue('low')
        job = queue.fetch_job('tf_annotation.create/{}'.format(tid))
        if job is None or job.is_finished or job.is_failed:
            raise Exception('Task is not being annotated currently')
        elif 'cancel' not in job.meta:
            job.meta['cancel'] = True
            job.save()
    except Exception as ex:
        try:
            slogger.task[tid].exception("cannot cancel tensorflow annotation for task #{}".format(tid), exc_info=True)
        except:
            pass
        return HttpResponseBadRequest(str(ex))
    return HttpResponse()
| 39.236311 | 154 | 0.593537 |
from django.http import HttpResponse, JsonResponse, HttpResponseBadRequest
from rest_framework.decorators import api_view
from rules.contrib.views import permission_required, objectgetter
from cvat.apps.authentication.decorators import login_required
from cvat.apps.dataset_manager.task import put_task_data
from cvat.apps.engine.models import Task as TaskModel
from cvat.apps.engine.serializers import LabeledDataSerializer
from cvat.apps.engine.frame_provider import FrameProvider
import django_rq
import os
import rq
import tensorflow as tf
import numpy as np
from PIL import Image
from cvat.apps.engine.log import slogger
def load_image_into_numpy(image):
    """Convert a PIL RGB image into an (height, width, 3) uint8 numpy array."""
    width, height = image.size
    flat = np.array(image.getdata())
    return flat.reshape((height, width, 3)).astype(np.uint8)
def run_inference_engine_annotation(image_list, labels_mapping, treshold):
    """Run the OpenVINO (inference engine) detection model over images.

    image_list: iterable of image file paths.
    labels_mapping: {model class id -> CVAT label id}; other classes ignored.
    treshold: minimum confidence score to keep a detection.

    Returns {label_id: [[image_index, xmin, ymin, xmax, ymax], ...]},
    or None if the rq job was cancelled through its meta flag.
    """
    from cvat.apps.auto_annotation.inference_engine import make_plugin_or_core, make_network

    def _normalize_box(box, w, h, dw, dh):
        # Rescale normalized model outputs toward the original image size;
        # dw/dh compensate for the thumbnail/crop step below. Clamp to the
        # image bounds.
        xmin = min(int(box[0] * dw * w), w)
        ymin = min(int(box[1] * dh * h), h)
        xmax = min(int(box[2] * dw * w), w)
        ymax = min(int(box[3] * dh * h), h)
        return xmin, ymin, xmax, ymax

    result = {}
    MODEL_PATH = os.environ.get('TF_ANNOTATION_MODEL_PATH')
    if MODEL_PATH is None:
        raise OSError('Model path env not found in the system.')

    core_or_plugin = make_plugin_or_core()
    network = make_network('{}.xml'.format(MODEL_PATH), '{}.bin'.format(MODEL_PATH))
    input_blob_name = next(iter(network.inputs))
    output_blob_name = next(iter(network.outputs))
    # Newer OpenVINO exposes Core.load_network; older releases use plugin.load.
    if getattr(core_or_plugin, 'load_network', False):
        executable_network = core_or_plugin.load_network(network, 'CPU')
    else:
        executable_network = core_or_plugin.load(network=network)
    job = rq.get_current_job()
    del network

    try:
        for image_num, im_name in enumerate(image_list):
            # Honor a cancellation flag set by the `cancel` view.
            job.refresh()
            if 'cancel' in job.meta:
                del job.meta['cancel']
                job.save()
                return None
            job.meta['progress'] = image_num * 100 / len(image_list)
            job.save_meta()

            image = Image.open(im_name)
            width, height = image.size
            image.thumbnail((600, 600), Image.ANTIALIAS)
            dwidth, dheight = 600 / image.size[0], 600 / image.size[1]
            image = image.crop((0, 0, 600, 600))
            image_np = load_image_into_numpy(image)
            image_np = np.transpose(image_np, (2, 0, 1))  # HWC -> CHW
            prediction = executable_network.infer(inputs={input_blob_name: image_np[np.newaxis, ...]})[output_blob_name][0][0]
            for obj in prediction:
                obj_class = int(obj[1])
                obj_value = obj[2]
                if obj_class and obj_class in labels_mapping and obj_value >= treshold:
                    label = labels_mapping[obj_class]
                    if label not in result:
                        result[label] = []
                    xmin, ymin, xmax, ymax = _normalize_box(obj[3:7], width, height, dwidth, dheight)
                    result[label].append([image_num, xmin, ymin, xmax, ymax])
    finally:
        del executable_network
        # BUG FIX: the original did `del plugin`, but no name `plugin` exists
        # in this scope (the variable is `core_or_plugin`), so the finally
        # block raised NameError on every run.
        del core_or_plugin
    return result
def run_tensorflow_annotation(frame_provider, labels_mapping, treshold):
    """Run the frozen TensorFlow object-detection model over a task's frames.

    frame_provider: CVAT FrameProvider yielding the task's images.
    labels_mapping: {tf class id -> CVAT label id}; other classes skipped.
    treshold: minimum detection score to keep a box.

    Returns {label_id: [[frame_index, xmin, ymin, xmax, ymax], ...]},
    or None if the rq job was cancelled through its meta flag.
    """
    def _normalize_box(box, w, h):
        # TF detection boxes are [ymin, xmin, ymax, xmax] normalized to
        # [0, 1]; convert to absolute pixel (xmin, ymin, xmax, ymax).
        xmin = int(box[1] * w)
        ymin = int(box[0] * h)
        xmax = int(box[3] * w)
        ymax = int(box[2] * h)
        return xmin, ymin, xmax, ymax

    result = {}
    model_path = os.environ.get('TF_ANNOTATION_MODEL_PATH')
    if model_path is None:
        raise OSError('Model path env not found in the system.')
    job = rq.get_current_job()

    # Load the frozen inference graph (TensorFlow 1.x API).
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(model_path + '.pb', 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

    try:
        config = tf.ConfigProto()
        # Grow GPU memory on demand instead of grabbing it all up front.
        config.gpu_options.allow_growth = True
        sess = tf.Session(graph=detection_graph, config=config)
        frames = frame_provider.get_frames(frame_provider.Quality.ORIGINAL)
        for image_num, (image, _) in enumerate(frames):
            # Honor a cancellation flag set by the `cancel` view.
            job.refresh()
            if 'cancel' in job.meta:
                del job.meta['cancel']
                job.save()
                return None
            job.meta['progress'] = image_num * 100 / len(frame_provider)
            job.save_meta()

            image = Image.open(image)
            width, height = image.size
            # Downscale very large frames to cut inference cost; boxes are
            # normalized, so they still map back via the original
            # width/height captured above.
            if width > 1920 or height > 1080:
                image = image.resize((width // 2, height // 2), Image.ANTIALIAS)
            image_np = load_image_into_numpy(image)
            image_np_expanded = np.expand_dims(image_np, axis=0)

            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            scores = detection_graph.get_tensor_by_name('detection_scores:0')
            classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            (boxes, scores, classes, num_detections) = sess.run([boxes, scores, classes, num_detections], feed_dict={image_tensor: image_np_expanded})
            for i in range(len(classes[0])):
                if classes[0][i] in labels_mapping.keys():
                    if scores[0][i] >= treshold:
                        xmin, ymin, xmax, ymax = _normalize_box(boxes[0][i], width, height)
                        label = labels_mapping[classes[0][i]]
                        if label not in result:
                            result[label] = []
                        result[label].append([image_num, xmin, ymin, xmax, ymax])
    finally:
        sess.close()
        del sess

    return result
def convert_to_cvat_format(data):
    """Translate detections into the CVAT annotation-upload structure.

    data: {label_id: [[frame, xmin, ymin, xmax, ymax], ...]}.
    Returns a dict with empty "tracks"/"tags", version 0, and one
    rectangle shape per detection box.
    """
    rectangles = [
        {
            "type": "rectangle",
            "label_id": label,
            "frame": box[0],
            "points": [box[1], box[2], box[3], box[4]],
            "z_order": 0,
            "group": None,
            "occluded": False,
            "attributes": [],
        }
        for label, boxes in data.items()
        for box in boxes
    ]
    return {"tracks": [], "shapes": rectangles, "tags": [], "version": 0}
def create_thread(tid, labels_mapping, user):
    """Background rq worker: run tf annotation on task *tid* and save results.

    labels_mapping: {tf class id -> CVAT label id}.
    user: the requesting user (unused in the body; kept for the enqueue
        call signature).
    """
    try:
        TRESHOLD = 0.5  # minimum detection confidence to keep
        # Initialize rq progress reporting.
        job = rq.get_current_job()
        job.meta['progress'] = 0
        job.save_meta()
        db_task = TaskModel.objects.get(pk=tid)
        image_list = FrameProvider(db_task.data)
        result = None
        slogger.glob.info("tf annotation with tensorflow framework for task {}".format(tid))
        result = run_tensorflow_annotation(image_list, labels_mapping, TRESHOLD)
        # run_tensorflow_annotation returns None when the user cancelled.
        if result is None:
            slogger.glob.info('tf annotation for task {} canceled by user'.format(tid))
            return
        # Convert to the CVAT upload format, validate, and store.
        result = convert_to_cvat_format(result)
        serializer = LabeledDataSerializer(data = result)
        if serializer.is_valid(raise_exception=True):
            put_task_data(tid, result)
        slogger.glob.info('tf annotation for task {} done'.format(tid))
    except Exception as ex:
        try:
            slogger.task[tid].exception('exception was occured during tf annotation of the task', exc_info=True)
        except:
            # Per-task logger may be unavailable; fall back to the global one.
            slogger.glob.exception('exception was occured during tf annotation of the task {}'.format(tid), exc_info=True)
        raise ex
@api_view(['POST'])
@login_required
def get_meta_info(request):
    """Return tf-annotation job status for a list of task ids.

    Request body: JSON list of task ids. Response: {tid: {"active",
    "success"}}; tasks with no known job are omitted.
    """
    try:
        queue = django_rq.get_queue('low')
        tids = request.data
        result = {}
        for tid in tids:
            job = queue.fetch_job('tf_annotation.create/{}'.format(tid))
            if job is not None:
                result[tid] = {
                    "active": job.is_queued or job.is_started,
                    "success": not job.is_failed
                }
        return JsonResponse(result)
    except Exception as ex:
        slogger.glob.exception('exception was occured during tf meta request', exc_info=True)
        return HttpResponseBadRequest(str(ex))
@login_required
@permission_required(perm=['engine.task.change'],
    fn=objectgetter(TaskModel, 'tid'), raise_exception=True)
def create(request, tid):
    """Enqueue a background tf-annotation job for task *tid*.

    Maps the task's label names onto the detection model's class ids and
    rejects the request if a job is already running or no label matches.
    """
    slogger.glob.info('tf annotation create request for task {}'.format(tid))
    try:
        db_task = TaskModel.objects.get(pk=tid)
        queue = django_rq.get_queue('low')
        job = queue.fetch_job('tf_annotation.create/{}'.format(tid))
        if job is not None and (job.is_started or job.is_queued):
            raise Exception("The process is already running")
        db_labels = db_task.label_set.prefetch_related('attributespec_set').all()
        db_labels = {db_label.id:db_label.name for db_label in db_labels}
        # Class ids understood by the detection model (COCO-style label
        # map; gaps in the numbering are intentional).
        tf_annotation_labels = {
            "person": 1, "bicycle": 2, "car": 3, "motorcycle": 4, "airplane": 5,
            "bus": 6, "train": 7, "truck": 8, "boat": 9, "traffic_light": 10,
            "fire_hydrant": 11, "stop_sign": 13, "parking_meter": 14, "bench": 15,
            "bird": 16, "cat": 17, "dog": 18, "horse": 19, "sheep": 20, "cow": 21,
            "elephant": 22, "bear": 23, "zebra": 24, "giraffe": 25, "backpack": 27,
            "umbrella": 28, "handbag": 31, "tie": 32, "suitcase": 33, "frisbee": 34,
            "skis": 35, "snowboard": 36, "sports_ball": 37, "kite": 38, "baseball_bat": 39,
            "baseball_glove": 40, "skateboard": 41, "surfboard": 42, "tennis_racket": 43,
            "bottle": 44, "wine_glass": 46, "cup": 47, "fork": 48, "knife": 49, "spoon": 50,
            "bowl": 51, "banana": 52, "apple": 53, "sandwich": 54, "orange": 55, "broccoli": 56,
            "carrot": 57, "hot_dog": 58, "pizza": 59, "donut": 60, "cake": 61, "chair": 62,
            "couch": 63, "potted_plant": 64, "bed": 65, "dining_table": 67, "toilet": 70,
            "tv": 72, "laptop": 73, "mouse": 74, "remote": 75, "keyboard": 76, "cell_phone": 77,
            "microwave": 78, "oven": 79, "toaster": 80, "sink": 81, "refrigerator": 83,
            "book": 84, "clock": 85, "vase": 86, "scissors": 87, "teddy_bear": 88, "hair_drier": 89,
            "toothbrush": 90
        }
        # {model class id -> CVAT label id} for labels present in the task.
        labels_mapping = {}
        for key, labels in db_labels.items():
            if labels in tf_annotation_labels.keys():
                labels_mapping[tf_annotation_labels[labels]] = key
        if not len(labels_mapping.values()):
            raise Exception('No labels found for tf annotation')
        # Enqueue the background annotation job.
        queue.enqueue_call(func=create_thread,
                           args=(tid, labels_mapping, request.user),
                           job_id='tf_annotation.create/{}'.format(tid),
                           timeout=604800)  # 7 days
        slogger.task[tid].info('tensorflow annotation job enqueued with labels {}'.format(labels_mapping))
    except Exception as ex:
        try:
            slogger.task[tid].exception("exception was occured during tensorflow annotation request", exc_info=True)
        except:
            pass
        return HttpResponseBadRequest(str(ex))
    return HttpResponse()
@login_required
@permission_required(perm=['engine.task.access'],
    fn=objectgetter(TaskModel, 'tid'), raise_exception=True)
def check(request, tid):
    """Report the tf-annotation job status for task *tid*.

    Returns JSON with "status" in {unknown, queued, started, finished,
    failed}; adds "progress" while started and "stderr" on failure.
    """
    # BUG FIX: `data` must exist before the try block. The original
    # created it inside `try`, so an exception raised before that point
    # (e.g. from get_queue/fetch_job) made the except handler itself
    # crash with NameError on `data['status']`.
    data = {}
    try:
        queue = django_rq.get_queue('low')
        job = queue.fetch_job('tf_annotation.create/{}'.format(tid))
        if job is not None and 'cancel' in job.meta:
            # Cancellation requested but not yet consumed by the worker.
            return JsonResponse({'status': 'finished'})
        if job is None:
            data['status'] = 'unknown'
        elif job.is_queued:
            data['status'] = 'queued'
        elif job.is_started:
            data['status'] = 'started'
            data['progress'] = job.meta['progress']
        elif job.is_finished:
            data['status'] = 'finished'
            job.delete()
        else:
            data['status'] = 'failed'
            data['stderr'] = job.exc_info
            job.delete()
    except Exception:
        data['status'] = 'unknown'
    return JsonResponse(data)
@login_required
@permission_required(perm=['engine.task.change'],
    fn=objectgetter(TaskModel, 'tid'), raise_exception=True)
def cancel(request, tid):
    """Request cancellation of a running tf-annotation job for task *tid*.

    Sets a 'cancel' flag in the rq job meta; the worker polls and honors
    it between frames.
    """
    try:
        queue = django_rq.get_queue('low')
        job = queue.fetch_job('tf_annotation.create/{}'.format(tid))
        if job is None or job.is_finished or job.is_failed:
            raise Exception('Task is not being annotated currently')
        elif 'cancel' not in job.meta:
            job.meta['cancel'] = True
            job.save()
    except Exception as ex:
        try:
            slogger.task[tid].exception("cannot cancel tensorflow annotation for task #{}".format(tid), exc_info=True)
        except:
            pass
        return HttpResponseBadRequest(str(ex))
    return HttpResponse()
| true | true |
1c321f1f4f98f8c5bb24f8b5a4a96a2bb27fd616 | 10,137 | py | Python | hammers/scripts/maintenance_reservation.py | ChameleonCloud/bag-o-hammers | 0faaf9b21aceb155dc7da2ea92cf77af815c11e7 | [
"Apache-2.0"
] | null | null | null | hammers/scripts/maintenance_reservation.py | ChameleonCloud/bag-o-hammers | 0faaf9b21aceb155dc7da2ea92cf77af815c11e7 | [
"Apache-2.0"
] | 8 | 2018-05-24T01:07:27.000Z | 2021-09-01T18:02:29.000Z | hammers/scripts/maintenance_reservation.py | ChameleonCloud/bag-o-hammers | 0faaf9b21aceb155dc7da2ea92cf77af815c11e7 | [
"Apache-2.0"
] | 2 | 2016-12-07T01:12:41.000Z | 2018-08-17T16:57:54.000Z | import datetime
import logging
import os
import sys
import traceback
from dateutil import tz
from blazarclient import client as blazar_client
from ironicclient import client as ironic_client
from keystoneauth1 import adapter, loading, session
from keystoneauth1.identity import v3
from hammers import MySqlArgs
from hammers.slack import Slackbot
from hammers.util import base_parser
logging.basicConfig()
MAINT_LEASE_NAME = 'maint-of-{node_name}-by-{operator}-for-{reason}'
DATETIME_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
def valid_date(s):
    """Parse *s* with DATETIME_STR_FORMAT; return None for empty input.

    Used as an argparse ``type=`` callable, so invalid input raises
    ``argparse.ArgumentTypeError`` (which argparse turns into a usage error).
    """
    if not s:
        return None
    try:
        return datetime.datetime.strptime(s, DATETIME_STR_FORMAT)
    except ValueError:
        # BUG FIX: `argparse` is never imported at module level in this
        # file, so the original error path raised NameError instead of
        # the intended ArgumentTypeError. Import locally.
        import argparse
        msg = "Not a valid date: '{0}'.".format(s)
        raise argparse.ArgumentTypeError(msg)
def append_global_identity_args(parser, argv):
    """Register keystone auth CLI options and default them from OS_* env vars."""
    loading.register_auth_argparse_arguments(parser, argv, default='password')
    # Defaults come from the standard OpenStack environment variables;
    # the domain values fall back to 'default' when unset.
    env_defaults = {
        'os_auth_url': os.getenv('OS_AUTH_URL', None),
        'os_username': os.getenv('OS_USERNAME', None),
        'os_password': os.getenv('OS_PASSWORD', None),
        'os_project_name': os.getenv('OS_PROJECT_NAME', None),
        'os_project_id': os.getenv('OS_PROJECT_ID', None),
        'os_project_domain_id': os.getenv('OS_PROJECT_DOMAIN_ID', 'default'),
        'os_project_domain_name': os.getenv('OS_PROJECT_DOMAIN_NAME', 'default'),
        'os_user_domain_id': os.getenv('OS_USER_DOMAIN_ID', 'default'),
        'os_user_domain_name': os.getenv('OS_USER_DOMAIN_NAME', 'default'),
        'os_region_name': os.getenv('OS_REGION_NAME', None),
    }
    parser.set_defaults(**env_defaults)
def get_session(auth_url, username, password, project_name, user_domain_name='default',
                project_domain_name='default', region_name=None, interface=None):
    """Build a keystone v3 password session wrapped in an Adapter."""
    password_auth = v3.Password(
        auth_url=auth_url,
        username=username,
        password=password,
        project_name=project_name,
        user_domain_name=user_domain_name,
        project_domain_name=project_domain_name,
    )
    keystone_session = session.Session(auth=password_auth)
    return adapter.Adapter(keystone_session, region_name=region_name, interface=interface)
def get_nodes(sess, node_id_or_names):
    """Fetch ironic node records for the given node ids or names.

    Raises if the baremetal endpoint cannot be resolved or a node lookup
    fails.
    """
    token = sess.get_token()
    try:
        ironic_url = sess.get_endpoint(
            service_type='baremetal', interface='public')
    except Exception:
        traceback.print_exc(file=sys.stdout)
        # BUG FIX: the original swallowed this exception and fell through,
        # which then crashed with NameError on the unbound `ironic_url`.
        # Re-raise so the caller sees the real endpoint-lookup failure.
        raise
    ironic = ironic_client.get_client(1, token=token, endpoint=ironic_url)
    return [ironic.node.get(name) for name in node_id_or_names]
def get_node_earliest_reserve_time(db, node_uuid, requested_hours):
    """Return the earliest UTC datetime when *node_uuid* is free long enough.

    Scans the node's future Blazar leases in start order, looking for a gap
    of at least *requested_hours* (plus a 10-minute buffer after the
    preceding lease). Returns "now" if the node has no future leases.
    """
    sql = '''SELECT l.start_date AS start_date, l.end_date AS end_date
             FROM blazar.leases AS l
             JOIN blazar.reservations AS r ON r.lease_id = l.id
             JOIN blazar.computehost_allocations AS ca ON r.id = ca.reservation_id
             JOIN blazar.computehosts AS ch ON ch.id = ca.compute_host_id
             WHERE ch.hypervisor_hostname=%(node_uuid)s
             AND l.deleted IS NULL
             AND l.end_date > UTC_TIMESTAMP()
             ORDER BY l.start_date'''
    now = datetime.datetime.utcnow()
    previous_end = None
    for lease in db.query(sql, {'node_uuid': node_uuid}):
        # Leases already in progress effectively start "now".
        lease_start = max(lease['start_date'], now)
        if previous_end is not None:
            # Gap between consecutive leases, minus the 10-minute buffer.
            gap_hours = ((lease_start - previous_end).total_seconds() - 600) / 3600.0
            if gap_hours > requested_hours:
                return previous_end + datetime.timedelta(minutes=10)
        previous_end = lease['end_date']
    if previous_end is not None:
        # No gap was big enough: schedule 10 minutes after the last lease.
        return previous_end + datetime.timedelta(minutes=10)
    return now
def reserve(sess, node, start_time, requested_hours, reason, operator, dryrun):
    """Create a Blazar maintenance lease for *node*.

    start_time: naive UTC datetime; requested_hours: lease length in hours.
    When *dryrun* is true only prints what would be reserved.
    Returns (start, end) strings converted to America/Chicago time.
    """
    end_time = start_time + datetime.timedelta(hours=requested_hours)
    # Convert UTC -> US Central time for human-readable reporting.
    start_time_str_in_ct = start_time.replace(tzinfo=tz.gettz('UTC')).astimezone(
        tz.gettz('America/Chicago')).strftime(DATETIME_STR_FORMAT)
    end_time_str_in_ct = end_time.replace(tzinfo=tz.gettz('UTC')).astimezone(
        tz.gettz('America/Chicago')).strftime(DATETIME_STR_FORMAT)
    print(((
        "Creating maintenance reservation for node {node_name} "
        "(id: {node_uuid}), starting {start} and ending {end} in central time"
        ).format(
            node_name=node.name,
            node_uuid=node.uuid,
            start=start_time_str_in_ct,
            end=end_time_str_in_ct)
    ))
    if not dryrun:
        blazar = blazar_client.Client(
            1, session=sess, service_type='reservation')
        # Pin the reservation to exactly this node via its uid.
        resource_properties = '["=", "$uid", "{node_uuid}"]'.format(
            node_uuid=node.uuid)
        phys_res = {'min': "1", 'max': "1", 'hypervisor_properties': "",
                    'resource_properties': resource_properties, 'resource_type': 'physical:host'}
        # Normalize spaces to underscores in the lease-name components.
        lease_name = MAINT_LEASE_NAME.format(node_name=node.name.replace(' ', '_'),
                                             operator=operator.replace(' ', '_'),
                                             reason=reason.replace(' ', '_'))
        lease = blazar.lease.create(name=lease_name,
                                    start=start_time.strftime('%Y-%m-%d %H:%M'),
                                    end=end_time.strftime('%Y-%m-%d %H:%M'),
                                    reservations=[phys_res],
                                    events=[])
        print(("Lease {name} (id: {id}) created successfully!".format(
            name=lease['name'], id=lease['id'])))
    return start_time_str_in_ct, end_time_str_in_ct
def main(argv=None):
    """Entry point: reserve the given nodes for maintenance via Blazar.

    Parses CLI/env options, builds two keystone sessions (an admin one
    for node lookup and a 'maintenance'-project one for lease creation),
    computes a start time per node when none is given, creates the
    leases, and reports a summary (optionally to Slack).
    """
    if argv is None:
        argv = sys.argv
    parser = base_parser('Reserve nodes for maintenance')
    append_global_identity_args(parser, argv)
    mysqlargs = MySqlArgs({
        'user': 'root',
        'password': '',
        'host': 'localhost',
        'port': 3306,
    })
    mysqlargs.inject(parser)
    parser.add_argument('--operator', type=str, required=True,
                        help='Chameleon account username of the operator')
    parser.add_argument('--nodes', type=str, required=True,
                        help='node ids or node names; comma separated')
    parser.add_argument('--reason', type=str, required=True,
                        help='maintenance reasons')
    parser.add_argument('--dry-run', action="store_true",
                        help='perform a trial run without making reservations')
    parser.add_argument('--start-time', type=valid_date, default=None,
                        help='lease start time (YYYY-mm-DD HH:MM:SS); if not given, start at the earliest possible datetime')
    parser.add_argument('--estimate-hours', type=int, default=168,
                        help='estimated hours required for maintenance; default is 168 hours (1 week)')
    args = parser.parse_args(argv[1:])
    slack = Slackbot(args.slack, script_name='maintenance-reservation') if args.slack else None
    # connect to database (used to inspect existing Blazar leases)
    mysqlargs.extract(args)
    db = mysqlargs.connect()
    # keystone authentication
    auth_args = {'auth_url': args.os_auth_url,
                 'username': args.os_username,
                 'password': args.os_password,
                 'project_name': args.os_project_name,
                 'region_name': args.os_region_name,
                 'interface': 'public'}
    if args.os_user_domain_name:
        auth_args['user_domain_name'] = args.os_user_domain_name
    if args.os_project_domain_name:
        auth_args['project_domain_name'] = args.os_project_domain_name
    # admin session for node information
    admin_sess = get_session(**auth_args)
    # 'maintenance' project session for creating the leases
    auth_args['project_name'] = 'maintenance'
    maint_sess = get_session(**auth_args)
    try:
        # get node details
        nodes = get_nodes(admin_sess, args.nodes.split(','))
        report_info = {}
        for node in nodes:
            lease_start_time = args.start_time
            if not lease_start_time:
                # find the earliest reservation time for the node
                lease_start_time = get_node_earliest_reserve_time(db, node.uuid, args.estimate_hours)
            else:
                # --start-time is interpreted as local time; convert to UTC
                lease_start_time = lease_start_time.replace(tzinfo=tz.tzlocal()).astimezone(tz.gettz('UTC'))
            # create the maintenance lease
            reserve_args = {'sess': maint_sess,
                            'node': node,
                            'start_time': lease_start_time,
                            'requested_hours': args.estimate_hours,
                            'reason': args.reason,
                            'operator': args.operator,
                            'dryrun': args.dry_run}
            start_time_str, end_time_str = reserve(**reserve_args)
            report_info[node.name] = (start_time_str, end_time_str)
        # summary (stdout, and Slack when configured)
        report_lines = [
            ('Node {node_name} at {region} is under maintenance '
             'from {start_time} to {end_time}').format(
                node_name=key,
                region=args.os_region_name,
                start_time=value[0],
                end_time=value[1]
            )
            for key, value in report_info.items()
        ]
        if report_lines:
            report = '\n'.join(report_lines)
            print(report)
            if slack:
                slack.message(report)
        else:
            print('nothing reserved!')
    except:
        if slack:
            slack.exception()
        raise


if __name__ == '__main__':
    sys.exit(main(sys.argv))
| 38.988462 | 125 | 0.61606 | import datetime
import logging
import os
import sys
import traceback
from dateutil import tz
from blazarclient import client as blazar_client
from ironicclient import client as ironic_client
from keystoneauth1 import adapter, loading, session
from keystoneauth1.identity import v3
from hammers import MySqlArgs
from hammers.slack import Slackbot
from hammers.util import base_parser
logging.basicConfig()
MAINT_LEASE_NAME = 'maint-of-{node_name}-by-{operator}-for-{reason}'
DATETIME_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
def valid_date(s):
    """Parse *s* with DATETIME_STR_FORMAT; return None for empty input.

    Used as an argparse ``type=`` callable, so invalid input raises
    ``argparse.ArgumentTypeError`` (which argparse turns into a usage error).
    """
    if not s:
        return None
    try:
        return datetime.datetime.strptime(s, DATETIME_STR_FORMAT)
    except ValueError:
        # BUG FIX: `argparse` is never imported at module level in this
        # file, so the original error path raised NameError instead of
        # the intended ArgumentTypeError. Import locally.
        import argparse
        msg = "Not a valid date: '{0}'.".format(s)
        raise argparse.ArgumentTypeError(msg)
def append_global_identity_args(parser, argv):
    """Register keystone auth CLI options and default them from OS_* env vars."""
    loading.register_auth_argparse_arguments(parser, argv, default='password')
    # Defaults come from the standard OpenStack environment variables;
    # the domain values fall back to 'default' when unset.
    defaults_from_env = {
        'os_auth_url': os.getenv('OS_AUTH_URL', None),
        'os_username': os.getenv('OS_USERNAME', None),
        'os_password': os.getenv('OS_PASSWORD', None),
        'os_project_name': os.getenv('OS_PROJECT_NAME', None),
        'os_project_id': os.getenv('OS_PROJECT_ID', None),
        'os_project_domain_id': os.getenv('OS_PROJECT_DOMAIN_ID', 'default'),
        'os_project_domain_name': os.getenv('OS_PROJECT_DOMAIN_NAME', 'default'),
        'os_user_domain_id': os.getenv('OS_USER_DOMAIN_ID', 'default'),
        'os_user_domain_name': os.getenv('OS_USER_DOMAIN_NAME', 'default'),
        'os_region_name': os.getenv('OS_REGION_NAME', None),
    }
    parser.set_defaults(**defaults_from_env)
def get_session(auth_url, username, password, project_name, user_domain_name='default',
                project_domain_name='default', region_name=None, interface=None):
    """Build a keystone v3 password session wrapped in an Adapter."""
    credentials = v3.Password(
        auth_url=auth_url,
        username=username,
        password=password,
        project_name=project_name,
        user_domain_name=user_domain_name,
        project_domain_name=project_domain_name,
    )
    ks_session = session.Session(auth=credentials)
    return adapter.Adapter(ks_session, region_name=region_name, interface=interface)
def get_nodes(sess, node_id_or_names):
    """Fetch ironic node records for the given node ids or names.

    Raises if the baremetal endpoint cannot be resolved or a node lookup
    fails.
    """
    token = sess.get_token()
    try:
        ironic_url = sess.get_endpoint(
            service_type='baremetal', interface='public')
    except Exception:
        traceback.print_exc(file=sys.stdout)
        # BUG FIX: the original swallowed this exception and fell through,
        # which then crashed with NameError on the unbound `ironic_url`.
        # Re-raise so the caller sees the real endpoint-lookup failure.
        raise
    ironic = ironic_client.get_client(1, token=token, endpoint=ironic_url)
    return [ironic.node.get(name) for name in node_id_or_names]
def get_node_earliest_reserve_time(db, node_uuid, requested_hours):
    """Find the earliest UTC time at which *node_uuid* has a free window of
    at least *requested_hours* hours among its existing Blazar leases.

    Scans the node's non-deleted, not-yet-ended leases in start-date order
    and returns the first inter-lease gap (padded by a 10-minute buffer)
    big enough to fit the request. If no gap fits, returns 10 minutes
    after the last lease ends; if the node has no upcoming leases, returns
    the current time.

    Args:
        db: MySQL connection wrapper exposing query(sql, params).
        node_uuid: str. Ironic node UUID (Blazar hypervisor_hostname).
        requested_hours: int. Length of the maintenance window needed.

    Returns:
        datetime.datetime. Earliest feasible lease start (naive UTC).
    """
    sql = '''SELECT l.start_date AS start_date, l.end_date AS end_date
             FROM blazar.leases AS l
             JOIN blazar.reservations AS r ON r.lease_id = l.id
             JOIN blazar.computehost_allocations AS ca ON r.id = ca.reservation_id
             JOIN blazar.computehosts AS ch ON ch.id = ca.compute_host_id
             WHERE ch.hypervisor_hostname=%(node_uuid)s
             AND l.deleted IS NULL
             AND l.end_date > UTC_TIMESTAMP()
             ORDER BY l.start_date'''
    current_time = datetime.datetime.utcnow()
    last_end_time = None
    for row in db.query(sql, {'node_uuid': node_uuid}):
        lease_start_time = row['start_date']
        lease_end_time = row['end_date']
        # Clamp leases that already started so gaps are measured from now.
        if lease_start_time < current_time:
            lease_start_time = current_time
        if last_end_time:
            # Gap between consecutive leases minus a 10-minute (600 s)
            # buffer, converted to hours.
            if ((lease_start_time - last_end_time).total_seconds() - 600) / 3600.0 > requested_hours:
                return last_end_time + datetime.timedelta(minutes=10)
        last_end_time = lease_end_time
    if last_end_time:
        return last_end_time + datetime.timedelta(minutes=10)
    else:
        return current_time
def reserve(sess, node, start_time, requested_hours, reason, operator, dryrun):
    """Create (or dry-run) a Blazar maintenance lease for one node.

    Args:
        sess: keystoneauth session scoped to the maintenance project.
        node: Ironic node object (uses .name and .uuid).
        start_time: datetime. Lease start, naive UTC.
        requested_hours: int. Lease length in hours.
        reason: str. Maintenance reason, embedded in the lease name.
        operator: str. Operator username, embedded in the lease name.
        dryrun: bool. When True, only print what would be reserved.

    Returns:
        tuple(str, str). Start and end times formatted in US Central time.
    """
    end_time = start_time + datetime.timedelta(hours=requested_hours)
    # Operators read times in Central time; leases are stored in UTC.
    start_time_str_in_ct = start_time.replace(tzinfo=tz.gettz('UTC')).astimezone(
        tz.gettz('America/Chicago')).strftime(DATETIME_STR_FORMAT)
    end_time_str_in_ct = end_time.replace(tzinfo=tz.gettz('UTC')).astimezone(
        tz.gettz('America/Chicago')).strftime(DATETIME_STR_FORMAT)
    print(((
        "Creating maintenance reservation for node {node_name} "
        "(id: {node_uuid}), starting {start} and ending {end} in central time"
    ).format(
        node_name=node.name,
        node_uuid=node.uuid,
        start=start_time_str_in_ct,
        end=end_time_str_in_ct)
    ))
    if not dryrun:
        blazar = blazar_client.Client(
            1, session=sess, service_type='reservation')
        # Pin the reservation to exactly this node via its UID.
        resource_properties = '["=", "$uid", "{node_uuid}"]'.format(
            node_uuid=node.uuid)
        phys_res = {'min': "1", 'max': "1", 'hypervisor_properties': "",
                    'resource_properties': resource_properties, 'resource_type': 'physical:host'}
        # Lease names cannot contain spaces; replace them with underscores.
        lease_name = MAINT_LEASE_NAME.format(node_name=node.name.replace(' ', '_'),
                                             operator=operator.replace(
                                                 ' ', '_'),
                                             reason=reason.replace(' ', '_'))
        lease = blazar.lease.create(name=lease_name,
                                    start=start_time.strftime(
                                        '%Y-%m-%d %H:%M'),
                                    end=end_time.strftime('%Y-%m-%d %H:%M'),
                                    reservations=[phys_res],
                                    events=[])
        print(("Lease {name} (id: {id}) created successfully!".format(
            name=lease['name'], id=lease['id'])))
    return start_time_str_in_ct, end_time_str_in_ct
def main(argv=None):
    """Entry point: reserve one or more Ironic nodes for maintenance.

    Parses credentials from CLI/environment, computes each node's
    earliest free window (unless --start-time is given), creates Blazar
    leases under the 'maintenance' project, and reports the results to
    stdout and (optionally) Slack.

    Args:
        argv: list(str)|None. Command-line arguments; defaults to sys.argv.
    """
    if argv is None:
        argv = sys.argv
    parser = base_parser('Reserve nodes for maintenance')
    append_global_identity_args(parser, argv)
    mysqlargs = MySqlArgs({
        'user': 'root',
        'password': '',
        'host': 'localhost',
        'port': 3306,
    })
    mysqlargs.inject(parser)
    parser.add_argument('--operator', type=str, required=True,
                        help='Chameleon account username of the operator')
    parser.add_argument('--nodes', type=str, required=True,
                        help='node ids or node names; comma separated')
    parser.add_argument('--reason', type=str, required=True,
                        help='maintenance reasons')
    parser.add_argument('--dry-run', action="store_true",
                        help='perform a trial run without making reservations')
    parser.add_argument('--start-time', type=valid_date, default=None,
                        help='lease start time (YYYY-mm-DD HH:MM:SS); if not given, start at the earliest possible datetime')
    parser.add_argument('--estimate-hours', type=int, default=168,
                        help='estimated hours required for maintenance; default is 168 hours (1 week)')
    args = parser.parse_args(argv[1:])
    slack = Slackbot(args.slack, script_name='maintenance-reservation') if args.slack else None
    mysqlargs.extract(args)
    db = mysqlargs.connect()
    auth_args = {'auth_url': args.os_auth_url,
                 'username': args.os_username,
                 'password': args.os_password,
                 'project_name': args.os_project_name,
                 'region_name': args.os_region_name,
                 'interface': 'public'}
    if args.os_user_domain_name:
        auth_args['user_domain_name'] = args.os_user_domain_name
    if args.os_project_domain_name:
        auth_args['project_domain_name'] = args.os_project_domain_name
    # Admin session looks nodes up; leases are created under the dedicated
    # 'maintenance' project with a second session.
    admin_sess = get_session(**auth_args)
    auth_args['project_name'] = 'maintenance'
    maint_sess = get_session(**auth_args)
    try:
        nodes = get_nodes(admin_sess, args.nodes.split(','))
        report_info = {}
        for node in nodes:
            lease_start_time = args.start_time
            if not lease_start_time:
                lease_start_time = get_node_earliest_reserve_time(db, node.uuid, args.estimate_hours)
            else:
                # A user-supplied start time is interpreted in the local
                # timezone and converted to UTC.
                lease_start_time = lease_start_time.replace(tzinfo=tz.tzlocal()).astimezone(tz.gettz('UTC'))
            reserve_args = {'sess': maint_sess,
                            'node': node,
                            'start_time': lease_start_time,
                            'requested_hours': args.estimate_hours,
                            'reason': args.reason,
                            'operator': args.operator,
                            'dryrun': args.dry_run}
            start_time_str, end_time_str = reserve(**reserve_args)
            report_info[node.name] = (start_time_str, end_time_str)
        report_lines = [
            ('Node {node_name} at {region} is under maintenance '
             'from {start_time} to {end_time}').format(
                node_name=key,
                region=args.os_region_name,
                start_time=value[0],
                end_time=value[1]
            )
            for key, value in report_info.items()
        ]
        if report_lines:
            report = '\n'.join(report_lines)
            print(report)
            if slack:
                slack.message(report)
        else:
            print('nothing reserved!')
    # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit;
    # it re-raises after notifying Slack, so it is report-then-propagate.
    except:
        if slack:
            slack.exception()
        raise
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| true | true |
1c321f551a7a9daf6e1a0849a0b6f9fcf2550348 | 25,940 | py | Python | core/platform/auth/firebase_auth_services.py | TheoLipeles/oppia | cd0bb873e08fa716014f3d1480fbbfee95b89121 | [
"Apache-2.0"
] | null | null | null | core/platform/auth/firebase_auth_services.py | TheoLipeles/oppia | cd0bb873e08fa716014f3d1480fbbfee95b89121 | [
"Apache-2.0"
] | null | null | null | core/platform/auth/firebase_auth_services.py | TheoLipeles/oppia | cd0bb873e08fa716014f3d1480fbbfee95b89121 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service layer for handling user-authentication with Firebase.
Oppia depends on OpenID Connect 1.0 to handle user authentication. We use
[Firebase authentication](https://firebase.google.com/docs/auth) to do the
heavy-lifting, especially for securely storing user credentials and associating
users to their identity providers. This helps us minimize the contact we make
with private information.
Terminology:
OpenID Connect 1.0 (OIDC):
A simple identity layer on top of the OAuth 2.0 protocol. It is a
specification (i.e. a strict set of algorithms, data structures, and
rules) that defines how two parties must share data about a user in
a secure way on that user's behalf.
OAuth 2.0 (OAuth):
The industry-standard protocol for authorization. It enables a
third-party application to obtain limited access to an HTTP service on
behalf of a user.
Claim:
A piece of information about a user (name, address, phone number, etc.)
that has been encrypted and digitally signed.
JSON Web Token (JWT):
A compact and URL-safe protocol primarily designed to send Claims
between two parties. Claims are organized into JSON objects that map
"Claim Names" to "Claim Values".
Identity provider:
An entity that creates, maintains, and manages identity information and
provides authentication services. Such services rely on JWTs to send
identity information. Examples of identity providers include: Google,
Facebook, Email verification links, and Text message SMS codes.
Subject Identifier:
A Claim that can uniquely identify a user. It is locally unique and
never reassigned with respect to the provider who issued it. The Claim's
name is 'sub'.
Example values: `24400320` or `AItOawmwtWwcT0k51BayewNvutrJUqsvl6qs7A4`.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import logging
from constants import constants
from core.domain import auth_domain
from core.platform import models
import feconf
import python_utils
import firebase_admin
from firebase_admin import auth as firebase_auth
from firebase_admin import exceptions as firebase_exceptions
auth_models, user_models = (
models.Registry.import_models([models.NAMES.auth, models.NAMES.user]))
transaction_services = models.Registry.import_transaction_services()
def establish_firebase_connection():
    """Establishes the connection to Firebase needed by the rest of the SDK.

    All Firebase operations go through an "app" object, and
    initialize_app() raises when called more than once, so this function
    is made idempotent by probing for an existing app first.

    Returns:
        firebase_admin.App. The App being by the Firebase SDK.

    Raises:
        Exception. The Firebase app has a genuine problem.
    """
    try:
        firebase_admin.get_app()
    except ValueError as error:
        # get_app() raises a ValueError mentioning initialize_app when no
        # app exists yet; any other ValueError is a real problem.
        if 'initialize_app' not in python_utils.UNICODE(error):
            raise
        firebase_admin.initialize_app(
            options={'projectId': feconf.OPPIA_PROJECT_ID})
def establish_auth_session(request, response):
    """Sets login cookies to maintain a user's sign-in session.

    Args:
        request: webapp2.Request. The request with the authorization to begin a
            new session.
        response: webapp2.Response. The response to establish the new session
            upon.
    """
    existing_cookie = _get_session_cookie(request)
    # A request that already carries a valid session cookie needs no work;
    # the session is established.
    if _get_auth_claims_from_session_cookie(existing_cookie) is not None:
        return
    new_cookie = firebase_auth.create_session_cookie(
        _get_id_token(request), feconf.FIREBASE_SESSION_COOKIE_MAX_AGE)
    response.set_cookie(
        feconf.FIREBASE_SESSION_COOKIE_NAME,
        value=new_cookie,
        max_age=feconf.FIREBASE_SESSION_COOKIE_MAX_AGE,
        overwrite=True,
        # The production server serves https; the local development server
        # serves plain http.
        secure=(not constants.EMULATOR_MODE),
        # HttpOnly keeps client-side scripts away from the cookie where the
        # browser supports it. See https://owasp.org/www-community/HttpOnly.
        httponly=True)
def destroy_auth_session(response):
    """Clears login cookies from the given response headers.

    Args:
        response: webapp2.Response. Response to clear the cookies from.
    """
    # Deleting the session cookie is all that is needed to end this
    # server's session; Firebase-side state is untouched.
    response.delete_cookie(feconf.FIREBASE_SESSION_COOKIE_NAME)
def get_auth_claims_from_request(request):
    """Authenticates the request and returns claims about its authorizer.

    Args:
        request: webapp2.Request. The HTTP request to authenticate.

    Returns:
        AuthClaims|None. Claims about the currently signed in user. If no user
        is signed in, then returns None.

    Raises:
        InvalidAuthSessionError. The request contains an invalid session.
        StaleAuthSessionError. The cookie has lost its authority.
    """
    session_cookie = _get_session_cookie(request)
    return _get_auth_claims_from_session_cookie(session_cookie)
def mark_user_for_deletion(user_id):
    """Marks the user, and all of their auth associations, as deleted.

    This function also disables the user's Firebase account so that they cannot
    be used to sign in.

    Args:
        user_id: str. The unique ID of the user whose associations should be
            deleted.
    """
    # NOTE: We use get_multi(include_deleted=True) because get() returns None
    # for models with deleted=True, but we need to make changes to those models
    # when managing deletion.
    (assoc_by_user_id_model,) = auth_models.UserAuthDetailsModel.get_multi(
        [user_id], include_deleted=True)
    if assoc_by_user_id_model is not None:
        assoc_by_user_id_model.deleted = True
        assoc_by_user_id_model.update_timestamps()
        assoc_by_user_id_model.put()
    # Prefer the auth ID recorded on the user's model; fall back to a
    # reverse lookup when the user model is missing.
    assoc_by_auth_id_model = (
        auth_models.UserIdByFirebaseAuthIdModel.get_by_user_id(user_id)
        if assoc_by_user_id_model is None else
        # NOTE: We use get_multi(include_deleted=True) because get() returns
        # None for models with deleted=True, but we need to make changes to
        # those models when managing deletion.
        auth_models.UserIdByFirebaseAuthIdModel.get_multi(
            [assoc_by_user_id_model.firebase_auth_id], include_deleted=True)[0])
    if assoc_by_auth_id_model is not None:
        assoc_by_auth_id_model.deleted = True
        assoc_by_auth_id_model.update_timestamps()
        assoc_by_auth_id_model.put()
    else:
        logging.error(
            '[WIPEOUT] User with user_id=%s has no Firebase account' % user_id)
        return
    try:
        firebase_auth.update_user(assoc_by_auth_id_model.id, disabled=True)
    except (firebase_exceptions.FirebaseError, ValueError):
        # NOTE: logging.exception appends the stack trace automatically. The
        # errors are not re-raised because wipeout_services, the user of this
        # function, does not use exceptions to keep track of failures. It uses
        # the verify_external_auth_associations_are_deleted() function instead.
        logging.exception(
            '[WIPEOUT] Failed to disable Firebase account! Stack trace:')
def delete_external_auth_associations(user_id):
    """Deletes all associations that refer to the user outside of Oppia.

    Args:
        user_id: str. The unique ID of the user whose associations should be
            deleted.
    """
    # include_deleted=True because the user has typically already been marked
    # for deletion by the time this runs.
    auth_id = get_auth_id_from_user_id(user_id, include_deleted=True)
    if auth_id is None:
        return
    try:
        firebase_auth.delete_user(auth_id)
    except firebase_auth.UserNotFoundError:
        logging.exception('[WIPEOUT] Firebase account already deleted')
    except (firebase_exceptions.FirebaseError, ValueError):
        # NOTE: logging.exception appends the stack trace automatically. The
        # errors are not re-raised because wipeout_services, the user of this
        # function, does not use exceptions to keep track of failures. It uses
        # the verify_external_auth_associations_are_deleted() function instead.
        logging.exception('[WIPEOUT] Firebase Admin SDK failed! Stack trace:')
def verify_external_auth_associations_are_deleted(user_id):
    """Returns true if and only if we have successfully verified that all
    external associations have been deleted.

    Args:
        user_id: str. The unique ID of the user whose associations should be
            checked.

    Returns:
        bool. True if and only if we have successfully verified that all
        external associations have been deleted.
    """
    auth_id = get_auth_id_from_user_id(user_id, include_deleted=True)
    # No auth ID on record means there is nothing external left to delete.
    if auth_id is None:
        return True
    try:
        # TODO(#11474): Replace with `get_users()` (plural) because `get_user()`
        # (singular) does not distinguish between disabled and deleted users. We
        # can't do it right now because firebase-admin==3.2.1 does not offer the
        # get_users() API. We will need to fix this when we've moved to a more
        # recent version (after the Python 3 migration).
        firebase_auth.get_user(auth_id)
    except firebase_auth.UserNotFoundError:
        return True
    except (firebase_exceptions.FirebaseError, ValueError):
        # NOTE: logging.exception appends the stack trace automatically. The
        # errors are not re-raised because wipeout_services, the user of this
        # function, will keep retrying the other "delete" family of functions
        # until this returns True (in 12h intervals).
        logging.exception('[WIPEOUT] Firebase Admin SDK failed! Stack trace:')
    # Reaching here means the Firebase account still exists (or the check
    # failed); either way, deletion is not yet verified.
    return False
def get_auth_id_from_user_id(user_id, include_deleted=False):
    """Returns the auth ID associated with the given user ID.

    Args:
        user_id: str. The user ID.
        include_deleted: bool. Whether to return the ID of models marked for
            deletion.

    Returns:
        str|None. The auth ID associated with the given user ID, or None if no
        association exists.
    """
    (model,) = auth_models.UserAuthDetailsModel.get_multi(
        [user_id], include_deleted=include_deleted)
    if model is None:
        return None
    return model.firebase_auth_id
def get_multi_auth_ids_from_user_ids(user_ids):
    """Returns the auth IDs associated with the given user IDs.

    Args:
        user_ids: list(str). The user IDs.

    Returns:
        list(str|None). The auth IDs associated with each of the given user IDs,
        or None for associations which don't exist.
    """
    models_by_user_id = auth_models.UserAuthDetailsModel.get_multi(user_ids)
    return [
        model.firebase_auth_id if model is not None else None
        for model in models_by_user_id
    ]
def get_user_id_from_auth_id(auth_id, include_deleted=False):
    """Returns the user ID associated with the given auth ID.

    Args:
        auth_id: str. The auth ID.
        include_deleted: bool. Whether to return the ID of models marked for
            deletion.

    Returns:
        str|None. The user ID associated with the given auth ID, or None if no
        association exists.
    """
    (model,) = auth_models.UserIdByFirebaseAuthIdModel.get_multi(
        [auth_id], include_deleted=include_deleted)
    if model is None:
        return None
    return model.user_id
def get_multi_user_ids_from_auth_ids(auth_ids):
    """Returns the user IDs associated with the given auth IDs.

    Args:
        auth_ids: list(str). The auth IDs.

    Returns:
        list(str|None). The user IDs associated with each of the given auth IDs,
        or None for associations which don't exist.
    """
    models_by_auth_id = (
        auth_models.UserIdByFirebaseAuthIdModel.get_multi(auth_ids))
    return [
        model.user_id if model is not None else None
        for model in models_by_auth_id
    ]
def associate_auth_id_with_user_id(auth_id_user_id_pair):
    """Commits the association between auth ID and user ID.

    Args:
        auth_id_user_id_pair: auth_domain.AuthIdUserIdPair. The association to
            commit.

    Raises:
        Exception. The IDs are already associated with a value.
    """
    auth_id, user_id = auth_id_user_id_pair
    # Reject the association if either side already points somewhere.
    user_id_collision = get_user_id_from_auth_id(auth_id, include_deleted=True)
    if user_id_collision is not None:
        raise Exception('auth_id=%r is already associated with user_id=%r' % (
            auth_id, user_id_collision))
    auth_id_collision = get_auth_id_from_user_id(user_id, include_deleted=True)
    if auth_id_collision is not None:
        raise Exception('user_id=%r is already associated with auth_id=%r' % (
            user_id, auth_id_collision))
    # A new {auth_id: user_id} mapping needs to be created. We know the model
    # doesn't exist because get_auth_id_from_user_id returned None, even with
    # include_deleted=True.
    assoc_by_auth_id_model = (
        auth_models.UserIdByFirebaseAuthIdModel(id=auth_id, user_id=user_id))
    assoc_by_auth_id_model.update_timestamps()
    assoc_by_auth_id_model.put()
    # The {user_id: auth_id} mapping needs to be created, but the model used to
    # store the relationship might already exist because other services use it
    # as well (e.g. user_services uses UserAuthDetailsModel.parent_user_id). In
    # such situations, the return value of get_auth_id_from_user_id would be
    # None, so that isn't strong enough to determine whether we need to create a
    # new model rather than update an existing one.
    #
    # NOTE: We use get_multi(include_deleted=True) because get() returns None
    # for models with deleted=True, but we need to make changes to those models
    # when managing deletion.
    (assoc_by_user_id_model,) = auth_models.UserAuthDetailsModel.get_multi(
        [user_id], include_deleted=True)
    if (assoc_by_user_id_model is None or
            assoc_by_user_id_model.firebase_auth_id is None):
        assoc_by_user_id_model = auth_models.UserAuthDetailsModel(
            id=user_id, firebase_auth_id=auth_id)
        assoc_by_user_id_model.update_timestamps()
        assoc_by_user_id_model.put()
def associate_multi_auth_ids_with_user_ids(auth_id_user_id_pairs):
    """Commits the associations between auth IDs and user IDs.

    Args:
        auth_id_user_id_pairs: list(auth_domain.AuthIdUserIdPair). The
            associations to commit.

    Raises:
        Exception. One or more auth associations already exist.
    """
    # Turn list(pair) to pair(list): https://stackoverflow.com/a/7558990/4859885
    auth_ids, user_ids = python_utils.ZIP(*auth_id_user_id_pairs)
    # Reject the whole batch if any pair collides with an existing mapping.
    user_id_collisions = get_multi_user_ids_from_auth_ids(auth_ids)
    if any(user_id is not None for user_id in user_id_collisions):
        user_id_collisions = ', '.join(
            '{auth_id=%r: user_id=%r}' % (auth_id, user_id)
            for auth_id, user_id in python_utils.ZIP(
                auth_ids, user_id_collisions)
            if user_id is not None)
        raise Exception('already associated: %s' % user_id_collisions)
    auth_id_collisions = get_multi_auth_ids_from_user_ids(user_ids)
    if any(auth_id is not None for auth_id in auth_id_collisions):
        auth_id_collisions = ', '.join(
            '{user_id=%r: auth_id=%r}' % (user_id, auth_id)
            for user_id, auth_id in python_utils.ZIP(
                user_ids, auth_id_collisions)
            if auth_id is not None)
        raise Exception('already associated: %s' % auth_id_collisions)
    # A new {auth_id: user_id} mapping needs to be created. We know the model
    # doesn't exist because get_auth_id_from_user_id returned None.
    assoc_by_auth_id_models = [
        auth_models.UserIdByFirebaseAuthIdModel(id=auth_id, user_id=user_id)
        for auth_id, user_id in python_utils.ZIP(auth_ids, user_ids)
    ]
    auth_models.UserIdByFirebaseAuthIdModel.update_timestamps_multi(
        assoc_by_auth_id_models)
    auth_models.UserIdByFirebaseAuthIdModel.put_multi(assoc_by_auth_id_models)
    # The {user_id: auth_id} mapping needs to be created, but the model used to
    # store the relationship might already exist because other services use it
    # as well (e.g. user_services uses UserAuthDetailsModel.parent_user_id). In
    # such situations, the return value of get_multi_auth_ids_from_user_ids
    # would be None, so that isn't strong enough to determine whether we need to
    # create a new model rather than update an existing one.
    assoc_by_user_id_models = [
        auth_models.UserAuthDetailsModel(id=user_id, firebase_auth_id=auth_id)
        for auth_id, user_id, assoc_by_user_id_model in python_utils.ZIP(
            auth_ids, user_ids,
            auth_models.UserAuthDetailsModel.get_multi(user_ids))
        if (assoc_by_user_id_model is None or
            assoc_by_user_id_model.firebase_auth_id is None)
    ]
    if assoc_by_user_id_models:
        auth_models.UserAuthDetailsModel.update_timestamps_multi(
            assoc_by_user_id_models)
        auth_models.UserAuthDetailsModel.put_multi(assoc_by_user_id_models)
def grant_super_admin_privileges(user_id):
    """Grants the user super admin privileges.

    Args:
        user_id: str. The Oppia user ID to promote to super admin.

    Raises:
        ValueError. The user has no associated Firebase account.
    """
    auth_id = get_auth_id_from_user_id(user_id)
    if auth_id is None:
        raise ValueError('user_id=%s has no Firebase account' % user_id)
    firebase_auth.set_custom_user_claims(
        auth_id, '{"role":"%s"}' % feconf.FIREBASE_ROLE_SUPER_ADMIN)
    # NOTE: Revoke session cookies and ID tokens of the user so they are forced
    # to log back in to obtain their updated privileges.
    firebase_auth.revoke_refresh_tokens(auth_id)
def revoke_super_admin_privileges(user_id):
    """Revokes the user's super admin privileges.

    Args:
        user_id: str. The Oppia user ID to revoke privileges from.

    Raises:
        ValueError. The user has no associated Firebase account.
    """
    auth_id = get_auth_id_from_user_id(user_id)
    if auth_id is None:
        raise ValueError('user_id=%s has no Firebase account' % user_id)
    # Passing None clears every custom claim, including the role.
    firebase_auth.set_custom_user_claims(auth_id, None)
    # NOTE: Revoke session cookies and ID tokens of the user so they are forced
    # to log back in to obtain their updated privileges.
    firebase_auth.revoke_refresh_tokens(auth_id)
def seed_firebase():
    """Prepares Oppia and Firebase to run the SeedFirebaseOneOffJob.

    NOTE: This function is idempotent.

    TODO(#11462): Delete this handler once the Firebase migration logic is
    rollback-safe and all backup data is using post-migration data.
    """
    seed_model = auth_models.FirebaseSeedModel.get(
        auth_models.ONLY_FIREBASE_SEED_MODEL_ID, strict=False)
    if seed_model is None: # Exactly 1 seed model must exist.
        auth_models.FirebaseSeedModel(
            id=auth_models.ONLY_FIREBASE_SEED_MODEL_ID).put()
    user_ids_with_admin_email = [
        key.id() for key in user_models.UserSettingsModel.query(
            user_models.UserSettingsModel.email == feconf.ADMIN_EMAIL_ADDRESS
        ).iter(keys_only=True)
    ]
    assoc_by_user_id_models = [
        model for model in auth_models.UserAuthDetailsModel.get_multi(
            user_ids_with_admin_email)
        if model is not None and model.gae_id != feconf.SYSTEM_COMMITTER_ID
    ]
    if len(assoc_by_user_id_models) != 1:
        raise Exception(
            '%s must correspond to exactly 1 user (excluding user_id=%s), but '
            'found user_ids=[%s]' % (
                feconf.ADMIN_EMAIL_ADDRESS, feconf.SYSTEM_COMMITTER_ID,
                ', '.join(m.id for m in assoc_by_user_id_models)))
    else:
        assoc_by_user_id_model = assoc_by_user_id_models[0]
        user_id = assoc_by_user_id_model.id
        auth_id = assoc_by_user_id_model.firebase_auth_id
    # Derive an auth ID from the user ID when none is recorded yet.
    if auth_id is None:
        auth_id = user_id[4:] if user_id.startswith('uid_') else user_id
        assoc_by_user_id_model.firebase_auth_id = auth_id
        assoc_by_user_id_model.update_timestamps(update_last_updated_time=False)
        assoc_by_user_id_model.put()
    assoc_by_auth_id_model = (
        auth_models.UserIdByFirebaseAuthIdModel.get(auth_id, strict=False))
    if assoc_by_auth_id_model is None:
        auth_models.UserIdByFirebaseAuthIdModel(
            id=auth_id, user_id=user_id).put()
    elif assoc_by_auth_id_model.user_id != user_id:
        assoc_by_auth_id_model.user_id = user_id
        assoc_by_auth_id_model.update_timestamps(update_last_updated_time=False)
        assoc_by_auth_id_model.put()
    custom_claims = '{"role":"%s"}' % feconf.FIREBASE_ROLE_SUPER_ADMIN
    try:
        user = firebase_auth.get_user_by_email(feconf.ADMIN_EMAIL_ADDRESS)
    except firebase_auth.UserNotFoundError:
        create_new_firebase_account = True
    else:
        # A Firebase account with a mismatched UID must be replaced so the
        # admin's UID agrees with the Oppia-side association.
        if user.uid != auth_id:
            firebase_auth.update_user(user.uid, disabled=True)
            firebase_auth.delete_user(user.uid)
            create_new_firebase_account = True
        else:
            firebase_auth.set_custom_user_claims(user.uid, custom_claims)
            create_new_firebase_account = False
    if create_new_firebase_account:
        firebase_auth.import_users([
            firebase_auth.ImportUserRecord(
                auth_id, email=feconf.ADMIN_EMAIL_ADDRESS,
                custom_claims=custom_claims),
        ])
def _get_session_cookie(request):
    """Returns the session cookie authorizing the signed in user, if present.

    Args:
        request: webapp2.Request. The HTTP request to inspect.

    Returns:
        str|None. Value of the session cookie authorizing the signed in user, if
        present, otherwise None.
    """
    request_cookies = request.cookies
    return request_cookies.get(feconf.FIREBASE_SESSION_COOKIE_NAME)
def _get_id_token(request):
"""Returns the ID token authorizing a user, or None if missing.
Oppia uses the OAuth 2.0's Bearer authentication scheme to send ID Tokens.
Bearer authentication (a.k.a. token authentication) is an HTTP
authentication scheme based on "bearer tokens", an encrypted JWT generated
by a trusted identity provider in response to login requests.
The name "Bearer authentication" can be understood as: "give access to the
bearer of this token." These tokens _must_ be sent in the `Authorization`
header of HTTP requests, and _must_ have the format: `Bearer <token>`.
Learn more about:
HTTP authentication schemes:
https://developer.mozilla.org/en-US/docs/Web/HTTP/Authentication
OAuth 2.0 Bearer authentication scheme:
https://oauth.net/2/bearer-tokens/
OpenID Connect 1.0 ID Tokens:
https://openid.net/specs/openid-connect-core-1_0.html#IDToken
Args:
request: webapp2.Request. The HTTP request to inspect.
Returns:
str|None. The ID Token of the request, if present, otherwise None.
"""
scheme, _, token = request.headers.get('Authorization', '').partition(' ')
return token if scheme == 'Bearer' else None
def _get_auth_claims_from_session_cookie(cookie):
    """Returns claims from the session cookie, or None if invalid.

    Args:
        cookie: str|None. The session cookie to extract claims from.

    Returns:
        AuthClaims|None. The claims from the session cookie, if available.
        Otherwise returns None.

    Raises:
        InvalidAuthSessionError. The cookie has an invalid value.
        StaleAuthSessionError. The cookie has lost its authority.
    """
    # It's OK for a session cookie to be None or empty, it just means that the
    # request hasn't been authenticated.
    if not cookie:
        return None
    try:
        claims = firebase_auth.verify_session_cookie(cookie, check_revoked=True)
    except firebase_auth.ExpiredSessionCookieError:
        raise auth_domain.StaleAuthSessionError('session has expired')
    except firebase_auth.RevokedSessionCookieError:
        raise auth_domain.StaleAuthSessionError('session has been revoked')
    except (firebase_exceptions.FirebaseError, ValueError) as error:
        raise auth_domain.InvalidAuthSessionError('session invalid: %s' % error)
    else:
        # Only reached when verification succeeded without raising.
        return _create_auth_claims(claims)
def _create_auth_claims(firebase_claims):
    """Returns a new AuthClaims domain object from Firebase claims.

    Args:
        firebase_claims: dict(str: *). The raw claims returned by the Firebase
            SDK.

    Returns:
        AuthClaims. Oppia's representation of auth claims.
    """
    email = firebase_claims.get('email')
    # A user is a super admin when they own the admin email address or carry
    # the super-admin role claim.
    is_super_admin = (
        email == feconf.ADMIN_EMAIL_ADDRESS or
        firebase_claims.get('role') == feconf.FIREBASE_ROLE_SUPER_ADMIN)
    return auth_domain.AuthClaims(
        firebase_claims.get('sub'), email, role_is_super_admin=is_super_admin)
| 40.404984 | 80 | 0.712452 |
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from constants import constants
from core.domain import auth_domain
from core.platform import models
import feconf
import python_utils
import firebase_admin
from firebase_admin import auth as firebase_auth
from firebase_admin import exceptions as firebase_exceptions
auth_models, user_models = (
models.Registry.import_models([models.NAMES.auth, models.NAMES.user]))
transaction_services = models.Registry.import_transaction_services()
def establish_firebase_connection():
    """Idempotently initializes the Firebase Admin SDK app.

    Probes for an existing app first because initialize_app() raises when
    called more than once; a ValueError not caused by a missing app is
    re-raised.
    """
    try:
        firebase_admin.get_app()
    except ValueError as error:
        if 'initialize_app' in python_utils.UNICODE(error):
            firebase_admin.initialize_app(
                options={'projectId': feconf.OPPIA_PROJECT_ID})
        else:
            raise
def establish_auth_session(request, response):
    """Sets login cookies on *response* to maintain a user's sign-in session.

    If the request already carries a valid session cookie, no action is
    taken; otherwise a fresh session cookie is minted from the request's
    ID token.
    """
    claims = _get_auth_claims_from_session_cookie(_get_session_cookie(request))
    # If the request already contains a valid session cookie, there's no
    # action necessary; the session is already established.
    if claims is not None:
        return
    fresh_cookie = firebase_auth.create_session_cookie(
        _get_id_token(request), feconf.FIREBASE_SESSION_COOKIE_MAX_AGE)
    response.set_cookie(
        feconf.FIREBASE_SESSION_COOKIE_NAME,
        value=fresh_cookie,
        max_age=feconf.FIREBASE_SESSION_COOKIE_MAX_AGE,
        overwrite=True,
        # Toggles https vs http. The production server uses https, but the local
        # development server uses http.
        secure=(not constants.EMULATOR_MODE),
        # Using the HttpOnly flag when generating a cookie helps mitigate the
        # risk of client side script accessing the protected cookie (if the
        # browser supports it).
        # Learn more: https://owasp.org/www-community/HttpOnly.
        httponly=True)
def destroy_auth_session(response):
    """Clears the Firebase session cookie from the given response headers."""
    response.delete_cookie(feconf.FIREBASE_SESSION_COOKIE_NAME)
def get_auth_claims_from_request(request):
    """Authenticates *request* and returns AuthClaims for the signed-in
    user, or None when no user is signed in. May raise the session-error
    types produced by _get_auth_claims_from_session_cookie.
    """
    return _get_auth_claims_from_session_cookie(_get_session_cookie(request))
def mark_user_for_deletion(user_id):
    """Marks the user's auth association models as deleted and disables
    their Firebase account so it can no longer be used to sign in.
    """
    # NOTE: We use get_multi(include_deleted=True) because get() returns None
    # for models with deleted=True, but we need to make changes to those models
    # when managing deletion.
    (assoc_by_user_id_model,) = auth_models.UserAuthDetailsModel.get_multi(
        [user_id], include_deleted=True)
    if assoc_by_user_id_model is not None:
        assoc_by_user_id_model.deleted = True
        assoc_by_user_id_model.update_timestamps()
        assoc_by_user_id_model.put()
    assoc_by_auth_id_model = (
        auth_models.UserIdByFirebaseAuthIdModel.get_by_user_id(user_id)
        if assoc_by_user_id_model is None else
        # NOTE: We use get_multi(include_deleted=True) because get() returns
        # None for models with deleted=True, but we need to make changes to
        # those models when managing deletion.
        auth_models.UserIdByFirebaseAuthIdModel.get_multi(
            [assoc_by_user_id_model.firebase_auth_id], include_deleted=True)[0])
    if assoc_by_auth_id_model is not None:
        assoc_by_auth_id_model.deleted = True
        assoc_by_auth_id_model.update_timestamps()
        assoc_by_auth_id_model.put()
    else:
        logging.error(
            '[WIPEOUT] User with user_id=%s has no Firebase account' % user_id)
        return
    try:
        firebase_auth.update_user(assoc_by_auth_id_model.id, disabled=True)
    except (firebase_exceptions.FirebaseError, ValueError):
        # NOTE: logging.exception appends the stack trace automatically. The
        # errors are not re-raised because wipeout_services, the user of this
        # function, does not use exceptions to keep track of failures. It uses
        # the verify_external_auth_associations_are_deleted() function instead.
        logging.exception(
            '[WIPEOUT] Failed to disable Firebase account! Stack trace:')
def delete_external_auth_associations(user_id):
auth_id = get_auth_id_from_user_id(user_id, include_deleted=True)
if auth_id is None:
return
try:
firebase_auth.delete_user(auth_id)
except firebase_auth.UserNotFoundError:
logging.exception('[WIPEOUT] Firebase account already deleted')
except (firebase_exceptions.FirebaseError, ValueError):
# NOTE: logging.exception appends the stack trace automatically. The
# errors are not re-raised because wipeout_services, the user of this
# function, does not use exceptions to keep track of failures. It uses
# the verify_external_auth_associations_are_deleted() function instead.
logging.exception('[WIPEOUT] Firebase Admin SDK failed! Stack trace:')
def verify_external_auth_associations_are_deleted(user_id):
auth_id = get_auth_id_from_user_id(user_id, include_deleted=True)
if auth_id is None:
return True
try:
# TODO(#11474): Replace with `get_users()` (plural) because `get_user()`
# (singular) does not distinguish between disabled and deleted users. We
# can't do it right now because firebase-admin==3.2.1 does not offer the
# recent version (after the Python 3 migration).
firebase_auth.get_user(auth_id)
except firebase_auth.UserNotFoundError:
return True
except (firebase_exceptions.FirebaseError, ValueError):
# NOTE: logging.exception appends the stack trace automatically. The
# errors are not re-raised because wipeout_services, the user of this
# function, will keep retrying the other "delete" family of functions
# until this returns True (in 12h intervals).
logging.exception('[WIPEOUT] Firebase Admin SDK failed! Stack trace:')
return False
def get_auth_id_from_user_id(user_id, include_deleted=False):
(assoc_by_user_id_model,) = auth_models.UserAuthDetailsModel.get_multi(
[user_id], include_deleted=include_deleted)
return (
None if assoc_by_user_id_model is None else
assoc_by_user_id_model.firebase_auth_id)
def get_multi_auth_ids_from_user_ids(user_ids):
return [
None if model is None else model.firebase_auth_id
for model in auth_models.UserAuthDetailsModel.get_multi(user_ids)
]
def get_user_id_from_auth_id(auth_id, include_deleted=False):
(assoc_by_auth_id_model,) = (
auth_models.UserIdByFirebaseAuthIdModel.get_multi(
[auth_id], include_deleted=include_deleted))
return (
None if assoc_by_auth_id_model is None else
assoc_by_auth_id_model.user_id)
def get_multi_user_ids_from_auth_ids(auth_ids):
return [
None if model is None else model.user_id
for model in auth_models.UserIdByFirebaseAuthIdModel.get_multi(auth_ids)
]
def associate_auth_id_with_user_id(auth_id_user_id_pair):
auth_id, user_id = auth_id_user_id_pair
user_id_collision = get_user_id_from_auth_id(auth_id, include_deleted=True)
if user_id_collision is not None:
raise Exception('auth_id=%r is already associated with user_id=%r' % (
auth_id, user_id_collision))
auth_id_collision = get_auth_id_from_user_id(user_id, include_deleted=True)
if auth_id_collision is not None:
raise Exception('user_id=%r is already associated with auth_id=%r' % (
user_id, auth_id_collision))
# A new {auth_id: user_id} mapping needs to be created. We know the model
# doesn't exist because get_auth_id_from_user_id returned None, even with
assoc_by_auth_id_model = (
auth_models.UserIdByFirebaseAuthIdModel(id=auth_id, user_id=user_id))
assoc_by_auth_id_model.update_timestamps()
assoc_by_auth_id_model.put()
# new model rather than update an existing one.
#
# NOTE: We use get_multi(include_deleted=True) because get() returns None
# for models with deleted=True, but we need to make changes to those models
# when managing deletion.
(assoc_by_user_id_model,) = auth_models.UserAuthDetailsModel.get_multi(
[user_id], include_deleted=True)
if (assoc_by_user_id_model is None or
assoc_by_user_id_model.firebase_auth_id is None):
assoc_by_user_id_model = auth_models.UserAuthDetailsModel(
id=user_id, firebase_auth_id=auth_id)
assoc_by_user_id_model.update_timestamps()
assoc_by_user_id_model.put()
def associate_multi_auth_ids_with_user_ids(auth_id_user_id_pairs):
# Turn list(pair) to pair(list): https://stackoverflow.com/a/7558990/4859885
auth_ids, user_ids = python_utils.ZIP(*auth_id_user_id_pairs)
user_id_collisions = get_multi_user_ids_from_auth_ids(auth_ids)
if any(user_id is not None for user_id in user_id_collisions):
user_id_collisions = ', '.join(
'{auth_id=%r: user_id=%r}' % (auth_id, user_id)
for auth_id, user_id in python_utils.ZIP(
auth_ids, user_id_collisions)
if user_id is not None)
raise Exception('already associated: %s' % user_id_collisions)
auth_id_collisions = get_multi_auth_ids_from_user_ids(user_ids)
if any(auth_id is not None for auth_id in auth_id_collisions):
auth_id_collisions = ', '.join(
'{user_id=%r: auth_id=%r}' % (user_id, auth_id)
for user_id, auth_id in python_utils.ZIP(
user_ids, auth_id_collisions)
if auth_id is not None)
raise Exception('already associated: %s' % auth_id_collisions)
# A new {auth_id: user_id} mapping needs to be created. We know the model
# doesn't exist because get_auth_id_from_user_id returned None.
assoc_by_auth_id_models = [
auth_models.UserIdByFirebaseAuthIdModel(id=auth_id, user_id=user_id)
for auth_id, user_id in python_utils.ZIP(auth_ids, user_ids)
]
auth_models.UserIdByFirebaseAuthIdModel.update_timestamps_multi(
assoc_by_auth_id_models)
auth_models.UserIdByFirebaseAuthIdModel.put_multi(assoc_by_auth_id_models)
# create a new model rather than update an existing one.
assoc_by_user_id_models = [
auth_models.UserAuthDetailsModel(id=user_id, firebase_auth_id=auth_id)
for auth_id, user_id, assoc_by_user_id_model in python_utils.ZIP(
auth_ids, user_ids,
auth_models.UserAuthDetailsModel.get_multi(user_ids))
if (assoc_by_user_id_model is None or
assoc_by_user_id_model.firebase_auth_id is None)
]
if assoc_by_user_id_models:
auth_models.UserAuthDetailsModel.update_timestamps_multi(
assoc_by_user_id_models)
auth_models.UserAuthDetailsModel.put_multi(assoc_by_user_id_models)
def grant_super_admin_privileges(user_id):
auth_id = get_auth_id_from_user_id(user_id)
if auth_id is None:
raise ValueError('user_id=%s has no Firebase account' % user_id)
custom_claims = '{"role":"%s"}' % feconf.FIREBASE_ROLE_SUPER_ADMIN
firebase_auth.set_custom_user_claims(auth_id, custom_claims)
# NOTE: Revoke session cookies and ID tokens of the user so they are forced
# to log back in to obtain their updated privileges.
firebase_auth.revoke_refresh_tokens(auth_id)
def revoke_super_admin_privileges(user_id):
auth_id = get_auth_id_from_user_id(user_id)
if auth_id is None:
raise ValueError('user_id=%s has no Firebase account' % user_id)
firebase_auth.set_custom_user_claims(auth_id, None)
# NOTE: Revoke session cookies and ID tokens of the user so they are forced
# to log back in to obtain their updated privileges.
firebase_auth.revoke_refresh_tokens(auth_id)
def seed_firebase():
seed_model = auth_models.FirebaseSeedModel.get(
auth_models.ONLY_FIREBASE_SEED_MODEL_ID, strict=False)
if seed_model is None: # Exactly 1 seed model must exist.
auth_models.FirebaseSeedModel(
id=auth_models.ONLY_FIREBASE_SEED_MODEL_ID).put()
user_ids_with_admin_email = [
key.id() for key in user_models.UserSettingsModel.query(
user_models.UserSettingsModel.email == feconf.ADMIN_EMAIL_ADDRESS
).iter(keys_only=True)
]
assoc_by_user_id_models = [
model for model in auth_models.UserAuthDetailsModel.get_multi(
user_ids_with_admin_email)
if model is not None and model.gae_id != feconf.SYSTEM_COMMITTER_ID
]
if len(assoc_by_user_id_models) != 1:
raise Exception(
'%s must correspond to exactly 1 user (excluding user_id=%s), but '
'found user_ids=[%s]' % (
feconf.ADMIN_EMAIL_ADDRESS, feconf.SYSTEM_COMMITTER_ID,
', '.join(m.id for m in assoc_by_user_id_models)))
else:
assoc_by_user_id_model = assoc_by_user_id_models[0]
user_id = assoc_by_user_id_model.id
auth_id = assoc_by_user_id_model.firebase_auth_id
if auth_id is None:
auth_id = user_id[4:] if user_id.startswith('uid_') else user_id
assoc_by_user_id_model.firebase_auth_id = auth_id
assoc_by_user_id_model.update_timestamps(update_last_updated_time=False)
assoc_by_user_id_model.put()
assoc_by_auth_id_model = (
auth_models.UserIdByFirebaseAuthIdModel.get(auth_id, strict=False))
if assoc_by_auth_id_model is None:
auth_models.UserIdByFirebaseAuthIdModel(
id=auth_id, user_id=user_id).put()
elif assoc_by_auth_id_model.user_id != user_id:
assoc_by_auth_id_model.user_id = user_id
assoc_by_auth_id_model.update_timestamps(update_last_updated_time=False)
assoc_by_auth_id_model.put()
custom_claims = '{"role":"%s"}' % feconf.FIREBASE_ROLE_SUPER_ADMIN
try:
user = firebase_auth.get_user_by_email(feconf.ADMIN_EMAIL_ADDRESS)
except firebase_auth.UserNotFoundError:
create_new_firebase_account = True
else:
if user.uid != auth_id:
firebase_auth.update_user(user.uid, disabled=True)
firebase_auth.delete_user(user.uid)
create_new_firebase_account = True
else:
firebase_auth.set_custom_user_claims(user.uid, custom_claims)
create_new_firebase_account = False
if create_new_firebase_account:
firebase_auth.import_users([
firebase_auth.ImportUserRecord(
auth_id, email=feconf.ADMIN_EMAIL_ADDRESS,
custom_claims=custom_claims),
])
def _get_session_cookie(request):
return request.cookies.get(feconf.FIREBASE_SESSION_COOKIE_NAME)
def _get_id_token(request):
scheme, _, token = request.headers.get('Authorization', '').partition(' ')
return token if scheme == 'Bearer' else None
def _get_auth_claims_from_session_cookie(cookie):
# It's OK for a session cookie to be None or empty, it just means that the
if not cookie:
return None
try:
claims = firebase_auth.verify_session_cookie(cookie, check_revoked=True)
except firebase_auth.ExpiredSessionCookieError:
raise auth_domain.StaleAuthSessionError('session has expired')
except firebase_auth.RevokedSessionCookieError:
raise auth_domain.StaleAuthSessionError('session has been revoked')
except (firebase_exceptions.FirebaseError, ValueError) as error:
raise auth_domain.InvalidAuthSessionError('session invalid: %s' % error)
else:
return _create_auth_claims(claims)
def _create_auth_claims(firebase_claims):
auth_id = firebase_claims.get('sub')
email = firebase_claims.get('email')
role_is_super_admin = (
email == feconf.ADMIN_EMAIL_ADDRESS or
firebase_claims.get('role') == feconf.FIREBASE_ROLE_SUPER_ADMIN)
return auth_domain.AuthClaims(
auth_id, email, role_is_super_admin=role_is_super_admin)
| true | true |
1c321f97ee430e4a3ee9e112f6ace089525c4b15 | 1,187 | py | Python | django_kmatch/fields.py | wesleykendall/django-kmatch | 0ca5d8465461210aa98fd3fb9afd2ec3838a4f9b | [
"MIT"
] | null | null | null | django_kmatch/fields.py | wesleykendall/django-kmatch | 0ca5d8465461210aa98fd3fb9afd2ec3838a4f9b | [
"MIT"
] | 2 | 2015-03-27T18:10:34.000Z | 2015-03-30T17:39:44.000Z | django_kmatch/fields.py | wesleykendall/django-kmatch | 0ca5d8465461210aa98fd3fb9afd2ec3838a4f9b | [
"MIT"
] | 5 | 2015-03-27T17:49:20.000Z | 2016-11-28T22:29:54.000Z | from jsonfield import JSONField
from kmatch import K
class KField(JSONField):
"""Stores a kmatch pattern and returns a compiled K object.
The KField field stores a kmatch pattern in a JSONField. The pattern is compiled and returned as
a K object when accessing the field. Invalid kmatch patterns cannot be stored.
"""
description = 'A kmatch pattern'
def pre_init(self, value, obj):
"""
Used to obtain a K object for a provided pattern. Normally this is done in the to_python method
of a Django custom field. However, this field inherits JSONField, and JSONField had to do
conversions in the pre_init method.
"""
value = super(KField, self).pre_init(value, obj)
return K(value) if not isinstance(value, K) and value is not None else value
def get_db_prep_value(self, value, connection, prepared=False):
"""
Converts a K object to a pattern. This pattern will be serialized to JSON and saved as a
TextField.
"""
if isinstance(value, K):
value = value.pattern
return super(KField, self).get_db_prep_value(value, connection, prepared=False)
| 39.566667 | 103 | 0.680708 | from jsonfield import JSONField
from kmatch import K
class KField(JSONField):
description = 'A kmatch pattern'
def pre_init(self, value, obj):
value = super(KField, self).pre_init(value, obj)
return K(value) if not isinstance(value, K) and value is not None else value
def get_db_prep_value(self, value, connection, prepared=False):
if isinstance(value, K):
value = value.pattern
return super(KField, self).get_db_prep_value(value, connection, prepared=False)
| true | true |
1c322128e9fb297f1e65f06a7d4a1823b754ab52 | 9,854 | py | Python | tests/python/contrib/test_hexagon/test_launcher.py | HeRCLab/tvm | bd14a4d36e0d364ef9bd34b2ee96cc09ce64d4b3 | [
"Apache-2.0"
] | null | null | null | tests/python/contrib/test_hexagon/test_launcher.py | HeRCLab/tvm | bd14a4d36e0d364ef9bd34b2ee96cc09ce64d4b3 | [
"Apache-2.0"
] | null | null | null | tests/python/contrib/test_hexagon/test_launcher.py | HeRCLab/tvm | bd14a4d36e0d364ef9bd34b2ee96cc09ce64d4b3 | [
"Apache-2.0"
] | 1 | 2022-03-02T16:24:54.000Z | 2022-03-02T16:24:54.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import pytest
import numpy as np
import logging
import tvm.testing
from tvm import te
from tvm import relay
from tvm.relay.backend import Executor, Runtime
from tvm.contrib import utils, ndk
from tvm.contrib.hexagon.build import HexagonLauncher
import tvm.contrib.hexagon.hexagon as hexagon
from .conftest import requires_hexagon_toolchain
@requires_hexagon_toolchain
def test_add(android_serial_number, tvm_tracker_host, tvm_tracker_port):
dtype = "int8"
A = tvm.te.placeholder((2,), dtype=dtype)
B = tvm.te.placeholder((1,), dtype=dtype)
C = tvm.te.compute(A.shape, lambda i: A[i] + B[0], name="C")
sched = tvm.te.create_schedule(C.op)
target_hexagon = tvm.target.hexagon("v68", link_params=True)
func = tvm.build(
sched, [A, B, C], tvm.target.Target(target_hexagon, host=target_hexagon), name="add"
)
temp = utils.tempdir()
dso_binary = "test_binary.so"
dso_binary_path = temp.relpath(dso_binary)
func.save(dso_binary_path)
if not android_serial_number:
pytest.skip("Skip hardware test since ANDROID_SERIAL_NUMBER is not set.")
launcher = HexagonLauncher(serial_number=android_serial_number)
launcher.android_run_rpc(rpc_tracker_host=tvm_tracker_host, rpc_tracker_port=tvm_tracker_port)
launcher.hexagon_setup()
remote_kw = {
"host": tvm_tracker_host,
"port": tvm_tracker_port,
"priority": 0,
"timeout": 60,
}
launcher.hexagon_session_setup(remote_kw)
launcher.upload(dso_binary_path, dso_binary)
with launcher.session as sess:
mod = launcher.get_module(dso_binary)
A_data = tvm.nd.array(np.array([2, 3], dtype=dtype), device=sess.device)
assert (A_data.numpy() == np.array([2, 3])).all()
B_data = tvm.nd.array(np.array([4], dtype=dtype), device=sess.device)
assert (B_data.numpy() == np.array([4])).all()
C_data = tvm.nd.array(np.array([0, 0], dtype=dtype), device=sess.device)
assert (C_data.numpy() == np.array([0, 0])).all()
mod["add"](A_data, B_data, C_data)
assert (C_data.numpy() == np.array([6, 7])).all()
launcher.close()
@requires_hexagon_toolchain
def test_add_vtcm(android_serial_number, tvm_tracker_host, tvm_tracker_port):
dtype = "int8"
A = tvm.te.placeholder((2,), dtype=dtype)
B = tvm.te.placeholder((1,), dtype=dtype)
C = tvm.te.compute(A.shape, lambda i: A[i] + B[0], name="C")
sched = tvm.te.create_schedule(C.op)
target_hexagon = tvm.target.hexagon("v68", link_params=True)
func = tvm.build(
sched, [A, B, C], tvm.target.Target(target_hexagon, host=target_hexagon), name="add"
)
temp = utils.tempdir()
dso_binary = "test_binary.so"
dso_binary_path = temp.relpath(dso_binary)
func.save(dso_binary_path)
if not android_serial_number:
pytest.skip("Skip hardware test since ANDROID_SERIAL_NUMBER is not set.")
launcher = HexagonLauncher(serial_number=android_serial_number)
launcher.android_run_rpc(rpc_tracker_host=tvm_tracker_host, rpc_tracker_port=tvm_tracker_port)
launcher.hexagon_setup()
remote_kw = {
"host": tvm_tracker_host,
"port": tvm_tracker_port,
"priority": 0,
"timeout": 60,
}
launcher.hexagon_session_setup(remote_kw)
launcher.upload(dso_binary_path, dso_binary)
with launcher.session as sess:
mod = launcher.get_module(dso_binary)
A_data = tvm.nd.empty(A.shape, A.dtype, sess.device, "global.vtcm")
A_data.copyfrom(np.array([2, 3]))
B_data = tvm.nd.empty(B.shape, B.dtype, sess.device, "global.vtcm")
B_data.copyfrom(np.array([4]))
C_data = tvm.nd.empty(C.shape, C.dtype, sess.device, "global.vtcm")
C_data.copyfrom(np.array([0, 0]))
mod["add"](A_data, B_data, C_data)
result = C_data.numpy()
assert (result == np.array([6, 7])).all()
launcher.close()
class TestMatMul:
M = tvm.testing.parameter(32)
N = tvm.testing.parameter(32)
K = tvm.testing.parameter(32)
@requires_hexagon_toolchain
def test_matmul(self, android_serial_number, tvm_tracker_host, tvm_tracker_port, M, N, K):
X = te.placeholder((M, K), dtype="float32")
Y = te.placeholder((K, N), dtype="float32")
k1 = te.reduce_axis((0, K), name="k1")
Z = te.compute((M, N), lambda i, j: te.sum(X[i, k1] * Y[k1, j], axis=[k1]))
schedule = te.create_schedule(Z.op)
target_hexagon = tvm.target.hexagon("v68", link_params=True)
func = tvm.build(
schedule, [X, Y, Z], tvm.target.Target(target_hexagon, host=target_hexagon)
)
temp = utils.tempdir()
dso_binary = "test_binary.so"
dso_binary_path = temp.relpath(dso_binary)
func.save(dso_binary_path)
if not android_serial_number:
pytest.skip("Skip hardware test since ANDROID_SERIAL_NUMBER is not set.")
launcher = HexagonLauncher(serial_number=android_serial_number)
launcher.android_run_rpc(
rpc_tracker_host=tvm_tracker_host, rpc_tracker_port=tvm_tracker_port
)
launcher.hexagon_setup()
remote_kw = {
"host": tvm_tracker_host,
"port": tvm_tracker_port,
"priority": 0,
"timeout": 60,
}
launcher.hexagon_session_setup(remote_kw)
launcher.upload(dso_binary_path, dso_binary)
x = np.random.uniform(size=[i.value for i in X.shape]).astype(X.dtype)
y = np.random.uniform(size=[i.value for i in Y.shape]).astype(Y.dtype)
z = np.zeros([i.value for i in Z.shape], dtype=Z.dtype)
with launcher.session as sess:
mod = launcher.get_module(dso_binary)
xt = tvm.nd.array(x, device=sess.device)
yt = tvm.nd.array(y, device=sess.device)
zt = tvm.nd.array(z, device=sess.device)
mod(xt, yt, zt)
target_llvm = tvm.target.Target("llvm")
mod = tvm.build(schedule, [X, Y, Z], tvm.target.Target(target_llvm, host=target_llvm))
device = tvm.cpu(0)
xtcpu = tvm.nd.array(x, device)
ytcpu = tvm.nd.array(y, device)
ztcpu = tvm.nd.array(z, device)
mod(xtcpu, ytcpu, ztcpu)
launcher.close()
tvm.testing.assert_allclose(zt.numpy(), ztcpu.numpy(), rtol=1e-4)
@requires_hexagon_toolchain
def test_graph_executor(android_serial_number, tvm_tracker_host, tvm_tracker_port):
dtype = "float32"
data = relay.var("data", relay.TensorType((1, 64, 64, 3), dtype))
weight = relay.var("weight", relay.TensorType((5, 5, 3, 8), dtype))
y = relay.nn.conv2d(
data,
weight,
padding=(2, 2),
kernel_size=(5, 5),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="float32",
)
f = relay.Function([data, weight], y)
relay_mod = tvm.IRModule.from_expr(f)
relay_mod = relay.transform.InferType()(relay_mod)
target_hexagon = tvm.target.hexagon("v68")
runtime = Runtime("cpp")
executor = Executor("graph")
temp = utils.tempdir()
dso_binary = "test_binary.so"
dso_binary_path = temp.relpath(dso_binary)
with tvm.transform.PassContext(opt_level=3):
lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(target_hexagon, host=target_hexagon),
runtime=runtime,
executor=executor,
)
lowered.get_lib().save(dso_binary_path)
if not android_serial_number:
pytest.skip("Skip hardware test since ANDROID_SERIAL_NUMBER is not set.")
launcher = HexagonLauncher(serial_number=android_serial_number)
launcher.android_run_rpc(rpc_tracker_host=tvm_tracker_host, rpc_tracker_port=tvm_tracker_port)
launcher.hexagon_setup()
remote_kw = {
"host": tvm_tracker_host,
"port": tvm_tracker_port,
"priority": 0,
"timeout": 60,
}
launcher.hexagon_session_setup(remote_kw)
launcher.upload(dso_binary_path, dso_binary)
graph_mod = launcher.get_graph_executor(lowered, dso_binary)
weight_in = np.random.rand(5, 5, 3, 8).astype(dtype=dtype)
data_in = np.random.rand(1, 64, 64, 3).astype(dtype=dtype)
graph_mod.set_input(weight=weight_in)
graph_mod.run(data=data_in)
hexagon_output = graph_mod.get_output(0).numpy()
target_llvm = tvm.target.Target("llvm")
with tvm.transform.PassContext(opt_level=3):
llvm_lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(target_llvm, host=target_llvm),
runtime=runtime,
executor=executor,
)
llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered["default"](tvm.cpu(0)))
llvm_graph_mod.set_input(weight=weight_in)
llvm_graph_mod.run(data=data_in)
expected_output = llvm_graph_mod.get_output(0).numpy()
launcher.close()
tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
if __name__ == "__main__":
sys.exit(pytest.main(sys.argv))
| 36.496296 | 98 | 0.667242 |
import sys
import pytest
import numpy as np
import logging
import tvm.testing
from tvm import te
from tvm import relay
from tvm.relay.backend import Executor, Runtime
from tvm.contrib import utils, ndk
from tvm.contrib.hexagon.build import HexagonLauncher
import tvm.contrib.hexagon.hexagon as hexagon
from .conftest import requires_hexagon_toolchain
@requires_hexagon_toolchain
def test_add(android_serial_number, tvm_tracker_host, tvm_tracker_port):
dtype = "int8"
A = tvm.te.placeholder((2,), dtype=dtype)
B = tvm.te.placeholder((1,), dtype=dtype)
C = tvm.te.compute(A.shape, lambda i: A[i] + B[0], name="C")
sched = tvm.te.create_schedule(C.op)
target_hexagon = tvm.target.hexagon("v68", link_params=True)
func = tvm.build(
sched, [A, B, C], tvm.target.Target(target_hexagon, host=target_hexagon), name="add"
)
temp = utils.tempdir()
dso_binary = "test_binary.so"
dso_binary_path = temp.relpath(dso_binary)
func.save(dso_binary_path)
if not android_serial_number:
pytest.skip("Skip hardware test since ANDROID_SERIAL_NUMBER is not set.")
launcher = HexagonLauncher(serial_number=android_serial_number)
launcher.android_run_rpc(rpc_tracker_host=tvm_tracker_host, rpc_tracker_port=tvm_tracker_port)
launcher.hexagon_setup()
remote_kw = {
"host": tvm_tracker_host,
"port": tvm_tracker_port,
"priority": 0,
"timeout": 60,
}
launcher.hexagon_session_setup(remote_kw)
launcher.upload(dso_binary_path, dso_binary)
with launcher.session as sess:
mod = launcher.get_module(dso_binary)
A_data = tvm.nd.array(np.array([2, 3], dtype=dtype), device=sess.device)
assert (A_data.numpy() == np.array([2, 3])).all()
B_data = tvm.nd.array(np.array([4], dtype=dtype), device=sess.device)
assert (B_data.numpy() == np.array([4])).all()
C_data = tvm.nd.array(np.array([0, 0], dtype=dtype), device=sess.device)
assert (C_data.numpy() == np.array([0, 0])).all()
mod["add"](A_data, B_data, C_data)
assert (C_data.numpy() == np.array([6, 7])).all()
launcher.close()
@requires_hexagon_toolchain
def test_add_vtcm(android_serial_number, tvm_tracker_host, tvm_tracker_port):
dtype = "int8"
A = tvm.te.placeholder((2,), dtype=dtype)
B = tvm.te.placeholder((1,), dtype=dtype)
C = tvm.te.compute(A.shape, lambda i: A[i] + B[0], name="C")
sched = tvm.te.create_schedule(C.op)
target_hexagon = tvm.target.hexagon("v68", link_params=True)
func = tvm.build(
sched, [A, B, C], tvm.target.Target(target_hexagon, host=target_hexagon), name="add"
)
temp = utils.tempdir()
dso_binary = "test_binary.so"
dso_binary_path = temp.relpath(dso_binary)
func.save(dso_binary_path)
if not android_serial_number:
pytest.skip("Skip hardware test since ANDROID_SERIAL_NUMBER is not set.")
launcher = HexagonLauncher(serial_number=android_serial_number)
launcher.android_run_rpc(rpc_tracker_host=tvm_tracker_host, rpc_tracker_port=tvm_tracker_port)
launcher.hexagon_setup()
remote_kw = {
"host": tvm_tracker_host,
"port": tvm_tracker_port,
"priority": 0,
"timeout": 60,
}
launcher.hexagon_session_setup(remote_kw)
launcher.upload(dso_binary_path, dso_binary)
with launcher.session as sess:
mod = launcher.get_module(dso_binary)
A_data = tvm.nd.empty(A.shape, A.dtype, sess.device, "global.vtcm")
A_data.copyfrom(np.array([2, 3]))
B_data = tvm.nd.empty(B.shape, B.dtype, sess.device, "global.vtcm")
B_data.copyfrom(np.array([4]))
C_data = tvm.nd.empty(C.shape, C.dtype, sess.device, "global.vtcm")
C_data.copyfrom(np.array([0, 0]))
mod["add"](A_data, B_data, C_data)
result = C_data.numpy()
assert (result == np.array([6, 7])).all()
launcher.close()
class TestMatMul:
M = tvm.testing.parameter(32)
N = tvm.testing.parameter(32)
K = tvm.testing.parameter(32)
@requires_hexagon_toolchain
def test_matmul(self, android_serial_number, tvm_tracker_host, tvm_tracker_port, M, N, K):
X = te.placeholder((M, K), dtype="float32")
Y = te.placeholder((K, N), dtype="float32")
k1 = te.reduce_axis((0, K), name="k1")
Z = te.compute((M, N), lambda i, j: te.sum(X[i, k1] * Y[k1, j], axis=[k1]))
schedule = te.create_schedule(Z.op)
target_hexagon = tvm.target.hexagon("v68", link_params=True)
func = tvm.build(
schedule, [X, Y, Z], tvm.target.Target(target_hexagon, host=target_hexagon)
)
temp = utils.tempdir()
dso_binary = "test_binary.so"
dso_binary_path = temp.relpath(dso_binary)
func.save(dso_binary_path)
if not android_serial_number:
pytest.skip("Skip hardware test since ANDROID_SERIAL_NUMBER is not set.")
launcher = HexagonLauncher(serial_number=android_serial_number)
launcher.android_run_rpc(
rpc_tracker_host=tvm_tracker_host, rpc_tracker_port=tvm_tracker_port
)
launcher.hexagon_setup()
remote_kw = {
"host": tvm_tracker_host,
"port": tvm_tracker_port,
"priority": 0,
"timeout": 60,
}
launcher.hexagon_session_setup(remote_kw)
launcher.upload(dso_binary_path, dso_binary)
x = np.random.uniform(size=[i.value for i in X.shape]).astype(X.dtype)
y = np.random.uniform(size=[i.value for i in Y.shape]).astype(Y.dtype)
z = np.zeros([i.value for i in Z.shape], dtype=Z.dtype)
with launcher.session as sess:
mod = launcher.get_module(dso_binary)
xt = tvm.nd.array(x, device=sess.device)
yt = tvm.nd.array(y, device=sess.device)
zt = tvm.nd.array(z, device=sess.device)
mod(xt, yt, zt)
target_llvm = tvm.target.Target("llvm")
mod = tvm.build(schedule, [X, Y, Z], tvm.target.Target(target_llvm, host=target_llvm))
device = tvm.cpu(0)
xtcpu = tvm.nd.array(x, device)
ytcpu = tvm.nd.array(y, device)
ztcpu = tvm.nd.array(z, device)
mod(xtcpu, ytcpu, ztcpu)
launcher.close()
tvm.testing.assert_allclose(zt.numpy(), ztcpu.numpy(), rtol=1e-4)
@requires_hexagon_toolchain
def test_graph_executor(android_serial_number, tvm_tracker_host, tvm_tracker_port):
dtype = "float32"
data = relay.var("data", relay.TensorType((1, 64, 64, 3), dtype))
weight = relay.var("weight", relay.TensorType((5, 5, 3, 8), dtype))
y = relay.nn.conv2d(
data,
weight,
padding=(2, 2),
kernel_size=(5, 5),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="float32",
)
f = relay.Function([data, weight], y)
relay_mod = tvm.IRModule.from_expr(f)
relay_mod = relay.transform.InferType()(relay_mod)
target_hexagon = tvm.target.hexagon("v68")
runtime = Runtime("cpp")
executor = Executor("graph")
temp = utils.tempdir()
dso_binary = "test_binary.so"
dso_binary_path = temp.relpath(dso_binary)
with tvm.transform.PassContext(opt_level=3):
lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(target_hexagon, host=target_hexagon),
runtime=runtime,
executor=executor,
)
lowered.get_lib().save(dso_binary_path)
if not android_serial_number:
pytest.skip("Skip hardware test since ANDROID_SERIAL_NUMBER is not set.")
launcher = HexagonLauncher(serial_number=android_serial_number)
launcher.android_run_rpc(rpc_tracker_host=tvm_tracker_host, rpc_tracker_port=tvm_tracker_port)
launcher.hexagon_setup()
remote_kw = {
"host": tvm_tracker_host,
"port": tvm_tracker_port,
"priority": 0,
"timeout": 60,
}
launcher.hexagon_session_setup(remote_kw)
launcher.upload(dso_binary_path, dso_binary)
graph_mod = launcher.get_graph_executor(lowered, dso_binary)
weight_in = np.random.rand(5, 5, 3, 8).astype(dtype=dtype)
data_in = np.random.rand(1, 64, 64, 3).astype(dtype=dtype)
graph_mod.set_input(weight=weight_in)
graph_mod.run(data=data_in)
hexagon_output = graph_mod.get_output(0).numpy()
target_llvm = tvm.target.Target("llvm")
with tvm.transform.PassContext(opt_level=3):
llvm_lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(target_llvm, host=target_llvm),
runtime=runtime,
executor=executor,
)
llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered["default"](tvm.cpu(0)))
llvm_graph_mod.set_input(weight=weight_in)
llvm_graph_mod.run(data=data_in)
expected_output = llvm_graph_mod.get_output(0).numpy()
launcher.close()
tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
if __name__ == "__main__":
sys.exit(pytest.main(sys.argv))
| true | true |
1c32213da081ce5136d5611d545b3075a72813fe | 358 | py | Python | abc/abc130/abc130d.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | 1 | 2019-08-21T00:49:34.000Z | 2019-08-21T00:49:34.000Z | abc/abc130/abc130d.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | abc/abc130/abc130d.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | # しゃくとり法
N, K = map(int, input().split())
a = list(map(int, input().split()))
result = 0
i = 0
j = 0
v = 0
while True:
v += a[j]
if v < K:
j += 1
else:
result += N - j
v -= a[i]
if j > i:
v -= a[j]
i += 1
if j < i:
j += 1
if j == N:
print(result)
break
| 14.916667 | 35 | 0.351955 |
N, K = map(int, input().split())
a = list(map(int, input().split()))
result = 0
i = 0
j = 0
v = 0
while True:
v += a[j]
if v < K:
j += 1
else:
result += N - j
v -= a[i]
if j > i:
v -= a[j]
i += 1
if j < i:
j += 1
if j == N:
print(result)
break
| true | true |
1c3221aae6ede31defc7380c964dc41d657f7f66 | 15,720 | py | Python | sdk/python/lib/pulumi/output.py | geekflyer/pulumi | ea8ababc87fba54c86cf378b45531b34bdbcf488 | [
"Apache-2.0"
] | null | null | null | sdk/python/lib/pulumi/output.py | geekflyer/pulumi | ea8ababc87fba54c86cf378b45531b34bdbcf488 | [
"Apache-2.0"
] | null | null | null | sdk/python/lib/pulumi/output.py | geekflyer/pulumi | ea8ababc87fba54c86cf378b45531b34bdbcf488 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016-2018, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from functools import reduce
from inspect import isawaitable
from typing import (
TypeVar,
Generic,
Set,
Callable,
Awaitable,
Union,
cast,
Mapping,
Any,
List,
Optional,
TYPE_CHECKING
)
from . import runtime
from .runtime import known_types
from .runtime import rpc
if TYPE_CHECKING:
from .resource import Resource
T = TypeVar('T')
U = TypeVar('U')
Input = Union[T, Awaitable[T], 'Output[T]']
Inputs = Mapping[str, Input[Any]]
@known_types.output
class Output(Generic[T]):
"""
Output helps encode the relationship between Resources in a Pulumi application. Specifically an
Output holds onto a piece of Data and the Resource it was generated from. An Output value can
then be provided when constructing new Resources, allowing that new Resource to know both the
value as well as the Resource the value came from. This allows for a precise 'Resource
dependency graph' to be created, which properly tracks the relationship between resources.
"""
_is_known: Awaitable[bool]
"""
Whether or not this 'Output' should actually perform .apply calls. During a preview,
an Output value may not be known (because it would have to actually be computed by doing an
'update'). In that case, we don't want to perform any .apply calls as the callbacks
may not expect an undefined value. So, instead, we just transition to another Output
value that itself knows it should not perform .apply calls.
"""
_is_secret: Awaitable[bool]
"""
Where or not this 'Output' should be treated as containing secret data. Secret outputs are tagged when
flowing across the RPC interface to the resource monitor, such that when they are persisted to disk in
our state file, they are encrypted instead of being in plaintext.
"""
_future: Awaitable[T]
"""
Future that actually produces the concrete value of this output.
"""
_resources: Set['Resource']
"""
The list of resources that this output value depends on.
"""
def __init__(self, resources: Set['Resource'], future: Awaitable[T],
is_known: Awaitable[bool], is_secret: Optional[Awaitable[bool]] = None) -> None:
is_known = asyncio.ensure_future(is_known)
future = asyncio.ensure_future(future)
async def is_value_known() -> bool:
return await is_known and not contains_unknowns(await future)
self._resources = resources
self._future = future
self._is_known = asyncio.ensure_future(is_value_known())
if is_secret is not None:
self._is_secret = asyncio.ensure_future(is_secret)
else:
self._is_secret = asyncio.Future()
self._is_secret.set_result(False)
# Private implementation details - do not document.
def resources(self) -> Set['Resource']:
return self._resources
def future(self, with_unknowns: Optional[bool] = None) -> Awaitable[T]:
# If the caller did not explicitly ask to see unknown values and the value of this output contains unnkowns,
# return None. This preserves compatibility with earlier versios of the Pulumi SDK.
async def get_value() -> T:
val = await self._future
return None if not with_unknowns and contains_unknowns(val) else val
return asyncio.ensure_future(get_value())
def is_known(self) -> Awaitable[bool]:
return self._is_known
def is_secret(self) -> Awaitable[bool]:
return self._is_secret
# End private implementation details.
def apply(self, func: Callable[[T], Input[U]], run_with_unknowns: Optional[bool] = None) -> 'Output[U]':
"""
Transforms the data of the output with the provided func. The result remains a
Output so that dependent resources can be properly tracked.
'func' is not allowed to make resources.
'func' can return other Outputs. This can be handy if you have a Output<SomeVal>
and you want to get a transitive dependency of it.
This function will be called during execution of a 'pulumi up' request. It may not run
during 'pulumi preview' (as the values of resources are of course may not be known then).
:param Callable[[T],Input[U]] func: A function that will, given this Output's value, transform the value to
an Input of some kind, where an Input is either a prompt value, a Future, or another Output of the given
type.
:return: A transformed Output obtained from running the transformation function on this Output's value.
:rtype: Output[U]
"""
result_is_known: asyncio.Future = asyncio.Future()
result_is_secret: asyncio.Future = asyncio.Future()
# The "run" coroutine actually runs the apply.
async def run() -> U:
try:
# Await this output's details.
is_known = await self._is_known
is_secret = await self._is_secret
value = await self._future
if runtime.is_dry_run():
# During previews only perform the apply if the engine was able togive us an actual value for this
# Output or if the caller is able to tolerate unknown values.
apply_during_preview = is_known or run_with_unknowns
if not apply_during_preview:
# We didn't actually run the function, our new Output is definitely
# **not** known and **not** secret
result_is_known.set_result(False)
result_is_secret.set_result(False)
return cast(U, None)
# If we are running with unknown values and the value is explicitly unknown but does not actually
# contain any unknown values, collapse its value to the unknown value. This ensures that callbacks
# that expect to see unknowns during preview in outputs that are not known will always do so.
if not is_known and run_with_unknowns and not contains_unknowns(value):
value = UNKNOWN
transformed: Input[U] = func(value)
# Transformed is an Input, meaning there are three cases:
# 1. transformed is an Output[U]
if isinstance(transformed, Output):
transformed_as_output = cast(Output[U], transformed)
# Forward along the inner output's _is_known and _is_secret values.
result_is_known.set_result(await transformed_as_output._is_known)
result_is_secret.set_result(await transformed_as_output._is_secret or is_secret)
return await transformed.future(with_unknowns=True)
# 2. transformed is an Awaitable[U]
if isawaitable(transformed):
# Since transformed is not an Output, it is both known and not a secret.
result_is_known.set_result(True)
result_is_secret.set_result(False)
return await cast(Awaitable[U], transformed)
# 3. transformed is U. It is trivially known.
result_is_known.set_result(True)
result_is_secret.set_result(False)
return cast(U, transformed)
finally:
# Always resolve the future if it hasn't been done already.
if not result_is_known.done():
# Try and set the result. This might fail if we're shutting down,
# so swallow that error if that occurs.
try:
result_is_known.set_result(False)
result_is_secret.set_result(False)
except RuntimeError:
pass
run_fut = asyncio.ensure_future(run())
return Output(self._resources, run_fut, result_is_known, result_is_secret)
def __getattr__(self, item: str) -> 'Output[Any]':
"""
Syntax sugar for retrieving attributes off of outputs.
:param str item: An attribute name.
:return: An Output of this Output's underlying value's property with the given name.
:rtype: Output[Any]
"""
return self.apply(lambda v: UNKNOWN if isinstance(v, Unknown) else getattr(v, item), True)
def __getitem__(self, key: Any) -> 'Output[Any]':
"""
Syntax sugar for looking up attributes dynamically off of outputs.
:param Any key: Key for the attribute dictionary.
:return: An Output of this Output's underlying value, keyed with the given key as if it were a dictionary.
:rtype: Output[Any]
"""
return self.apply(lambda v: UNKNOWN if isinstance(v, Unknown) else v[key], True)
@staticmethod
def from_input(val: Input[T]) -> 'Output[T]':
"""
Takes an Input value and produces an Output value from it, deeply unwrapping nested Input values as necessary
given the type.
:param Input[T] val: An Input to be converted to an Output.
:return: A deeply-unwrapped Output that is guaranteed to not contain any Input values.
:rtype: Output[T]
"""
# Is it an output already? Recurse into the value contained within it.
if isinstance(val, Output):
return val.apply(Output.from_input, True)
# Is a dict or list? Recurse into the values within them.
if isinstance(val, dict):
# Since Output.all works on lists early, serialize this dictionary into a list of lists first.
# Once we have a output of the list of properties, we can use an apply to re-hydrate it back into a dict.
transformed_items = [[k, Output.from_input(v)] for k, v in val.items()]
return Output.all(*transformed_items).apply(lambda props: {k: v for k, v in props}, True)
if isinstance(val, list):
transformed_items = [Output.from_input(v) for v in val]
return Output.all(*transformed_items)
# If it's not an output, list, or dict, it must be known and not secret
is_known_fut = asyncio.Future()
is_secret_fut = asyncio.Future()
is_known_fut.set_result(True)
is_secret_fut.set_result(False)
# Is it awaitable? If so, schedule it for execution and use the resulting future
# as the value future for a new output.
if isawaitable(val):
promise_output = Output(set(), asyncio.ensure_future(val), is_known_fut, is_secret_fut)
return promise_output.apply(Output.from_input, True)
# Is it a prompt value? Set up a new resolved future and use that as the value future.
value_fut = asyncio.Future()
value_fut.set_result(val)
return Output(set(), value_fut, is_known_fut, is_secret_fut)
@staticmethod
def secret(val: Input[T]) -> 'Output[T]':
"""
Takes an Input value and produces an Output value from it, deeply unwrapping nested Input values as necessary
given the type. It also marks the returned Output as a secret, so its contents will be persisted in an encrypted
form in state files.
:param Input[T] val: An Input to be converted to an Secret Output.
:return: A deeply-unwrapped Output that is guaranteed to not contain any Input values and is marked as a Secret.
:rtype: Output[T]
"""
o = Output.from_input(val)
is_secret = asyncio.Future()
is_secret.set_result(True)
return Output(o._resources, o._future, o._is_known, is_secret)
@staticmethod
def all(*args: List[Input[T]]) -> 'Output[List[T]]':
"""
Produces an Output of Lists from a List of Inputs.
This function can be used to combine multiple, separate Inputs into a single
Output which can then be used as the target of `apply`. Resource dependencies
are preserved in the returned Output.
:param List[Input[T]] args: A list of Inputs to convert.
:return: An output of lists, converted from an Input to prompt values.
:rtype: Output[List[T]]
"""
# Three asynchronous helper functions to assist in the implementation:
# is_known, which returns True if all of the input's values are known,
# and false if any of them are not known,
async def is_known(outputs):
is_known_futures = list(map(lambda o: o._is_known, outputs))
each_is_known = await asyncio.gather(*is_known_futures)
return all(each_is_known)
# is_secret, which returns True if any of the input values are secret, and
# false if none of them are secret.
async def is_secret(outputs):
is_secret_futures = list(map(lambda o: o._is_secret, outputs))
each_is_secret = await asyncio.gather(*is_secret_futures)
return any(each_is_secret)
# gather_futures, which aggregates the list of futures in each input to a future of a list.
async def gather_futures(outputs):
value_futures = list(map(lambda o: asyncio.ensure_future(o.future(with_unknowns=True)), outputs))
return await asyncio.gather(*value_futures)
# First, map all inputs to outputs using `from_input`.
all_outputs = list(map(Output.from_input, args))
# Merge the list of resource dependencies across all inputs.
resources = reduce(lambda acc, r: acc.union(r.resources()), all_outputs, set())
# Aggregate the list of futures into a future of lists.
value_futures = asyncio.ensure_future(gather_futures(all_outputs))
# Aggregate whether or not this output is known.
known_futures = asyncio.ensure_future(is_known(all_outputs))
secret_futures = asyncio.ensure_future(is_secret(all_outputs))
return Output(resources, value_futures, known_futures, secret_futures)
@staticmethod
def concat(*args: List[Input[str]]) -> 'Output[str]':
"""
Concatenates a collection of Input[str] into a single Output[str].
This function takes a sequence of Input[str], stringifies each, and concatenates all values
into one final string. This can be used like so:
url = Output.concat("http://", server.hostname, ":", loadBalancer.port)
:param List[Input[str]] args: A list of string Inputs to concatenate.
:return: A concatenated output string.
:rtype: Output[str]
"""
transformed_items = [Output.from_input(v) for v in args]
return Output.all(*transformed_items).apply("".join)
@known_types.unknown
class Unknown:
"""
Unknown represents a value that is unknown.
"""
def __init__(self):
pass
UNKNOWN = Unknown()
"""
UNKNOWN is the singleton unknown value.
"""
def contains_unknowns(val: Any) -> bool:
return rpc.contains_unknowns(val)
| 42.833787 | 120 | 0.6493 |
import asyncio
from functools import reduce
from inspect import isawaitable
from typing import (
TypeVar,
Generic,
Set,
Callable,
Awaitable,
Union,
cast,
Mapping,
Any,
List,
Optional,
TYPE_CHECKING
)
from . import runtime
from .runtime import known_types
from .runtime import rpc
if TYPE_CHECKING:
from .resource import Resource
T = TypeVar('T')
U = TypeVar('U')
Input = Union[T, Awaitable[T], 'Output[T]']
Inputs = Mapping[str, Input[Any]]
@known_types.output
class Output(Generic[T]):
_is_known: Awaitable[bool]
_is_secret: Awaitable[bool]
_future: Awaitable[T]
_resources: Set['Resource']
def __init__(self, resources: Set['Resource'], future: Awaitable[T],
is_known: Awaitable[bool], is_secret: Optional[Awaitable[bool]] = None) -> None:
is_known = asyncio.ensure_future(is_known)
future = asyncio.ensure_future(future)
async def is_value_known() -> bool:
return await is_known and not contains_unknowns(await future)
self._resources = resources
self._future = future
self._is_known = asyncio.ensure_future(is_value_known())
if is_secret is not None:
self._is_secret = asyncio.ensure_future(is_secret)
else:
self._is_secret = asyncio.Future()
self._is_secret.set_result(False)
def resources(self) -> Set['Resource']:
return self._resources
def future(self, with_unknowns: Optional[bool] = None) -> Awaitable[T]:
async def get_value() -> T:
val = await self._future
return None if not with_unknowns and contains_unknowns(val) else val
return asyncio.ensure_future(get_value())
def is_known(self) -> Awaitable[bool]:
return self._is_known
def is_secret(self) -> Awaitable[bool]:
return self._is_secret
def apply(self, func: Callable[[T], Input[U]], run_with_unknowns: Optional[bool] = None) -> 'Output[U]':
result_is_known: asyncio.Future = asyncio.Future()
result_is_secret: asyncio.Future = asyncio.Future()
async def run() -> U:
try:
is_known = await self._is_known
is_secret = await self._is_secret
value = await self._future
if runtime.is_dry_run():
# During previews only perform the apply if the engine was able togive us an actual value for this
# Output or if the caller is able to tolerate unknown values.
apply_during_preview = is_known or run_with_unknowns
if not apply_during_preview:
# We didn't actually run the function, our new Output is definitely
result_is_known.set_result(False)
result_is_secret.set_result(False)
return cast(U, None)
if not is_known and run_with_unknowns and not contains_unknowns(value):
value = UNKNOWN
transformed: Input[U] = func(value)
if isinstance(transformed, Output):
transformed_as_output = cast(Output[U], transformed)
result_is_known.set_result(await transformed_as_output._is_known)
result_is_secret.set_result(await transformed_as_output._is_secret or is_secret)
return await transformed.future(with_unknowns=True)
# 2. transformed is an Awaitable[U]
if isawaitable(transformed):
# Since transformed is not an Output, it is both known and not a secret.
result_is_known.set_result(True)
result_is_secret.set_result(False)
return await cast(Awaitable[U], transformed)
# 3. transformed is U. It is trivially known.
result_is_known.set_result(True)
result_is_secret.set_result(False)
return cast(U, transformed)
finally:
# Always resolve the future if it hasn't been done already.
if not result_is_known.done():
# so swallow that error if that occurs.
try:
result_is_known.set_result(False)
result_is_secret.set_result(False)
except RuntimeError:
pass
run_fut = asyncio.ensure_future(run())
return Output(self._resources, run_fut, result_is_known, result_is_secret)
def __getattr__(self, item: str) -> 'Output[Any]':
return self.apply(lambda v: UNKNOWN if isinstance(v, Unknown) else getattr(v, item), True)
def __getitem__(self, key: Any) -> 'Output[Any]':
return self.apply(lambda v: UNKNOWN if isinstance(v, Unknown) else v[key], True)
@staticmethod
def from_input(val: Input[T]) -> 'Output[T]':
# Is it an output already? Recurse into the value contained within it.
if isinstance(val, Output):
return val.apply(Output.from_input, True)
# Is a dict or list? Recurse into the values within them.
if isinstance(val, dict):
# Since Output.all works on lists early, serialize this dictionary into a list of lists first.
# Once we have a output of the list of properties, we can use an apply to re-hydrate it back into a dict.
transformed_items = [[k, Output.from_input(v)] for k, v in val.items()]
return Output.all(*transformed_items).apply(lambda props: {k: v for k, v in props}, True)
if isinstance(val, list):
transformed_items = [Output.from_input(v) for v in val]
return Output.all(*transformed_items)
# If it's not an output, list, or dict, it must be known and not secret
is_known_fut = asyncio.Future()
is_secret_fut = asyncio.Future()
is_known_fut.set_result(True)
is_secret_fut.set_result(False)
if isawaitable(val):
promise_output = Output(set(), asyncio.ensure_future(val), is_known_fut, is_secret_fut)
return promise_output.apply(Output.from_input, True)
value_fut = asyncio.Future()
value_fut.set_result(val)
return Output(set(), value_fut, is_known_fut, is_secret_fut)
@staticmethod
def secret(val: Input[T]) -> 'Output[T]':
o = Output.from_input(val)
is_secret = asyncio.Future()
is_secret.set_result(True)
return Output(o._resources, o._future, o._is_known, is_secret)
@staticmethod
def all(*args: List[Input[T]]) -> 'Output[List[T]]':
# and false if any of them are not known,
async def is_known(outputs):
is_known_futures = list(map(lambda o: o._is_known, outputs))
each_is_known = await asyncio.gather(*is_known_futures)
return all(each_is_known)
# is_secret, which returns True if any of the input values are secret, and
# false if none of them are secret.
async def is_secret(outputs):
is_secret_futures = list(map(lambda o: o._is_secret, outputs))
each_is_secret = await asyncio.gather(*is_secret_futures)
return any(each_is_secret)
# gather_futures, which aggregates the list of futures in each input to a future of a list.
async def gather_futures(outputs):
value_futures = list(map(lambda o: asyncio.ensure_future(o.future(with_unknowns=True)), outputs))
return await asyncio.gather(*value_futures)
# First, map all inputs to outputs using `from_input`.
all_outputs = list(map(Output.from_input, args))
# Merge the list of resource dependencies across all inputs.
resources = reduce(lambda acc, r: acc.union(r.resources()), all_outputs, set())
# Aggregate the list of futures into a future of lists.
value_futures = asyncio.ensure_future(gather_futures(all_outputs))
# Aggregate whether or not this output is known.
known_futures = asyncio.ensure_future(is_known(all_outputs))
secret_futures = asyncio.ensure_future(is_secret(all_outputs))
return Output(resources, value_futures, known_futures, secret_futures)
@staticmethod
def concat(*args: List[Input[str]]) -> 'Output[str]':
transformed_items = [Output.from_input(v) for v in args]
return Output.all(*transformed_items).apply("".join)
@known_types.unknown
class Unknown:
def __init__(self):
pass
UNKNOWN = Unknown()
def contains_unknowns(val: Any) -> bool:
return rpc.contains_unknowns(val)
| true | true |
1c3221b1423884c0752522c0a00db79476896656 | 305 | py | Python | Testprogramm1.py | bogdanevropin/euler_project_tasks | 0a5470ce125112e54d15eddb580f201d13ead8af | [
"MIT"
] | null | null | null | Testprogramm1.py | bogdanevropin/euler_project_tasks | 0a5470ce125112e54d15eddb580f201d13ead8af | [
"MIT"
] | null | null | null | Testprogramm1.py | bogdanevropin/euler_project_tasks | 0a5470ce125112e54d15eddb580f201d13ead8af | [
"MIT"
] | null | null | null | from collections import namedtuple
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
def __eq__(self, other):
return self.x == other.x and self.y == other.y
namedtuple("Point", ["x", "y"])
p1 = Point(x=1, y=2)
p2 = Point(x=1, y=2)
print(p1 == p2) | 21.785714 | 55 | 0.547541 | from collections import namedtuple
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
def __eq__(self, other):
return self.x == other.x and self.y == other.y
namedtuple("Point", ["x", "y"])
p1 = Point(x=1, y=2)
p2 = Point(x=1, y=2)
print(p1 == p2) | true | true |
1c32229d98353ca496267864784a5724cba819aa | 2,620 | py | Python | python/isomorphicGraph.py | quasarbright/quasarbright.github.io | 942710adf4a2531d033023a6f750efeddf3e9050 | [
"MIT"
] | 1 | 2021-01-23T13:50:34.000Z | 2021-01-23T13:50:34.000Z | python/isomorphicGraph.py | quasarbright/quasarbright.github.io | 942710adf4a2531d033023a6f750efeddf3e9050 | [
"MIT"
] | 40 | 2018-02-19T19:37:24.000Z | 2022-03-25T18:34:22.000Z | python/isomorphicGraph.py | quasarbright/quasarbright.github.io | 942710adf4a2531d033023a6f750efeddf3e9050 | [
"MIT"
] | 1 | 2018-12-07T03:07:21.000Z | 2018-12-07T03:07:21.000Z | from mylib.graph import *
from mylib.lexicographic import allPerms
import unittest
# black widow shape isometric to square, not isometric to complete-ish graph
# 1 2
# 3 4
# black widow cycle
G1 = DiGraph()
G1.add_node(1, 2, 3, 4)
G1.set_edge(1, 2)
G1.set_edge(2, 3)
G1.set_edge(3, 4)
G1.set_edge(4, 1)
# square cycle
G2 = DiGraph()
G2.add_node(1, 2, 3, 4)
G2.set_edge(1, 2)
G2.set_edge(2, 4)
G2.set_edge(4, 3)
G2.set_edge(3, 1)
# both
G3 = DiGraph()
G3.add_node(1, 2, 3, 4)
G3.set_edge(1, 2)
G3.set_edge(2, 3)
G3.set_edge(2, 4)
G3.set_edge(3, 4)
G3.set_edge(4, 3)
G3.set_edge(4, 1)
G3.set_edge(3, 1)
def isCorrectMapping(g, h, gnodes, hnodes):
nodemap = {}
for gnode, hnode in zip(gnodes, hnodes):
nodemap[gnode] = hnode
for gu in gnodes:
for gv in gnodes:
hu = nodemap[gu]
hv = nodemap[gv]
guchildren = set(g.get_children(gu))
huchildren_guess = set(nodemap[n] for n in guchildren)
huchildren_actual = set(h.get_children(hu))
if huchildren_guess != huchildren_actual:
return False
return True
def areIsomorphic(g, h):
# check number of nodes
if len(g.get_nodes()) != len(h.get_nodes()):
return False
# check number of edges
if len(g.get_edges()) != len(h.get_edges()):
return False
# check total degrees
if g.get_total_in_degree() != h.get_total_in_degree() or g.get_total_out_degree() != h.get_total_out_degree():
return False
# check all permutations :(
gnodes = tuple(g.get_nodes())
for hnodes in allPerms(tuple(h.get_nodes())):
if isCorrectMapping(g, h, gnodes, hnodes):
return True
return False
'''
maybe remove an edge and recurse?
won't work. could remove two non-corresponding edges which leads to two isomorphic graphs
'''
class Test(unittest.TestCase):
def testIsCorrectMapping(self):
self.assertTrue(isCorrectMapping(G1, G2, [1, 2, 3, 4], [1, 2, 4, 3]))
self.assertFalse(isCorrectMapping(G1, G2, [1, 2, 3, 4], [1, 2, 3, 4]))
self.assertFalse(isCorrectMapping(G1, G3, [1, 2, 3, 4], [1, 2, 3, 4]))
def test1(self):
self.assertTrue(areIsomorphic(G1, G2))
def test2(self):
self.assertTrue(areIsomorphic(G2, G1))
def test3(self):
self.assertFalse(areIsomorphic(G1, G3))
def test4(self):
self.assertFalse(areIsomorphic(G3, G1))
def test5(self):
self.assertFalse(areIsomorphic(G3, G2))
def test6(self):
G = DiGraph()
G.add_node('a')
self.assertFalse(areIsomorphic(G1, G))
unittest.main()
| 28.791209 | 114 | 0.632824 | from mylib.graph import *
from mylib.lexicographic import allPerms
import unittest
G1 = DiGraph()
G1.add_node(1, 2, 3, 4)
G1.set_edge(1, 2)
G1.set_edge(2, 3)
G1.set_edge(3, 4)
G1.set_edge(4, 1)
G2 = DiGraph()
G2.add_node(1, 2, 3, 4)
G2.set_edge(1, 2)
G2.set_edge(2, 4)
G2.set_edge(4, 3)
G2.set_edge(3, 1)
G3 = DiGraph()
G3.add_node(1, 2, 3, 4)
G3.set_edge(1, 2)
G3.set_edge(2, 3)
G3.set_edge(2, 4)
G3.set_edge(3, 4)
G3.set_edge(4, 3)
G3.set_edge(4, 1)
G3.set_edge(3, 1)
def isCorrectMapping(g, h, gnodes, hnodes):
nodemap = {}
for gnode, hnode in zip(gnodes, hnodes):
nodemap[gnode] = hnode
for gu in gnodes:
for gv in gnodes:
hu = nodemap[gu]
hv = nodemap[gv]
guchildren = set(g.get_children(gu))
huchildren_guess = set(nodemap[n] for n in guchildren)
huchildren_actual = set(h.get_children(hu))
if huchildren_guess != huchildren_actual:
return False
return True
def areIsomorphic(g, h):
if len(g.get_nodes()) != len(h.get_nodes()):
return False
if len(g.get_edges()) != len(h.get_edges()):
return False
if g.get_total_in_degree() != h.get_total_in_degree() or g.get_total_out_degree() != h.get_total_out_degree():
return False
gnodes = tuple(g.get_nodes())
for hnodes in allPerms(tuple(h.get_nodes())):
if isCorrectMapping(g, h, gnodes, hnodes):
return True
return False
class Test(unittest.TestCase):
def testIsCorrectMapping(self):
self.assertTrue(isCorrectMapping(G1, G2, [1, 2, 3, 4], [1, 2, 4, 3]))
self.assertFalse(isCorrectMapping(G1, G2, [1, 2, 3, 4], [1, 2, 3, 4]))
self.assertFalse(isCorrectMapping(G1, G3, [1, 2, 3, 4], [1, 2, 3, 4]))
def test1(self):
self.assertTrue(areIsomorphic(G1, G2))
def test2(self):
self.assertTrue(areIsomorphic(G2, G1))
def test3(self):
self.assertFalse(areIsomorphic(G1, G3))
def test4(self):
self.assertFalse(areIsomorphic(G3, G1))
def test5(self):
self.assertFalse(areIsomorphic(G3, G2))
def test6(self):
G = DiGraph()
G.add_node('a')
self.assertFalse(areIsomorphic(G1, G))
unittest.main()
| true | true |
1c3223d8a0512ae4da1b50c7e86aec30f1708775 | 30,072 | py | Python | airflow/providers/google/cloud/hooks/tasks.py | gtossou/airflow | 0314a3a218f864f78ec260cc66134e7acae34bc5 | [
"Apache-2.0"
] | 79 | 2021-10-15T07:32:27.000Z | 2022-03-28T04:10:19.000Z | airflow/providers/google/cloud/hooks/tasks.py | gtossou/airflow | 0314a3a218f864f78ec260cc66134e7acae34bc5 | [
"Apache-2.0"
] | 153 | 2021-10-15T05:23:46.000Z | 2022-02-23T06:07:10.000Z | airflow/providers/google/cloud/hooks/tasks.py | gtossou/airflow | 0314a3a218f864f78ec260cc66134e7acae34bc5 | [
"Apache-2.0"
] | 23 | 2021-10-15T02:36:37.000Z | 2022-03-17T02:59:27.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a CloudTasksHook
which allows you to connect to Google Cloud Tasks service,
performing actions to queues or tasks.
"""
from typing import Dict, List, Optional, Sequence, Tuple, Union
from google.api_core.retry import Retry
from google.cloud.tasks_v2 import CloudTasksClient, enums
from google.cloud.tasks_v2.types import FieldMask, Queue, Task
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class CloudTasksHook(GoogleBaseHook):
"""
Hook for Google Cloud Tasks APIs. Cloud Tasks allows developers to manage
the execution of background work in their applications.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
:type impersonation_chain: Union[str, Sequence[str]]
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
self._client = None
def get_conn(self) -> CloudTasksClient:
"""
Provides a client for interacting with the Google Cloud Tasks API.
:return: Google Cloud Tasks API Client
:rtype: google.cloud.tasks_v2.CloudTasksClient
"""
if not self._client:
self._client = CloudTasksClient(credentials=self._get_credentials(), client_info=self.client_info)
return self._client
@GoogleBaseHook.fallback_to_default_project_id
def create_queue(
self,
location: str,
task_queue: Union[dict, Queue],
project_id: str,
queue_name: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Queue:
"""
Creates a queue in Cloud Tasks.
:param location: The location name in which the queue will be created.
:type location: str
:param task_queue: The task queue to create.
Queue's name cannot be the same as an existing queue.
If a dict is provided, it must be of the same form as the protobuf message Queue.
:type task_queue: dict or google.cloud.tasks_v2.types.Queue
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param queue_name: (Optional) The queue's name.
If provided, it will be used to construct the full queue path.
:type queue_name: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]]
:rtype: google.cloud.tasks_v2.types.Queue
"""
client = self.get_conn()
if queue_name:
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
if isinstance(task_queue, Queue):
task_queue.name = full_queue_name
elif isinstance(task_queue, dict):
task_queue['name'] = full_queue_name
else:
raise AirflowException('Unable to set queue_name.')
full_location_path = CloudTasksClient.location_path(project_id, location)
return client.create_queue(
parent=full_location_path,
queue=task_queue,
retry=retry,
timeout=timeout,
metadata=metadata,
)
    @GoogleBaseHook.fallback_to_default_project_id
    def update_queue(
        self,
        task_queue: Queue,
        project_id: str,
        location: Optional[str] = None,
        queue_name: Optional[str] = None,
        update_mask: Optional[FieldMask] = None,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Optional[Sequence[Tuple[str, str]]] = None,
    ) -> Queue:
        """
        Updates a queue in Cloud Tasks.

        :param task_queue: The task queue to update.
            This method creates the queue if it does not exist and updates the queue if
            it does exist. The queue's name must be specified.
        :type task_queue: dict or google.cloud.tasks_v2.types.Queue
        :param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
            If set to None or missing, the default project_id from the Google Cloud connection is used.
        :type project_id: str
        :param location: (Optional) The location name in which the queue will be updated.
            If provided, it will be used to construct the full queue path.
        :type location: str
        :param queue_name: (Optional) The queue's name.
            If provided, it will be used to construct the full queue path.
        :type queue_name: str
        :param update_mask: A mask used to specify which fields of the queue are being updated.
            If empty, then all fields will be updated.
            If a dict is provided, it must be of the same form as the protobuf message.
        :type update_mask: dict or google.cloud.tasks_v2.types.FieldMask
        :param retry: (Optional) A retry object used to retry requests.
            If None is specified, requests will not be retried.
        :type retry: google.api_core.retry.Retry
        :param timeout: (Optional) The amount of time, in seconds, to wait for the request
            to complete. Note that if retry is specified, the timeout applies to each
            individual attempt.
        :type timeout: float
        :param metadata: (Optional) Additional metadata that is provided to the method.
        :type metadata: sequence[tuple[str, str]]
        :rtype: google.cloud.tasks_v2.types.Queue
        """
        client = self.get_conn()
        # The fully-qualified resource name can only be built when both
        # queue_name and location are supplied; otherwise task_queue must
        # already carry its own ``name``.
        if queue_name and location:
            full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
            if isinstance(task_queue, Queue):
                task_queue.name = full_queue_name
            elif isinstance(task_queue, dict):
                task_queue['name'] = full_queue_name
            else:
                raise AirflowException('Unable to set queue_name.')
        return client.update_queue(
            queue=task_queue,
            update_mask=update_mask,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
@GoogleBaseHook.fallback_to_default_project_id
def get_queue(
self,
location: str,
queue_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Queue:
"""
Gets a queue from Cloud Tasks.
:param location: The location name in which the queue was created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]]
:rtype: google.cloud.tasks_v2.types.Queue
"""
client = self.get_conn()
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
return client.get_queue(name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata)
@GoogleBaseHook.fallback_to_default_project_id
def list_queues(
self,
location: str,
project_id: str,
results_filter: Optional[str] = None,
page_size: Optional[int] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> List[Queue]:
"""
Lists queues from Cloud Tasks.
:param location: The location name in which the queues were created.
:type location: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param results_filter: (Optional) Filter used to specify a subset of queues.
:type results_filter: str
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:type page_size: int
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]]
:rtype: list[google.cloud.tasks_v2.types.Queue]
"""
client = self.get_conn()
full_location_path = CloudTasksClient.location_path(project_id, location)
queues = client.list_queues(
parent=full_location_path,
filter_=results_filter,
page_size=page_size,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return list(queues)
@GoogleBaseHook.fallback_to_default_project_id
def delete_queue(
self,
location: str,
queue_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> None:
"""
Deletes a queue from Cloud Tasks, even if it has tasks in it.
:param location: The location name in which the queue will be deleted.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]]
"""
client = self.get_conn()
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
client.delete_queue(name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata)
@GoogleBaseHook.fallback_to_default_project_id
def purge_queue(
self,
location: str,
queue_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> List[Queue]:
"""
Purges a queue by deleting all of its tasks from Cloud Tasks.
:param location: The location name in which the queue will be purged.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]]
:rtype: list[google.cloud.tasks_v2.types.Queue]
"""
client = self.get_conn()
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
return client.purge_queue(name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata)
@GoogleBaseHook.fallback_to_default_project_id
def pause_queue(
self,
location: str,
queue_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> List[Queue]:
"""
Pauses a queue in Cloud Tasks.
:param location: The location name in which the queue will be paused.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]]
:rtype: list[google.cloud.tasks_v2.types.Queue]
"""
client = self.get_conn()
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
return client.pause_queue(name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata)
@GoogleBaseHook.fallback_to_default_project_id
def resume_queue(
self,
location: str,
queue_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> List[Queue]:
"""
Resumes a queue in Cloud Tasks.
:param location: The location name in which the queue will be resumed.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]]
:rtype: list[google.cloud.tasks_v2.types.Queue]
"""
client = self.get_conn()
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
return client.resume_queue(name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata)
@GoogleBaseHook.fallback_to_default_project_id
def create_task(
self,
location: str,
queue_name: str,
task: Union[Dict, Task],
project_id: str,
task_name: Optional[str] = None,
response_view: Optional[enums.Task.View] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Task:
"""
Creates a task in Cloud Tasks.
:param location: The location name in which the task will be created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param task: The task to add.
If a dict is provided, it must be of the same form as the protobuf message Task.
:type task: dict or google.cloud.tasks_v2.types.Task
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param task_name: (Optional) The task's name.
If provided, it will be used to construct the full task path.
:type task_name: str
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:type response_view: google.cloud.tasks_v2.enums.Task.View
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]]
:rtype: google.cloud.tasks_v2.types.Task
"""
client = self.get_conn()
if task_name:
full_task_name = CloudTasksClient.task_path(project_id, location, queue_name, task_name)
if isinstance(task, Task):
task.name = full_task_name
elif isinstance(task, dict):
task['name'] = full_task_name
else:
raise AirflowException('Unable to set task_name.')
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
return client.create_task(
parent=full_queue_name,
task=task,
response_view=response_view,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_task(
self,
location: str,
queue_name: str,
task_name: str,
project_id: str,
response_view: Optional[enums.Task.View] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Task:
"""
Gets a task from Cloud Tasks.
:param location: The location name in which the task was created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param task_name: The task's name.
:type task_name: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:type response_view: google.cloud.tasks_v2.enums.Task.View
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]]
:rtype: google.cloud.tasks_v2.types.Task
"""
client = self.get_conn()
full_task_name = CloudTasksClient.task_path(project_id, location, queue_name, task_name)
return client.get_task(
name=full_task_name,
response_view=response_view,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def list_tasks(
self,
location: str,
queue_name: str,
project_id: str,
response_view: Optional[enums.Task.View] = None,
page_size: Optional[int] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> List[Task]:
"""
Lists the tasks in Cloud Tasks.
:param location: The location name in which the tasks were created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:type response_view: google.cloud.tasks_v2.enums.Task.View
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:type page_size: int
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]]
:rtype: list[google.cloud.tasks_v2.types.Task]
"""
client = self.get_conn()
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
tasks = client.list_tasks(
parent=full_queue_name,
response_view=response_view,
page_size=page_size,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return list(tasks)
@GoogleBaseHook.fallback_to_default_project_id
def delete_task(
self,
location: str,
queue_name: str,
task_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> None:
"""
Deletes a task from Cloud Tasks.
:param location: The location name in which the task will be deleted.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param task_name: The task's name.
:type task_name: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]]
"""
client = self.get_conn()
full_task_name = CloudTasksClient.task_path(project_id, location, queue_name, task_name)
client.delete_task(name=full_task_name, retry=retry, timeout=timeout, metadata=metadata)
@GoogleBaseHook.fallback_to_default_project_id
def run_task(
self,
location: str,
queue_name: str,
task_name: str,
project_id: str,
response_view: Optional[enums.Task.View] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Task:
"""
Forces to run a task in Cloud Tasks.
:param location: The location name in which the task was created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param task_name: The task's name.
:type task_name: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:type response_view: google.cloud.tasks_v2.enums.Task.View
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]]
:rtype: google.cloud.tasks_v2.types.Task
"""
client = self.get_conn()
full_task_name = CloudTasksClient.task_path(project_id, location, queue_name, task_name)
return client.run_task(
name=full_task_name,
response_view=response_view,
retry=retry,
timeout=timeout,
metadata=metadata,
)
| 44.485207 | 110 | 0.649874 |
from typing import Dict, List, Optional, Sequence, Tuple, Union
from google.api_core.retry import Retry
from google.cloud.tasks_v2 import CloudTasksClient, enums
from google.cloud.tasks_v2.types import FieldMask, Queue, Task
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class CloudTasksHook(GoogleBaseHook):
    """
    Hook for Google Cloud Tasks.

    Wraps a ``CloudTasksClient`` and exposes queue operations (create, update,
    get, list, delete, purge, pause, resume) and task operations (create, get,
    list, delete, run). ``project_id`` on every API method falls back to the
    connection's default project via ``fallback_to_default_project_id``.
    """
    def __init__(
        self,
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
    ) -> None:
        """Store connection settings; the API client itself is created lazily."""
        super().__init__(
            gcp_conn_id=gcp_conn_id,
            delegate_to=delegate_to,
            impersonation_chain=impersonation_chain,
        )
        # Cached CloudTasksClient, populated on first get_conn() call.
        self._client: Optional[CloudTasksClient] = None
    def get_conn(self) -> CloudTasksClient:
        """Return the (cached) CloudTasksClient built from the hook's credentials."""
        if not self._client:
            self._client = CloudTasksClient(credentials=self._get_credentials(), client_info=self.client_info)
        return self._client
    @GoogleBaseHook.fallback_to_default_project_id
    def create_queue(
        self,
        location: str,
        task_queue: Union[dict, Queue],
        project_id: str,
        queue_name: Optional[str] = None,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Optional[Sequence[Tuple[str, str]]] = None,
    ) -> Queue:
        """Create ``task_queue`` in ``location``; when ``queue_name`` is given, stamp the full queue path onto it first."""
        client = self.get_conn()
        if queue_name:
            full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
            if isinstance(task_queue, Queue):
                task_queue.name = full_queue_name
            elif isinstance(task_queue, dict):
                task_queue['name'] = full_queue_name
            else:
                raise AirflowException('Unable to set queue_name.')
        full_location_path = CloudTasksClient.location_path(project_id, location)
        return client.create_queue(
            parent=full_location_path,
            queue=task_queue,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
    @GoogleBaseHook.fallback_to_default_project_id
    def update_queue(
        self,
        task_queue: Queue,
        project_id: str,
        location: Optional[str] = None,
        queue_name: Optional[str] = None,
        update_mask: Optional[FieldMask] = None,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Optional[Sequence[Tuple[str, str]]] = None,
    ) -> Queue:
        """Update ``task_queue``; the queue path is derived only when both ``queue_name`` and ``location`` are given."""
        client = self.get_conn()
        if queue_name and location:
            full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
            if isinstance(task_queue, Queue):
                task_queue.name = full_queue_name
            elif isinstance(task_queue, dict):
                task_queue['name'] = full_queue_name
            else:
                raise AirflowException('Unable to set queue_name.')
        return client.update_queue(
            queue=task_queue,
            update_mask=update_mask,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
    @GoogleBaseHook.fallback_to_default_project_id
    def get_queue(
        self,
        location: str,
        queue_name: str,
        project_id: str,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Optional[Sequence[Tuple[str, str]]] = None,
    ) -> Queue:
        """Fetch a single queue by location and name."""
        client = self.get_conn()
        full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
        return client.get_queue(name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata)
    @GoogleBaseHook.fallback_to_default_project_id
    def list_queues(
        self,
        location: str,
        project_id: str,
        results_filter: Optional[str] = None,
        page_size: Optional[int] = None,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Optional[Sequence[Tuple[str, str]]] = None,
    ) -> List[Queue]:
        """List queues in ``location``, draining the page iterator into a list."""
        client = self.get_conn()
        full_location_path = CloudTasksClient.location_path(project_id, location)
        queues = client.list_queues(
            parent=full_location_path,
            filter_=results_filter,
            page_size=page_size,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        return list(queues)
    @GoogleBaseHook.fallback_to_default_project_id
    def delete_queue(
        self,
        location: str,
        queue_name: str,
        project_id: str,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Optional[Sequence[Tuple[str, str]]] = None,
    ) -> None:
        """Delete a queue, even if it still contains tasks."""
        client = self.get_conn()
        full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
        client.delete_queue(name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata)
    @GoogleBaseHook.fallback_to_default_project_id
    def purge_queue(
        self,
        location: str,
        queue_name: str,
        project_id: str,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Optional[Sequence[Tuple[str, str]]] = None,
    ) -> List[Queue]:
        """Purge a queue by deleting all of its tasks.

        NOTE(review): the underlying client call appears to return a single
        Queue, not a list — the List[Queue] annotation looks wrong; confirm.
        """
        client = self.get_conn()
        full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
        return client.purge_queue(name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata)
    @GoogleBaseHook.fallback_to_default_project_id
    def pause_queue(
        self,
        location: str,
        queue_name: str,
        project_id: str,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Optional[Sequence[Tuple[str, str]]] = None,
    ) -> List[Queue]:
        """Pause a queue.

        NOTE(review): see purge_queue — the List[Queue] annotation is suspect.
        """
        client = self.get_conn()
        full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
        return client.pause_queue(name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata)
    @GoogleBaseHook.fallback_to_default_project_id
    def resume_queue(
        self,
        location: str,
        queue_name: str,
        project_id: str,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Optional[Sequence[Tuple[str, str]]] = None,
    ) -> List[Queue]:
        """Resume a paused queue.

        NOTE(review): see purge_queue — the List[Queue] annotation is suspect.
        """
        client = self.get_conn()
        full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
        return client.resume_queue(name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata)
    @GoogleBaseHook.fallback_to_default_project_id
    def create_task(
        self,
        location: str,
        queue_name: str,
        task: Union[Dict, Task],
        project_id: str,
        task_name: Optional[str] = None,
        response_view: Optional[enums.Task.View] = None,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Optional[Sequence[Tuple[str, str]]] = None,
    ) -> Task:
        """Create ``task`` in a queue; when ``task_name`` is given, stamp the full task path onto it first."""
        client = self.get_conn()
        if task_name:
            full_task_name = CloudTasksClient.task_path(project_id, location, queue_name, task_name)
            if isinstance(task, Task):
                task.name = full_task_name
            elif isinstance(task, dict):
                task['name'] = full_task_name
            else:
                raise AirflowException('Unable to set task_name.')
        full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
        return client.create_task(
            parent=full_queue_name,
            task=task,
            response_view=response_view,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
    @GoogleBaseHook.fallback_to_default_project_id
    def get_task(
        self,
        location: str,
        queue_name: str,
        task_name: str,
        project_id: str,
        response_view: Optional[enums.Task.View] = None,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Optional[Sequence[Tuple[str, str]]] = None,
    ) -> Task:
        """Fetch a single task by location, queue, and name."""
        client = self.get_conn()
        full_task_name = CloudTasksClient.task_path(project_id, location, queue_name, task_name)
        return client.get_task(
            name=full_task_name,
            response_view=response_view,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
    @GoogleBaseHook.fallback_to_default_project_id
    def list_tasks(
        self,
        location: str,
        queue_name: str,
        project_id: str,
        response_view: Optional[enums.Task.View] = None,
        page_size: Optional[int] = None,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Optional[Sequence[Tuple[str, str]]] = None,
    ) -> List[Task]:
        """List tasks in a queue, draining the page iterator into a list."""
        client = self.get_conn()
        full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
        tasks = client.list_tasks(
            parent=full_queue_name,
            response_view=response_view,
            page_size=page_size,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        return list(tasks)
    @GoogleBaseHook.fallback_to_default_project_id
    def delete_task(
        self,
        location: str,
        queue_name: str,
        task_name: str,
        project_id: str,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Optional[Sequence[Tuple[str, str]]] = None,
    ) -> None:
        """Delete a task from a queue."""
        client = self.get_conn()
        full_task_name = CloudTasksClient.task_path(project_id, location, queue_name, task_name)
        client.delete_task(name=full_task_name, retry=retry, timeout=timeout, metadata=metadata)
    @GoogleBaseHook.fallback_to_default_project_id
    def run_task(
        self,
        location: str,
        queue_name: str,
        task_name: str,
        project_id: str,
        response_view: Optional[enums.Task.View] = None,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Optional[Sequence[Tuple[str, str]]] = None,
    ) -> Task:
        """Force a task to run immediately."""
        client = self.get_conn()
        full_task_name = CloudTasksClient.task_path(project_id, location, queue_name, task_name)
        return client.run_task(
            name=full_task_name,
            response_view=response_view,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
| true | true |
1c322545bc207fa9edba4b2993b4a25a2b58c14c | 1,515 | py | Python | app/utils/redis.py | edementyev/wakeupbot | 975b95efe6845589046cf185da241a4aa255caf7 | [
"MIT"
] | 1 | 2020-10-07T12:09:21.000Z | 2020-10-07T12:09:21.000Z | app/utils/redis.py | edementyev/wakeupbot | 975b95efe6845589046cf185da241a4aa255caf7 | [
"MIT"
] | 7 | 2020-12-07T09:11:01.000Z | 2022-03-02T18:15:01.000Z | app/utils/redis.py | edementyev/wakeupbot | 975b95efe6845589046cf185da241a4aa255caf7 | [
"MIT"
] | null | null | null | from typing import Optional
import aioredis
from aiogram import Dispatcher
from aiogram.contrib.fsm_storage.redis import RedisStorage2
from aiogram.utils.executor import Executor
from loguru import logger
from app import config
class BaseRedis:
def __init__(self, host: str, port: int = 6379, db: int = 0):
self.host = host
self.port = port
self.db = db
self._redis: Optional[aioredis.Redis] = None
@property
def closed(self):
return not self._redis or self._redis.closed
async def connect(self):
if self.closed:
self._redis = await aioredis.create_redis_pool(
(self.host, self.port), db=self.db
)
async def disconnect(self):
if not self.closed:
self._redis.close()
await self._redis.wait_closed()
@property
def redis(self) -> aioredis.Redis:
if self.closed:
raise RuntimeError("Redis connection is not opened")
return self._redis
# Module-level Redis-backed FSM storage, shared by the startup/shutdown hooks below.
storage = RedisStorage2(
    host=config.REDIS_HOST, port=config.REDIS_PORT, db=config.REDIS_DB
)
async def on_startup(dispatcher: Dispatcher):
    """Attach the module-level Redis FSM storage to the dispatcher at startup."""
    logger.info("Setup Redis2 Storage")
    dispatcher.storage = storage
async def on_shutdown(dispatcher: Dispatcher):
    """Close the dispatcher's storage connection and wait for it to finish closing."""
    logger.info("Close Redis Connection")
    await dispatcher.storage.close()
    await dispatcher.storage.wait_closed()
def setup(executor: Executor):
    """Register the Redis startup/shutdown handlers on the aiogram executor."""
    executor.on_startup(on_startup)
    executor.on_shutdown(on_shutdown)
| 24.836066 | 70 | 0.679208 | from typing import Optional
import aioredis
from aiogram import Dispatcher
from aiogram.contrib.fsm_storage.redis import RedisStorage2
from aiogram.utils.executor import Executor
from loguru import logger
from app import config
class BaseRedis:
    """Lazily-connected wrapper around an aioredis connection pool."""
    def __init__(self, host: str, port: int = 6379, db: int = 0):
        """Remember connection parameters; no connection is opened yet."""
        self.host = host
        self.port = port
        self.db = db
        # Underlying pool; stays None until connect() succeeds.
        self._redis: Optional[aioredis.Redis] = None
    @property
    def closed(self):
        # True when no pool was ever created or the existing pool is closed.
        return not self._redis or self._redis.closed
    async def connect(self):
        """Open the connection pool if it is not already open (idempotent)."""
        if self.closed:
            self._redis = await aioredis.create_redis_pool(
                (self.host, self.port), db=self.db
            )
    async def disconnect(self):
        """Close the pool and wait until it is fully closed (no-op when already closed)."""
        if not self.closed:
            self._redis.close()
            await self._redis.wait_closed()
    @property
    def redis(self) -> aioredis.Redis:
        """Return the live pool; raise RuntimeError if connect() has not been called."""
        if self.closed:
            raise RuntimeError("Redis connection is not opened")
        return self._redis
# Module-level Redis-backed FSM storage, shared by the startup/shutdown hooks below.
storage = RedisStorage2(
    host=config.REDIS_HOST, port=config.REDIS_PORT, db=config.REDIS_DB
)
async def on_startup(dispatcher: Dispatcher):
    """Attach the module-level Redis FSM storage to the dispatcher at startup."""
    logger.info("Setup Redis2 Storage")
    dispatcher.storage = storage
async def on_shutdown(dispatcher: Dispatcher):
    """Close the dispatcher's storage connection and wait for it to finish closing."""
    logger.info("Close Redis Connection")
    await dispatcher.storage.close()
    await dispatcher.storage.wait_closed()
def setup(executor: Executor):
    """Register the Redis startup/shutdown handlers on the aiogram executor."""
    executor.on_startup(on_startup)
    executor.on_shutdown(on_shutdown)
| true | true |
1c3225b3313a077ec9edb0f0627a95ed553ca984 | 567 | py | Python | saved_exp_results/FDST-VGG16/FDST.py | Linfeng-Lee/IIM | c63bf8b023ccc6750e178112662972f721dcabe1 | [
"MIT"
] | 81 | 2020-12-10T02:38:03.000Z | 2022-03-23T04:27:39.000Z | saved_exp_results/FDST-VGG16/FDST.py | Linfeng-Lee/IIM | c63bf8b023ccc6750e178112662972f721dcabe1 | [
"MIT"
] | 29 | 2020-12-15T09:07:00.000Z | 2022-03-22T10:00:28.000Z | saved_exp_results/FDST-VGG16/FDST.py | Linfeng-Lee/IIM | c63bf8b023ccc6750e178112662972f721dcabe1 | [
"MIT"
] | 24 | 2020-12-14T02:05:16.000Z | 2022-03-10T01:26:54.000Z | from easydict import EasyDict as edict
# init: all FDST dataset settings hang off a single easydict namespace,
# exported to callers under the conventional name ``cfg_data``.
__C_FDST = edict()
cfg_data = __C_FDST

# Training crop size — presumably (height, width); confirm against the dataloader.
__C_FDST.TRAIN_SIZE = (512,1024)
__C_FDST.DATA_PATH = '../ProcessedData/FDST/'

# Split list files, resolved relative to DATA_PATH.
__C_FDST.TRAIN_LST = 'train.txt'
__C_FDST.VAL_LST = 'val.txt'
# Ground-truth file used for validation — name suggests per-head locations; verify.
__C_FDST.VAL4EVAL = 'val_gt_loc.txt'

# Per-channel (means, stds) used to normalise input images.
__C_FDST.MEAN_STD = (
    [0.452016860247, 0.447249650955, 0.431981861591],
    [0.23242045939, 0.224925786257, 0.221840232611]
)

__C_FDST.LABEL_FACTOR = 1
__C_FDST.LOG_PARA = 1.
__C_FDST.RESUME_MODEL = ''#model path
__C_FDST.TRAIN_BATCH_SIZE = 6 #imgs
__C_FDST.VAL_BATCH_SIZE = 1 # must be 1
| 19.551724 | 53 | 0.738977 | from easydict import EasyDict as edict
__C_FDST = edict()
cfg_data = __C_FDST
__C_FDST.TRAIN_SIZE = (512,1024)
__C_FDST.DATA_PATH = '../ProcessedData/FDST/'
__C_FDST.TRAIN_LST = 'train.txt'
__C_FDST.VAL_LST = 'val.txt'
__C_FDST.VAL4EVAL = 'val_gt_loc.txt'
__C_FDST.MEAN_STD = (
[0.452016860247, 0.447249650955, 0.431981861591],
[0.23242045939, 0.224925786257, 0.221840232611]
)
__C_FDST.LABEL_FACTOR = 1
__C_FDST.LOG_PARA = 1.
__C_FDST.RESUME_MODEL = ''
__C_FDST.TRAIN_BATCH_SIZE = 6
__C_FDST.VAL_BATCH_SIZE = 1
| true | true |
1c3225f23e9c55d2dfc2a0e985897c13e7e998d6 | 7,486 | py | Python | venv/Lib/site-packages/pandas/core/array_algos/putmask.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/core/array_algos/putmask.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/core/array_algos/putmask.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | """
EA-compatible analogue to to np.putmask
"""
from __future__ import annotations
from typing import Any
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import ArrayLike
from pandas.core.dtypes.cast import (
convert_scalar_for_putitemlike,
find_common_type,
infer_dtype_from,
)
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
is_list_like,
)
from pandas.core.dtypes.missing import isna_compat
from pandas.core.arrays import ExtensionArray
def putmask_inplace(values: ArrayLike, mask: np.ndarray, value: Any) -> None:
    """
    ExtensionArray-compatible implementation of np.putmask.  The main
    difference is we do not handle repeating or truncating like numpy.

    Parameters
    ----------
    values : ArrayLike
        Mutated in place at the positions where ``mask`` is True.
    mask : np.ndarray[bool]
        We assume extract_bool_array has already been called.
    value : Any
    """
    # Scalars assigned into a plain ndarray may need a dtype-aware conversion
    # (e.g. datetime-like scalars) before np.putmask can handle them.
    if lib.is_scalar(value) and isinstance(values, np.ndarray):
        value = convert_scalar_for_putitemlike(value, values.dtype)

    if (
        not isinstance(values, np.ndarray)
        or (values.dtype == object and not lib.is_scalar(value))
        # GH#43424: np.putmask raises TypeError if we cannot cast between types with
        # rule = "safe", a stricter guarantee we may not have here
        or (
            isinstance(value, np.ndarray) and not np.can_cast(value.dtype, values.dtype)
        )
    ):
        # GH#19266 using np.putmask gives unexpected results with listlike value
        if is_list_like(value) and len(value) == len(values):
            values[mask] = value[mask]
        else:
            values[mask] = value
    else:
        # GH#37833 np.putmask is more performant than __setitem__
        np.putmask(values, mask, value)
def putmask_smart(values: np.ndarray, mask: np.ndarray, new) -> np.ndarray:
    """
    Return a new ndarray, try to preserve dtype if possible.

    Parameters
    ----------
    values : np.ndarray
        `values`, updated in-place.
    mask : np.ndarray[bool]
        Applies to both sides (array like).
    new : `new values` either scalar or an array like aligned with `values`

    Returns
    -------
    values : ndarray with updated values
        this *may* be a copy of the original

    See Also
    --------
    ndarray.putmask
    """
    # we cannot use np.asarray() here as we cannot have conversions
    # that numpy does when numeric are mixed with strings

    # n should be the length of the mask or a scalar here
    if not is_list_like(new):
        new = np.broadcast_to(new, mask.shape)

    # see if we are only masking values that if putted
    # will work in the current dtype
    try:
        nn = new[mask]
    except TypeError:
        # TypeError: only integer scalar arrays can be converted to a scalar index
        pass
    else:
        # make sure that we have a nullable type if we have nulls
        if not isna_compat(values, nn[0]):
            pass
        elif not (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype)):
            # only compare integers/floats
            pass
        elif not (is_float_dtype(values.dtype) or is_integer_dtype(values.dtype)):
            # only compare integers/floats
            pass
        else:
            # we ignore ComplexWarning here
            with warnings.catch_warnings(record=True):
                warnings.simplefilter("ignore", np.ComplexWarning)
                nn_at = nn.astype(values.dtype)

            # round-trip check: only keep the current dtype if the masked
            # values survive the cast unchanged
            comp = nn == nn_at
            if is_list_like(comp) and comp.all():
                nv = values.copy()
                nv[mask] = nn_at
                return nv

    new = np.asarray(new)

    if values.dtype.kind == new.dtype.kind:
        # preserves dtype if possible
        return _putmask_preserve(values, new, mask)

    # otherwise upcast to a dtype that can hold both sides
    dtype = find_common_type([values.dtype, new.dtype])
    # error: Argument 1 to "astype" of "_ArrayOrScalarCommon" has incompatible type
    # "Union[dtype[Any], ExtensionDtype]"; expected "Union[dtype[Any], None, type,
    # _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]],
    # List[Any], _DTypeDict, Tuple[Any, Any]]]"
    values = values.astype(dtype)  # type: ignore[arg-type]

    return _putmask_preserve(values, new, mask)
def _putmask_preserve(new_values: np.ndarray, new, mask: np.ndarray):
    """Assign ``new`` into ``new_values`` where ``mask`` is True, in place.

    Tries the element-aligned assignment first; when ``new`` cannot be
    boolean-indexed with ``mask`` (length mismatch), it is broadcast across
    the masked positions instead.  Returns the mutated ``new_values``.
    """
    try:
        new_values[mask] = new[mask]
    except (IndexError, ValueError):
        new_values[mask] = new
    return new_values
def putmask_without_repeat(values: np.ndarray, mask: np.ndarray, new: Any) -> None:
    """
    np.putmask will truncate or repeat if `new` is a listlike with
    len(new) != len(values).  We require an exact match.

    Parameters
    ----------
    values : np.ndarray
        Mutated in place.
    mask : np.ndarray[bool]
    new : Any

    Raises
    ------
    ValueError
        If ``new`` is 1-D listlike but matches neither the number of masked
        positions nor the mask's trailing dimension (and is not length 1).
    """
    if getattr(new, "ndim", 0) >= 1:
        new = new.astype(values.dtype, copy=False)

    # TODO: this prob needs some better checking for 2D cases
    nlocs = mask.sum()
    if nlocs > 0 and is_list_like(new) and getattr(new, "ndim", 1) == 1:
        if nlocs == len(new):
            # GH#30567
            # If length of ``new`` is less than the length of ``values``,
            # `np.putmask` would first repeat the ``new`` array and then
            # assign the masked values hence produces incorrect result.
            # `np.place` on the other hand uses the ``new`` values at it is
            # to place in the masked locations of ``values``
            np.place(values, mask, new)
            # i.e. values[mask] = new
        elif mask.shape[-1] == len(new) or len(new) == 1:
            np.putmask(values, mask, new)
        else:
            raise ValueError("cannot assign mismatch length to masked array")
    else:
        np.putmask(values, mask, new)
def validate_putmask(values: ArrayLike, mask: np.ndarray) -> tuple[np.ndarray, bool]:
    """
    Coerce ``mask`` to ndarray[bool], check its shape against ``values``,
    and report whether the putmask would be a no-op (no True entries).
    """
    bool_mask = extract_bool_array(mask)
    if bool_mask.shape != values.shape:
        raise ValueError("putmask: mask and data must be the same size")
    return bool_mask, not bool_mask.any()
def extract_bool_array(mask: ArrayLike) -> np.ndarray:
    """
    Coerce ``mask`` to a plain ``ndarray[bool]``.

    ExtensionArrays (BooleanArray, Sparse[bool], ...) are densified first so
    that missing values become ``False`` instead of failing the conversion.
    """
    if isinstance(mask, ExtensionArray):
        # BooleanArray may hold pd.NA, which np.asarray cannot coerce to
        # bool; to_numpy lets us map NA -> False explicitly.  For the other
        # ExtensionArrays this matches np.asarray(mask, dtype=bool).
        mask = mask.to_numpy(dtype=bool, na_value=False)
    return np.asarray(mask, dtype=bool)
def setitem_datetimelike_compat(values: np.ndarray, num_set: int, other):
    """
    When assigning a datetime-like scalar into an object-dtype array, wrap it
    in a list so numpy does not miscast it (see numpy#12550); listlikes are
    materialised as plain lists for the same reason.

    Parameters
    ----------
    values : np.ndarray
    num_set : int
        For putmask, this is mask.sum()
    other : Any

    Returns
    -------
    other : possibly wrapped/listified version of the input.
    """
    if values.dtype == object:
        dtype, _ = infer_dtype_from(other, pandas_dtype=True)

        if isinstance(dtype, np.dtype) and dtype.kind in ["m", "M"]:
            # https://github.com/numpy/numpy/issues/12550
            # timedelta64 will incorrectly cast to int
            if not is_list_like(other):
                other = [other] * num_set
            else:
                other = list(other)

    return other
| 33.123894 | 89 | 0.608068 | from __future__ import annotations
from typing import Any
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import ArrayLike
from pandas.core.dtypes.cast import (
convert_scalar_for_putitemlike,
find_common_type,
infer_dtype_from,
)
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
is_list_like,
)
from pandas.core.dtypes.missing import isna_compat
from pandas.core.arrays import ExtensionArray
def putmask_inplace(values: ArrayLike, mask: np.ndarray, value: Any) -> None:
if lib.is_scalar(value) and isinstance(values, np.ndarray):
value = convert_scalar_for_putitemlike(value, values.dtype)
if (
not isinstance(values, np.ndarray)
or (values.dtype == object and not lib.is_scalar(value))
np.can_cast(value.dtype, values.dtype)
)
):
values[mask] = value[mask]
else:
values[mask] = value
else:
ask_smart(values: np.ndarray, mask: np.ndarray, new) -> np.ndarray:
if not is_list_like(new):
new = np.broadcast_to(new, mask.shape)
try:
nn = new[mask]
except TypeError:
pass
else:
if not isna_compat(values, nn[0]):
pass
elif not (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype)):
pass
elif not (is_float_dtype(values.dtype) or is_integer_dtype(values.dtype)):
pass
else:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", np.ComplexWarning)
nn_at = nn.astype(values.dtype)
comp = nn == nn_at
if is_list_like(comp) and comp.all():
nv = values.copy()
nv[mask] = nn_at
return nv
new = np.asarray(new)
if values.dtype.kind == new.dtype.kind:
return _putmask_preserve(values, new, mask)
dtype = find_common_type([values.dtype, new.dtype])
# _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]],
# List[Any], _DTypeDict, Tuple[Any, Any]]]"
values = values.astype(dtype)
return _putmask_preserve(values, new, mask)
def _putmask_preserve(new_values: np.ndarray, new, mask: np.ndarray):
try:
new_values[mask] = new[mask]
except (IndexError, ValueError):
new_values[mask] = new
return new_values
def putmask_without_repeat(values: np.ndarray, mask: np.ndarray, new: Any) -> None:
if getattr(new, "ndim", 0) >= 1:
new = new.astype(values.dtype, copy=False)
nlocs = mask.sum()
if nlocs > 0 and is_list_like(new) and getattr(new, "ndim", 1) == 1:
if nlocs == len(new):
np.place(values, mask, new)
elif mask.shape[-1] == len(new) or len(new) == 1:
np.putmask(values, mask, new)
else:
raise ValueError("cannot assign mismatch length to masked array")
else:
np.putmask(values, mask, new)
def validate_putmask(values: ArrayLike, mask: np.ndarray) -> tuple[np.ndarray, bool]:
mask = extract_bool_array(mask)
if mask.shape != values.shape:
raise ValueError("putmask: mask and data must be the same size")
noop = not mask.any()
return mask, noop
def extract_bool_array(mask: ArrayLike) -> np.ndarray:
if isinstance(mask, ExtensionArray):
mask = mask.to_numpy(dtype=bool, na_value=False)
mask = np.asarray(mask, dtype=bool)
return mask
def setitem_datetimelike_compat(values: np.ndarray, num_set: int, other):
if values.dtype == object:
dtype, _ = infer_dtype_from(other, pandas_dtype=True)
if isinstance(dtype, np.dtype) and dtype.kind in ["m", "M"]:
if not is_list_like(other):
other = [other] * num_set
else:
other = list(other)
return other
| true | true |
1c322636747515cc2ef81e9f89448dbce5406621 | 9,800 | py | Python | snpit/core.py | thobalose/snpit | cbc649ae40104ac5ba482504503f6964f3adddbe | [
"MIT"
] | null | null | null | snpit/core.py | thobalose/snpit | cbc649ae40104ac5ba482504503f6964f3adddbe | [
"MIT"
] | null | null | null | snpit/core.py | thobalose/snpit | cbc649ae40104ac5ba482504503f6964f3adddbe | [
"MIT"
] | null | null | null | #! /usr/bin/env python
import pkg_resources, codecs, csv
import operator
# PyVCF
import vcf
import gzip
#BioPython
from Bio import SeqIO
class snpit(object):
    """
    The snpit class is designed to take a VCF file and return the most likely lineage based on Sam Lipworth's SNP-IT.

    The methods have been separated so it can be incorporated into single Python scripts that processes multiple VCF files.
    """

    def __init__(self,input_file=None,threshold=10):
        """
        Args:
            input_file: Path to a VCF or FASTA file (optionally gz-compressed).
            threshold: The percentage of snps above which a sample is considered to belong to a lineage.

        NOTE(review): the ``input_file=None`` default is unusable — the code
        below immediately calls ``input_file.endswith(...)`` and would raise
        AttributeError if the argument is omitted.
        """
        # set the threshold as a class attribute
        self.threshold=threshold

        # construct the relative path in the package to the library file which contains a list of all the lineages and sub-lineages
        resource_path = '/'.join(('..','lib', 'library.csv'))
        utf8_reader = codecs.getreader("utf-8")

        # open a stream object ready for reading
        library_file = pkg_resources.resource_stream("snpit", resource_path)

        self.reference_snps={}
        self.lineages={}

        reader = csv.DictReader(utf8_reader(library_file))

        # read the library file line-by-line
        for record in reader:
            # remove the carriage return and decode from binary
            lineage_name = record['id']

            # remember the lineage meta data in a dictionary
            self.lineages[lineage_name]={'species':record['species'],'lineage':record['lineage'],'sublineage':record['sublineage']}

            # now we know the name construct the relative path to this lineage file
            lineage_path='/'.join(('..','lib',lineage_name))

            # open a stream object to that file ready for reading
            lineage_file = pkg_resources.resource_stream("snpit", lineage_path)

            # initialise the dictionary for this lineage
            self.reference_snps[lineage_name]={}

            # read the lineage file, line-by-line
            for line in lineage_file:
                # remove the carriage return, decode from binary, and split on tabs
                cols=line.rstrip().decode('UTF-8').split('\t')

                # remember the base in the dictionary using the genome position as the key
                self.reference_snps[lineage_name][int(cols[0])]=cols[1]

        # let's check if it is compressed
        if input_file.endswith("gz"):
            cols=input_file.split('.')
            if cols[-2]=="vcf":
                self.load_vcf(input_file)
            elif cols[-2]=="fasta":
                self.load_fasta(input_file,compression=True)
            else:
                raise Exception("Only VCF and FASTA files are allowed as inputs (may be compressed with gz,bzip2)")
        elif input_file.endswith("vcf"):
            self.load_vcf(input_file)
        elif input_file.endswith("fasta"):
            self.load_fasta(input_file,compression=False)
        else:
            raise Exception("Only VCF and FASTA files are allowed as inputs (may be compressed with gz,bzip2)")

        # then work out the lineage
        (self.species,self.lineage,self.sublineage,self.percentage)=self.determine_lineage()

    def load_vcf(self,vcf_file):
        """
        Loads the vcf file and then, for each lineage, identify the base at each of the identifying positions in the genome.

        Args:
            vcf_file: Path to the VCF file to be read
        """
        # setup the dictionaries of expected SNPs for each lineage
        self._reset_lineage_snps()

        # open the VCF file for reading
        vcf_reader = vcf.Reader(open(vcf_file, 'r'))

        # read the VCF file line-by-line
        for record in vcf_reader:
            # consider each lineage in turn
            for lineage_name in self.lineages:
                # only proceed if the genome position occurs in the list of identifiable positions
                if record.POS in self.reference_snps[lineage_name].keys():
                    # parse the record
                    for sample in record.samples:
                        geno = sample['GT'][0]

                        # if there is a null call, record a hyphen which won't match, regardless of the reference
                        if geno == '.':
                            self.sample_snps[lineage_name][int(record.POS)]="-"

                        # otherwise replace the H37Rv base with the actual base from the VCF file
                        # NOTE(review): record.ALT elements are PyVCF objects, not plain
                        # strings — the later ``==`` comparison relies on their string
                        # equality semantics; confirm this holds for the PyVCF version used.
                        elif geno != 0:
                            self.sample_snps[lineage_name][int(record.POS)]=record.ALT[int(geno)-1]

    def load_fasta(self,fasta_file,compression=False):
        """
        Loads a supplied fasta file and then, for each lineage, identify the base at each of the identifying positions

        Args:
            fasta_file (str): Path to the fasta file to be read
            compression (bool): whether the fasta file is compressed by gz or bzip2
        """
        # setup the dictionaries of expected SNPs for each lineage
        self._reset_lineage_snps()
        # NOTE(review): this re-initialisation discards the reference bases that
        # _reset_lineage_snps() just populated — confirm that is intended.
        self.sample_snps={}

        # open the fasta file for reading
        if compression:
            with gzip.open(fasta_file, 'rt') as fasta_file:
                fasta_reader = SeqIO.read(fasta_file,'fasta')
        else:
            with open(fasta_file, 'rt') as fasta_file:
                fasta_reader = SeqIO.read(fasta_file,'fasta')

        # iterate through the lineages
        for lineage_name in self.lineages:
            self.sample_snps[lineage_name]={}

            # iterate over the positions in the reference set of snps for that lineage
            for pos in self.reference_snps[lineage_name]:
                # NOTE(review): this membership test is always True — ``pos`` is drawn
                # from the very dict it is checked against.
                if pos in self.reference_snps[lineage_name].keys():
                    # CAUTION the GenBank File is 1-based, but the lineage files are 0-based
                    # Remember the nucleotide at the defining position
                    self.sample_snps[lineage_name][int(pos)]=fasta_reader.seq[int(pos)-1]

    def _reset_lineage_snps(self):
        """
        For each lineage creates a dictionary of the positions and expected nucleotides for TB that
        define that lineage.

        This is required because the VCF files only list changes relative to H37Rv.
        Hence these dictionaries are then changed when mutations at these positions are encountered.
        """
        # make the relative path to the H37Rv TB reference GenBank file
        genbank_path = '/'.join(('..','lib', "H37Rv.gbk"))

        # open a stream object ready for reading
        genbank_file = pkg_resources.resource_filename("snpit", genbank_path)

        # read the reference genome using BioPython
        reference_genome=SeqIO.read(genbank_file,'genbank')

        self.sample_snps={}

        # iterate through the lineages
        for lineage_name in self.lineages:
            self.sample_snps[lineage_name]={}

            # iterate over the positions in the reference set of snps for that lineage
            for pos in self.reference_snps[lineage_name]:
                # CAUTION the GenBank File is 1-based, but the lineage files are 0-based
                # Remember the nucleotide at the defining position
                self.sample_snps[lineage_name][int(pos)]=reference_genome.seq[int(pos)-1]

    def determine_lineage(self):
        """
        Having read the VCF file, for each lineage, calculate the percentage of SNP present in the sample.
        Note that this means the percentages will not add up to 100%.

        Returns:
            tuple of (species, lineage, sublineage, percentage); all None when no
            lineage clears the threshold.
        """
        self.percentage={}

        # consider lineage-by-lineage
        for lineage_name in self.lineages:
            # NOTE(review): reference_set is never used — candidate for removal.
            reference_set=[]

            shared=0
            ref=0

            for i,j in enumerate(self.reference_snps[lineage_name]):
                if self.reference_snps[lineage_name][j] == self.sample_snps[lineage_name][j]:
                    shared+=1
                ref+=1

            # thereby calculate the percentage of SNPs in this sample that match the lineage
            self.percentage[lineage_name]=((shared / ref) * 100)

        # create an ordered list of tuples of (lineage,percentage) in descending order
        self.results = sorted(self.percentage.items(), key=operator.itemgetter(1),reverse=True)

        identified_lineage_name=self.results[0][0]
        identified_lineage_percentage=self.results[0][1]

        # if the top lineage is above the specified threshold, return the classification
        if identified_lineage_percentage>self.threshold:
            # look at the next-highest lineage if the top one is Lineage 4 but with no sublineage
            if self.lineages[identified_lineage_name]['lineage']=="Lineage 4" and self.lineages[identified_lineage_name]['sublineage']=="":
                next_lineage_name=self.results[1][0]
                next_lineage_percentage=self.results[1][1]
                # NOTE(review): debug print left in — consider logging or removing.
                print(next_lineage_name,next_lineage_percentage)

                # if the next best lineage is ALSO lineage 4, but this one has a sublineage and is above the threshold, report that one instead
                if self.lineages[next_lineage_name]['lineage']=="Lineage 4" and self.lineages[next_lineage_name]['sublineage']!="" and next_lineage_percentage>self.threshold:
                    identified_lineage_name=next_lineage_name

            return(self.lineages[identified_lineage_name]['species'],self.lineages[identified_lineage_name]['lineage'],self.lineages[identified_lineage_name]['sublineage'],identified_lineage_percentage)

        # finally, no strain must be above the threshold percentage so return Nones as "Don't know"
        else:
            return(None,None,None,None)
| 38.431373 | 202 | 0.632041 |
import pkg_resources, codecs, csv
import operator
import vcf
import gzip
from Bio import SeqIO
class snpit(object):
def __init__(self,input_file=None,threshold=10):
self.threshold=threshold
resource_path = '/'.join(('..','lib', 'library.csv'))
utf8_reader = codecs.getreader("utf-8")
library_file = pkg_resources.resource_stream("snpit", resource_path)
self.reference_snps={}
self.lineages={}
reader = csv.DictReader(utf8_reader(library_file))
for record in reader:
lineage_name = record['id']
self.lineages[lineage_name]={'species':record['species'],'lineage':record['lineage'],'sublineage':record['sublineage']}
lineage_path='/'.join(('..','lib',lineage_name))
lineage_file = pkg_resources.resource_stream("snpit", lineage_path)
self.reference_snps[lineage_name]={}
for line in lineage_file:
cols=line.rstrip().decode('UTF-8').split('\t')
self.reference_snps[lineage_name][int(cols[0])]=cols[1]
if input_file.endswith("gz"):
cols=input_file.split('.')
if cols[-2]=="vcf":
self.load_vcf(input_file)
elif cols[-2]=="fasta":
self.load_fasta(input_file,compression=True)
else:
raise Exception("Only VCF and FASTA files are allowed as inputs (may be compressed with gz,bzip2)")
elif input_file.endswith("vcf"):
self.load_vcf(input_file)
elif input_file.endswith("fasta"):
self.load_fasta(input_file,compression=False)
else:
raise Exception("Only VCF and FASTA files are allowed as inputs (may be compressed with gz,bzip2)")
# then work out the lineage
(self.species,self.lineage,self.sublineage,self.percentage)=self.determine_lineage()
def load_vcf(self,vcf_file):
# setup the dictionaries of expected SNPs for each lineage
self._reset_lineage_snps()
# open the VCF file for reading
vcf_reader = vcf.Reader(open(vcf_file, 'r'))
# read the VCF file line-by-line
for record in vcf_reader:
# consider each lineage in turn
for lineage_name in self.lineages:
# only proceed if the genome position occurs in the list of identifiable positions
if record.POS in self.reference_snps[lineage_name].keys():
# parse the record
for sample in record.samples:
geno = sample['GT'][0]
# if there is a null call, record a hyphen which won't match, regardless of the reference
if geno == '.':
self.sample_snps[lineage_name][int(record.POS)]="-"
elif geno != 0:
self.sample_snps[lineage_name][int(record.POS)]=record.ALT[int(geno)-1]
def load_fasta(self,fasta_file,compression=False):
self._reset_lineage_snps()
self.sample_snps={}
if compression:
with gzip.open(fasta_file, 'rt') as fasta_file:
fasta_reader = SeqIO.read(fasta_file,'fasta')
else:
with open(fasta_file, 'rt') as fasta_file:
fasta_reader = SeqIO.read(fasta_file,'fasta')
for lineage_name in self.lineages:
self.sample_snps[lineage_name]={}
for pos in self.reference_snps[lineage_name]:
if pos in self.reference_snps[lineage_name].keys():
self.sample_snps[lineage_name][int(pos)]=fasta_reader.seq[int(pos)-1]
def _reset_lineage_snps(self):
genbank_path = '/'.join(('..','lib', "H37Rv.gbk"))
genbank_file = pkg_resources.resource_filename("snpit", genbank_path)
reference_genome=SeqIO.read(genbank_file,'genbank')
self.sample_snps={}
for lineage_name in self.lineages:
self.sample_snps[lineage_name]={}
for pos in self.reference_snps[lineage_name]:
self.sample_snps[lineage_name][int(pos)]=reference_genome.seq[int(pos)-1]
def determine_lineage(self):
self.percentage={}
for lineage_name in self.lineages:
reference_set=[]
shared=0
ref=0
for i,j in enumerate(self.reference_snps[lineage_name]):
if self.reference_snps[lineage_name][j] == self.sample_snps[lineage_name][j]:
shared+=1
ref+=1
self.percentage[lineage_name]=((shared / ref) * 100)
self.results = sorted(self.percentage.items(), key=operator.itemgetter(1),reverse=True)
identified_lineage_name=self.results[0][0]
identified_lineage_percentage=self.results[0][1]
if identified_lineage_percentage>self.threshold:
if self.lineages[identified_lineage_name]['lineage']=="Lineage 4" and self.lineages[identified_lineage_name]['sublineage']=="":
next_lineage_name=self.results[1][0]
next_lineage_percentage=self.results[1][1]
print(next_lineage_name,next_lineage_percentage)
if self.lineages[next_lineage_name]['lineage']=="Lineage 4" and self.lineages[next_lineage_name]['sublineage']!="" and next_lineage_percentage>self.threshold:
identified_lineage_name=next_lineage_name
return(self.lineages[identified_lineage_name]['species'],self.lineages[identified_lineage_name]['lineage'],self.lineages[identified_lineage_name]['sublineage'],identified_lineage_percentage)
else:
return(None,None,None,None)
| true | true |
1c32263ecd08ba9dac7a6b97477f8842ccbd58ca | 1,122 | py | Python | src/plot_perfomance.py | hoaaoh/Audio2Vec | 96711c2300646ce10878113fa0d506d703db96d7 | [
"Apache-2.0"
] | 11 | 2018-02-16T03:52:17.000Z | 2020-04-07T17:05:50.000Z | src/plot_perfomance.py | hoaaoh/Audio2Vec | 96711c2300646ce10878113fa0d506d703db96d7 | [
"Apache-2.0"
] | 2 | 2018-05-26T16:27:59.000Z | 2019-10-10T14:32:20.000Z | src/plot_perfomance.py | hoaaoh/Audio2Vec | 96711c2300646ce10878113fa0d506d703db96d7 | [
"Apache-2.0"
] | 4 | 2017-11-16T17:54:38.000Z | 2020-04-17T08:45:43.000Z | #!/usr/bin/env python3
import argparse
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import spline
def main():
AE_small_list = [ 0.730, 0.685, 0.737, 0.693, 0.881, 0.713 ]
AE_large_list = [ 0.234, 0.307, 0.400, 0.323, 0.317, 0.233 ]
### m = [ 3, 6, 10, 15, 21, 26 ] ###
NE_small_list = [ 0.390, 0.490, 0.484, 0.460, 0.351, ]
NE_large_list = [ 0.100, 0.158, 0.169, 0.150, 0.092, ]
dim = [100, 200, 400, 600, 800, 1000 ]
small_dim = [117, 234, 390, 585, 819, 1014 ]
#dim_new = np.linspace( min(dim), max(dim),300)
#AE_small_smooth = spline(dim, AE_small_list, dim_new)
#plt.plot(dim_new, AE_small_smooth , label = 'AE_small_smooth')
plt.plot(dim, AE_small_list, '-o', label='SA_small')
plt.plot(dim, AE_large_list, '-o', label='SA_large')
plt.plot(small_dim, NE_small_list, '-o', label='NE_small')
plt.plot(small_dim, NE_large_list,'-o', label='NE_large')
plt.xlabel('Representation Dimension', fontsize=12)
plt.ylabel('MAP', fontsize=12)
plt.legend()
plt.show()
return
if __name__ == '__main__':
main()
| 33 | 67 | 0.628342 |
import argparse
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import spline
def main():
AE_small_list = [ 0.730, 0.685, 0.737, 0.693, 0.881, 0.713 ]
AE_large_list = [ 0.234, 0.307, 0.400, 0.323, 0.317, 0.233 ]
list = [ 0.100, 0.158, 0.169, 0.150, 0.092, ]
dim = [100, 200, 400, 600, 800, 1000 ]
small_dim = [117, 234, 390, 585, 819, 1014 ]
plt.plot(dim, AE_small_list, '-o', label='SA_small')
plt.plot(dim, AE_large_list, '-o', label='SA_large')
plt.plot(small_dim, NE_small_list, '-o', label='NE_small')
plt.plot(small_dim, NE_large_list,'-o', label='NE_large')
plt.xlabel('Representation Dimension', fontsize=12)
plt.ylabel('MAP', fontsize=12)
plt.legend()
plt.show()
return
if __name__ == '__main__':
main()
| true | true |
1c3226c3f306914b538a7d1693092840ae2779b2 | 2,449 | py | Python | huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/create_database_user_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/create_database_user_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/create_database_user_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateDatabaseUserResponse(SdkResponse):
    """Generated response model for the create-database-user API call.

    This response carries no payload attributes of its own (empty
    ``openapi_types`` / ``attribute_map``).

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
    }

    attribute_map = {
    }

    def __init__(self):
        """CreateDatabaseUserResponse - a model defined in huaweicloud sdk"""
        super(CreateDatabaseUserResponse, self).__init__()
        self.discriminator = None

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialise every declared attribute; nested models expose
        # their own to_dict(), lists/dicts are walked element-by-element.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask values flagged as sensitive instead of exposing them.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 legacy: force utf-8 default encoding before dumping.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreateDatabaseUserResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 28.476744 | 79 | 0.553695 |
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateDatabaseUserResponse(SdkResponse):
sensitive_list = []
openapi_types = {
}
attribute_map = {
}
def __init__(self):
super(CreateDatabaseUserResponse, self).__init__()
self.discriminator = None
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, CreateDatabaseUserResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
1c322804c158f7cadde9987b27a14b5122158323 | 129 | py | Python | Messages/Error.py | BrinzaBezrukoff/Local-Chat | 38eeb5d1226abde5662138c9f865005f47198767 | [
"BSD-3-Clause"
] | 2 | 2018-07-16T13:54:14.000Z | 2021-12-23T17:42:19.000Z | Messages/Error.py | BrinzaBezrukoff/Local-Chat | 38eeb5d1226abde5662138c9f865005f47198767 | [
"BSD-3-Clause"
] | null | null | null | Messages/Error.py | BrinzaBezrukoff/Local-Chat | 38eeb5d1226abde5662138c9f865005f47198767 | [
"BSD-3-Clause"
] | null | null | null | from Messages.Message import Message
class Error (Message):
    """A :class:`Message` whose text is rendered with an ``ERROR: `` prefix."""

    def get_text(self):
        """Return the wrapped text, prefixed with the error marker."""
        prefix = "ERROR: "
        return prefix + self.text
| 18.428571 | 37 | 0.643411 | from Messages.Message import Message
class Error (Message):
def get_text(self):
return "ERROR: " + self.text
| true | true |
1c32285bb36c63cc482fd1dec3c43e23ead76a16 | 272 | py | Python | raiden/tests/unit/test_pending_locks.py | tirkarthi/raiden | dbd03ddda039332b54ec0c02d81cbe1100bc8028 | [
"MIT"
] | 2,101 | 2016-06-01T11:31:49.000Z | 2022-03-27T20:13:19.000Z | raiden/tests/unit/test_pending_locks.py | tirkarthi/raiden | dbd03ddda039332b54ec0c02d81cbe1100bc8028 | [
"MIT"
] | 5,291 | 2016-06-01T18:14:04.000Z | 2022-03-31T11:19:09.000Z | raiden/tests/unit/test_pending_locks.py | tirkarthi/raiden | dbd03ddda039332b54ec0c02d81cbe1100bc8028 | [
"MIT"
] | 484 | 2016-06-01T18:21:06.000Z | 2022-03-22T10:29:45.000Z | from raiden.constants import LOCKSROOT_OF_NO_LOCKS
from raiden.transfer.channel import compute_locksroot
from raiden.transfer.state import PendingLocksState
def test_empty():
locks = PendingLocksState([])
assert compute_locksroot(locks) == LOCKSROOT_OF_NO_LOCKS
| 30.222222 | 60 | 0.823529 | from raiden.constants import LOCKSROOT_OF_NO_LOCKS
from raiden.transfer.channel import compute_locksroot
from raiden.transfer.state import PendingLocksState
def test_empty():
locks = PendingLocksState([])
assert compute_locksroot(locks) == LOCKSROOT_OF_NO_LOCKS
| true | true |
1c32299f692ecef5a68d233b2b08cb4a2622bb34 | 3,817 | py | Python | Yukki/YukkiUtilities/tgcallsrun/video.py | xsyn1100/YukkiMusic-Old | 92400708b6d796f83fc6c59130176605b050e9ab | [
"MIT"
] | null | null | null | Yukki/YukkiUtilities/tgcallsrun/video.py | xsyn1100/YukkiMusic-Old | 92400708b6d796f83fc6c59130176605b050e9ab | [
"MIT"
] | null | null | null | Yukki/YukkiUtilities/tgcallsrun/video.py | xsyn1100/YukkiMusic-Old | 92400708b6d796f83fc6c59130176605b050e9ab | [
"MIT"
] | null | null | null | from pyrogram.raw.base import Update
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from pytgcalls.types import Update
from pytgcalls.types.input_stream import AudioPiped, AudioVideoPiped
from pytgcalls.types.input_stream.quality import (
HighQualityAudio,
HighQualityVideo,
LowQualityVideo,
MediumQualityVideo,
)
from pytgcalls.types.stream import StreamVideoEnded
from Yukki import app
from Yukki.config import GROUP, CHANNEL
from Yukki.YukkiUtilities.tgcallsrun.music import pytgcalls as call_py
from Yukki.YukkiUtilities.tgcallsrun.queues import (
QUEUE,
clear_queue,
get_queue,
pop_an_item,
)
keyboard = InlineKeyboardMarkup(
[
[
InlineKeyboardButton("ᴅᴏɴᴀsɪ", url=f"https://t.me/{GROUP}"),
InlineKeyboardButton("sᴜᴘᴘᴏʀᴛ", url=f"https://t.me/{CHANNEL}"),
]
]
)
async def skip_current_song(chat_id):
if chat_id in QUEUE:
chat_queue = get_queue(chat_id)
if len(chat_queue) == 1:
await call_py.leave_group_call(chat_id)
clear_queue(chat_id)
return 1
else:
try:
songname = chat_queue[1][0]
url = chat_queue[1][1]
link = chat_queue[1][2]
type = chat_queue[1][3]
Q = chat_queue[1][4]
if type == "Audio":
await call_py.change_stream(
chat_id,
AudioPiped(
url,
),
)
elif type == "Video":
if Q == 720:
hm = HighQualityVideo()
elif Q == 480:
hm = MediumQualityVideo()
elif Q == 360:
hm = LowQualityVideo()
await call_py.change_stream(
chat_id, AudioVideoPiped(url, HighQualityAudio(), hm)
)
pop_an_item(chat_id)
return [songname, link, type]
except:
await call_py.leave_group_call(chat_id)
clear_queue(chat_id)
return 2
else:
return 0
async def skip_item(chat_id, h):
if chat_id in QUEUE:
chat_queue = get_queue(chat_id)
try:
x = int(h)
songname = chat_queue[x][0]
chat_queue.pop(x)
return songname
except Exception as e:
print(e)
return 0
else:
return 0
@call_py.on_stream_end()
async def stream_end_handler(_, u: Update):
if isinstance(u, StreamVideoEnded):
chat_id = u.chat_id
print(chat_id)
op = await skip_current_song(chat_id)
if op == 1:
await app.send_message(
chat_id,
"**✅ Antrian kosong.\n\n• Assistant meninggalkan obrolan suara**",
)
elif op == 2:
await app.send_message(
chat_id,
f"**❌ terjadi kesalahan\n\n» Membersihkan antrian dan keluar dari obrolan video.**",
)
else:
await app.send_message(
chat_id,
f"**▶️ Sekarang memutar video\n\n🏷 Nama: [{op[0]}]({op[1]})**",
disable_web_page_preview=True,
reply_markup=keyboard,
)
@call_py.on_kicked()
async def kicked_handler(_, chat_id: int):
if chat_id in QUEUE:
clear_queue(chat_id)
@call_py.on_closed_voice_chat()
async def closed_voice_chat_handler(_, chat_id: int):
if chat_id in QUEUE:
clear_queue(chat_id)
@call_py.on_left()
async def left_handler(_, chat_id: int):
if chat_id in QUEUE:
clear_queue(chat_id)
| 29.137405 | 100 | 0.544931 | from pyrogram.raw.base import Update
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from pytgcalls.types import Update
from pytgcalls.types.input_stream import AudioPiped, AudioVideoPiped
from pytgcalls.types.input_stream.quality import (
HighQualityAudio,
HighQualityVideo,
LowQualityVideo,
MediumQualityVideo,
)
from pytgcalls.types.stream import StreamVideoEnded
from Yukki import app
from Yukki.config import GROUP, CHANNEL
from Yukki.YukkiUtilities.tgcallsrun.music import pytgcalls as call_py
from Yukki.YukkiUtilities.tgcallsrun.queues import (
QUEUE,
clear_queue,
get_queue,
pop_an_item,
)
keyboard = InlineKeyboardMarkup(
[
[
InlineKeyboardButton("ᴅᴏɴᴀsɪ", url=f"https://t.me/{GROUP}"),
InlineKeyboardButton("sᴜᴘᴘᴏʀᴛ", url=f"https://t.me/{CHANNEL}"),
]
]
)
async def skip_current_song(chat_id):
if chat_id in QUEUE:
chat_queue = get_queue(chat_id)
if len(chat_queue) == 1:
await call_py.leave_group_call(chat_id)
clear_queue(chat_id)
return 1
else:
try:
songname = chat_queue[1][0]
url = chat_queue[1][1]
link = chat_queue[1][2]
type = chat_queue[1][3]
Q = chat_queue[1][4]
if type == "Audio":
await call_py.change_stream(
chat_id,
AudioPiped(
url,
),
)
elif type == "Video":
if Q == 720:
hm = HighQualityVideo()
elif Q == 480:
hm = MediumQualityVideo()
elif Q == 360:
hm = LowQualityVideo()
await call_py.change_stream(
chat_id, AudioVideoPiped(url, HighQualityAudio(), hm)
)
pop_an_item(chat_id)
return [songname, link, type]
except:
await call_py.leave_group_call(chat_id)
clear_queue(chat_id)
return 2
else:
return 0
async def skip_item(chat_id, h):
if chat_id in QUEUE:
chat_queue = get_queue(chat_id)
try:
x = int(h)
songname = chat_queue[x][0]
chat_queue.pop(x)
return songname
except Exception as e:
print(e)
return 0
else:
return 0
@call_py.on_stream_end()
async def stream_end_handler(_, u: Update):
if isinstance(u, StreamVideoEnded):
chat_id = u.chat_id
print(chat_id)
op = await skip_current_song(chat_id)
if op == 1:
await app.send_message(
chat_id,
"**✅ Antrian kosong.\n\n• Assistant meninggalkan obrolan suara**",
)
elif op == 2:
await app.send_message(
chat_id,
f"**❌ terjadi kesalahan\n\n» Membersihkan antrian dan keluar dari obrolan video.**",
)
else:
await app.send_message(
chat_id,
f"**▶️ Sekarang memutar video\n\n🏷 Nama: [{op[0]}]({op[1]})**",
disable_web_page_preview=True,
reply_markup=keyboard,
)
@call_py.on_kicked()
async def kicked_handler(_, chat_id: int):
if chat_id in QUEUE:
clear_queue(chat_id)
@call_py.on_closed_voice_chat()
async def closed_voice_chat_handler(_, chat_id: int):
if chat_id in QUEUE:
clear_queue(chat_id)
@call_py.on_left()
async def left_handler(_, chat_id: int):
if chat_id in QUEUE:
clear_queue(chat_id)
| true | true |
1c322a07c3235785aa972338b81421c0efb35565 | 5,290 | py | Python | dsalgo/heap.py | dragonman164/dsalgo | 7abcc03e59afeab20e4c5dfd72bb9216bce15a54 | [
"MIT"
] | 11 | 2020-09-20T12:27:33.000Z | 2022-02-02T07:14:06.000Z | dsalgo/heap.py | nisheksharma/dsalgo | 97cd3fd44fefc5321136e98eca4537c959e39285 | [
"MIT"
] | 67 | 2020-09-25T04:39:00.000Z | 2021-10-15T05:58:31.000Z | dsalgo/heap.py | nisheksharma/dsalgo | 97cd3fd44fefc5321136e98eca4537c959e39285 | [
"MIT"
] | 43 | 2020-09-25T05:57:49.000Z | 2021-10-02T20:28:15.000Z | class Heap:
def __init__(self, type='min'):
"""
Create a Heap object
Args :
type : type of Heap ('min' or 'max') default-'min'
"""
self.size = None
self.Heap = [0]
self.type = type
__all__ = ['parent', 'leftChild', 'rightChild', 'display',
'isLeaf', 'root', 'insert', 'delete', 'to_list']
def parent(self, pos):
"""
returns parent element's position
Args:
pos : index of element
Return :
int : index of parent element
"""
if((pos - 1) // 2) >= 0:
return (pos - 1) // 2
else:
return 0
def leftChild(self, pos):
"""
returns parent element's position
Args:
pos : index of element
return :
int : index of its left child element
"""
return (2 * pos) + 1
def rightChild(self, pos):
"""
returns parent element's position
Args:
pos(int) : index of element
return :
int : index of its right child element
"""
return (2 * pos) + 2
def display(self):
"""
display heap elements
"""
for i in range(0, ((self.size+1) // 2)):
print("Parent : " + str(self.Heap[i]), end=" ")
if self.size >= self.leftChild(i):
print("Left Child : " + str(self.Heap[2 * i+1]), end=" ")
if self.size >= self.rightChild(i):
print("Right Child : " + str(self.Heap[2 * i+2]))
print()
def isLeaf(self, pos):
"""
checks the index is leaf or not
Returns :
boolean : True or False
"""
if pos >= ((self.size+1)//2) and pos <= self.size:
return True
return False
def root(self):
"""
returns root element of the Heap
"""
return self.Heap[0]
def insert(self, item):
"""
insert element in Heap
Args :
item : item to be inserted
"""
if self.size is None:
self.Heap[0] = item
self.size = 0
else:
self.size += 1
self.Heap.append(item)
current = self.size
if self.type == 'max':
while (self.Heap[current] > self.Heap[self.parent(current)]):
self.swap(current, self.parent(current))
current = self.parent(current)
elif self.type == 'min':
while self.Heap[current] < self.Heap[self.parent(current)]:
self.swap(current, self.parent(current))
current = self.parent(current)
else:
print('Non Supported Type :'+type +
'is not supported. Type can be "min" or "max"')
def swap(self, fpos, spos):
"""
swap two element's position in Heap
Args :
fpos : first position
spos : second position
"""
self.Heap[fpos], self.Heap[spos] = (self.Heap[spos], self.Heap[fpos])
def delete(self, pos):
"""
Delete an elemnet from Heap
Args :
pos : index of element to be deleted
"""
self.Heap[pos] = self.Heap[self.size]
self.Heap = self.Heap[:-1]
self.size -= 1
if self.type == 'max':
if self.Heap[pos] > self.Heap[self.parent(pos)]:
while(self.Heap[pos] > self.Heap[self.parent(pos)]):
self.swap(pos, self.parent(pos))
pos = self.parent(pos)
while(self.rightChild(pos) <= self.size):
if(self.Heap[pos] >= self.leftChild(pos) and
self.Heap[pos] >= self.rightChild(pos)):
return
if(self.Heap[self.rightChild(pos)] <=
self.Heap[self.leftChild(pos)]):
self.swap(pos, self.leftChild(pos))
pos = self.leftChild(pos)
else:
self.swap(pos, self.rightChild(pos))
pos = self.rightChild(pos)
elif self.type == 'min':
if self.Heap[pos] < self.Heap[self.parent(pos)]:
while(self.Heap[pos] < self.Heap[self.parent(pos)]):
self.swap(pos, self.parent(pos))
pos = self.parent(pos)
while(self.rightChild(pos) <= self.size):
if(self.Heap[pos] <= self.leftChild(pos) and self.Heap[pos] <=
self.rightChild(pos)):
return
if(self.Heap[self.rightChild(pos)] >=
self.Heap[self.leftChild(pos)]):
self.swap(pos, self.leftChild(pos))
pos = self.leftChild(pos)
else:
self.swap(pos, self.rightChild(pos))
pos = self.rightChild(pos)
else:
print('Non Supported Type :'+type +
'is not supported. Type can be "min" or "max"')
def to_list(self):
"""
returns python list of Heap elements
"""
return self.Heap
| 31.676647 | 78 | 0.465217 | class Heap:
def __init__(self, type='min'):
self.size = None
self.Heap = [0]
self.type = type
__all__ = ['parent', 'leftChild', 'rightChild', 'display',
'isLeaf', 'root', 'insert', 'delete', 'to_list']
def parent(self, pos):
if((pos - 1) // 2) >= 0:
return (pos - 1) // 2
else:
return 0
def leftChild(self, pos):
return (2 * pos) + 1
def rightChild(self, pos):
return (2 * pos) + 2
def display(self):
for i in range(0, ((self.size+1) // 2)):
print("Parent : " + str(self.Heap[i]), end=" ")
if self.size >= self.leftChild(i):
print("Left Child : " + str(self.Heap[2 * i+1]), end=" ")
if self.size >= self.rightChild(i):
print("Right Child : " + str(self.Heap[2 * i+2]))
print()
def isLeaf(self, pos):
if pos >= ((self.size+1)//2) and pos <= self.size:
return True
return False
def root(self):
return self.Heap[0]
def insert(self, item):
if self.size is None:
self.Heap[0] = item
self.size = 0
else:
self.size += 1
self.Heap.append(item)
current = self.size
if self.type == 'max':
while (self.Heap[current] > self.Heap[self.parent(current)]):
self.swap(current, self.parent(current))
current = self.parent(current)
elif self.type == 'min':
while self.Heap[current] < self.Heap[self.parent(current)]:
self.swap(current, self.parent(current))
current = self.parent(current)
else:
print('Non Supported Type :'+type +
'is not supported. Type can be "min" or "max"')
def swap(self, fpos, spos):
self.Heap[fpos], self.Heap[spos] = (self.Heap[spos], self.Heap[fpos])
def delete(self, pos):
self.Heap[pos] = self.Heap[self.size]
self.Heap = self.Heap[:-1]
self.size -= 1
if self.type == 'max':
if self.Heap[pos] > self.Heap[self.parent(pos)]:
while(self.Heap[pos] > self.Heap[self.parent(pos)]):
self.swap(pos, self.parent(pos))
pos = self.parent(pos)
while(self.rightChild(pos) <= self.size):
if(self.Heap[pos] >= self.leftChild(pos) and
self.Heap[pos] >= self.rightChild(pos)):
return
if(self.Heap[self.rightChild(pos)] <=
self.Heap[self.leftChild(pos)]):
self.swap(pos, self.leftChild(pos))
pos = self.leftChild(pos)
else:
self.swap(pos, self.rightChild(pos))
pos = self.rightChild(pos)
elif self.type == 'min':
if self.Heap[pos] < self.Heap[self.parent(pos)]:
while(self.Heap[pos] < self.Heap[self.parent(pos)]):
self.swap(pos, self.parent(pos))
pos = self.parent(pos)
while(self.rightChild(pos) <= self.size):
if(self.Heap[pos] <= self.leftChild(pos) and self.Heap[pos] <=
self.rightChild(pos)):
return
if(self.Heap[self.rightChild(pos)] >=
self.Heap[self.leftChild(pos)]):
self.swap(pos, self.leftChild(pos))
pos = self.leftChild(pos)
else:
self.swap(pos, self.rightChild(pos))
pos = self.rightChild(pos)
else:
print('Non Supported Type :'+type +
'is not supported. Type can be "min" or "max"')
def to_list(self):
return self.Heap
| true | true |
1c322a9fe35005b570619852ca2e5613452f96e4 | 1,251 | py | Python | api/activities/migrations/0002_auto_20220305_1715.py | edmon1024/activities-api | e41ab6d5dbb7eba38effe353e88d75699a713f76 | [
"MIT"
] | null | null | null | api/activities/migrations/0002_auto_20220305_1715.py | edmon1024/activities-api | e41ab6d5dbb7eba38effe353e88d75699a713f76 | [
"MIT"
] | null | null | null | api/activities/migrations/0002_auto_20220305_1715.py | edmon1024/activities-api | e41ab6d5dbb7eba38effe353e88d75699a713f76 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2022-03-05 23:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('activities', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='activity',
name='created_at',
field=models.DateTimeField(auto_now_add=True, verbose_name='Created at'),
),
migrations.AlterField(
model_name='activity',
name='updated_at',
field=models.DateTimeField(auto_now=True, verbose_name='Updated at'),
),
migrations.AlterField(
model_name='property',
name='created_at',
field=models.DateTimeField(auto_now_add=True, verbose_name='Created at'),
),
migrations.AlterField(
model_name='property',
name='updated_at',
field=models.DateTimeField(auto_now=True, verbose_name='Updated at'),
),
migrations.AlterField(
model_name='survey',
name='created_at',
field=models.DateTimeField(auto_now_add=True, verbose_name='Created at'),
),
]
| 30.512195 | 85 | 0.597922 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('activities', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='activity',
name='created_at',
field=models.DateTimeField(auto_now_add=True, verbose_name='Created at'),
),
migrations.AlterField(
model_name='activity',
name='updated_at',
field=models.DateTimeField(auto_now=True, verbose_name='Updated at'),
),
migrations.AlterField(
model_name='property',
name='created_at',
field=models.DateTimeField(auto_now_add=True, verbose_name='Created at'),
),
migrations.AlterField(
model_name='property',
name='updated_at',
field=models.DateTimeField(auto_now=True, verbose_name='Updated at'),
),
migrations.AlterField(
model_name='survey',
name='created_at',
field=models.DateTimeField(auto_now_add=True, verbose_name='Created at'),
),
]
| true | true |
1c322b3a952fb67fcf60a3fa4678b96e32e86725 | 8,886 | py | Python | 12_tf_obj_1/lib/calculate_map.py | Boltuzamaki/Monk_Object_Detection | baf113ef6db8b531d0ef6413538e49d422163a20 | [
"Apache-2.0"
] | 549 | 2020-01-02T05:14:57.000Z | 2022-03-29T18:34:12.000Z | 12_tf_obj_1/lib/calculate_map.py | Boltuzamaki/Monk_Object_Detection | baf113ef6db8b531d0ef6413538e49d422163a20 | [
"Apache-2.0"
] | 98 | 2020-01-21T09:41:30.000Z | 2022-03-12T00:53:06.000Z | 12_tf_obj_1/lib/calculate_map.py | Boltuzamaki/Monk_Object_Detection | baf113ef6db8b531d0ef6413538e49d422163a20 | [
"Apache-2.0"
] | 233 | 2020-01-18T03:46:27.000Z | 2022-03-19T03:17:47.000Z | # Code from - https://github.com/Cartucho/mAP
import glob
import json
import os
import shutil
import operator
import sys
import argparse
import math
import matplotlib.pyplot as plt
import numpy as np
def log_average_miss_rate(prec, rec, num_images):
"""
log-average miss rate:
Calculated by averaging miss rates at 9 evenly spaced FPPI points
between 10e-2 and 10e0, in log-space.
output:
lamr | log-average miss rate
mr | miss rate
fppi | false positives per image
references:
[1] Dollar, Piotr, et al. "Pedestrian Detection: An Evaluation of the
State of the Art." Pattern Analysis and Machine Intelligence, IEEE
Transactions on 34.4 (2012): 743 - 761.
"""
# if there were no detections of that class
if prec.size == 0:
lamr = 0
mr = 1
fppi = 0
return lamr, mr, fppi
fppi = (1 - prec)
mr = (1 - rec)
fppi_tmp = np.insert(fppi, 0, -1.0)
mr_tmp = np.insert(mr, 0, 1.0)
# Use 9 evenly spaced reference points in log-space
ref = np.logspace(-2.0, 0.0, num = 9)
for i, ref_i in enumerate(ref):
# np.where() will always find at least 1 index, since min(ref) = 0.01 and min(fppi_tmp) = -1.0
j = np.where(fppi_tmp <= ref_i)[-1][-1]
ref[i] = mr_tmp[j]
# log(0) is undefined, so we use the np.maximum(1e-10, ref)
lamr = math.exp(np.mean(np.log(np.maximum(1e-10, ref))))
return lamr, mr, fppi
"""
throw error and exit
"""
def error(msg):
print(msg)
sys.exit(0)
"""
check if the number is a float between 0.0 and 1.0
"""
def is_float_between_0_and_1(value):
try:
val = float(value)
if val > 0.0 and val < 1.0:
return True
else:
return False
except ValueError:
return False
"""
Calculate the AP given the recall and precision array
1st) We compute a version of the measured precision/recall curve with
precision monotonically decreasing
2nd) We compute the AP as the area under this curve by numerical integration.
"""
def voc_ap(rec, prec):
"""
--- Official matlab code VOC2012---
mrec=[0 ; rec ; 1];
mpre=[0 ; prec ; 0];
for i=numel(mpre)-1:-1:1
mpre(i)=max(mpre(i),mpre(i+1));
end
i=find(mrec(2:end)~=mrec(1:end-1))+1;
ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
"""
rec.insert(0, 0.0) # insert 0.0 at begining of list
rec.append(1.0) # insert 1.0 at end of list
mrec = rec[:]
prec.insert(0, 0.0) # insert 0.0 at begining of list
prec.append(0.0) # insert 0.0 at end of list
mpre = prec[:]
"""
This part makes the precision monotonically decreasing
(goes from the end to the beginning)
matlab: for i=numel(mpre)-1:-1:1
mpre(i)=max(mpre(i),mpre(i+1));
"""
# matlab indexes start in 1 but python in 0, so I have to do:
# range(start=(len(mpre) - 2), end=0, step=-1)
# also the python function range excludes the end, resulting in:
# range(start=(len(mpre) - 2), end=-1, step=-1)
for i in range(len(mpre)-2, -1, -1):
mpre[i] = max(mpre[i], mpre[i+1])
"""
This part creates a list of indexes where the recall changes
matlab: i=find(mrec(2:end)~=mrec(1:end-1))+1;
"""
i_list = []
for i in range(1, len(mrec)):
if mrec[i] != mrec[i-1]:
i_list.append(i) # if it was matlab would be i + 1
"""
The Average Precision (AP) is the area under the curve
(numerical integration)
matlab: ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
"""
ap = 0.0
for i in i_list:
ap += ((mrec[i]-mrec[i-1])*mpre[i])
return ap, mrec, mpre
"""
Convert the lines of a file to a list
"""
def file_lines_to_list(path):
# open txt file lines to a list
with open(path) as f:
content = f.readlines()
# remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
return content
"""
Draws text in image
"""
def draw_text_in_image(img, text, pos, color, line_width):
font = cv2.FONT_HERSHEY_PLAIN
fontScale = 1
lineType = 1
bottomLeftCornerOfText = pos
cv2.putText(img, text,
bottomLeftCornerOfText,
font,
fontScale,
color,
lineType)
text_width, _ = cv2.getTextSize(text, font, fontScale, lineType)[0]
return img, (line_width + text_width)
"""
Plot - adjust axes
"""
def adjust_axes(r, t, fig, axes):
# get text width for re-scaling
bb = t.get_window_extent(renderer=r)
text_width_inches = bb.width / fig.dpi
# get axis width in inches
current_fig_width = fig.get_figwidth()
new_fig_width = current_fig_width + text_width_inches
propotion = new_fig_width / current_fig_width
# get axis limit
x_lim = axes.get_xlim()
axes.set_xlim([x_lim[0], x_lim[1]*propotion])
"""
Draw plot using Matplotlib
"""
def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color, true_p_bar):
# sort the dictionary by decreasing value, into a list of tuples
sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))
# unpacking the list of tuples into two lists
sorted_keys, sorted_values = zip(*sorted_dic_by_value)
#
if true_p_bar != "":
"""
Special case to draw in:
- green -> TP: True Positives (object detected and matches ground-truth)
- red -> FP: False Positives (object detected but does not match ground-truth)
- pink -> FN: False Negatives (object not detected but present in the ground-truth)
"""
fp_sorted = []
tp_sorted = []
for key in sorted_keys:
fp_sorted.append(dictionary[key] - true_p_bar[key])
tp_sorted.append(true_p_bar[key])
plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Positive')
plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Positive', left=fp_sorted)
# add legend
plt.legend(loc='lower right')
"""
Write number on side of bar
"""
fig = plt.gcf() # gcf - get current figure
axes = plt.gca()
r = fig.canvas.get_renderer()
for i, val in enumerate(sorted_values):
fp_val = fp_sorted[i]
tp_val = tp_sorted[i]
fp_str_val = " " + str(fp_val)
tp_str_val = fp_str_val + " " + str(tp_val)
# trick to paint multicolor with offset:
# first paint everything and then repaint the first number
t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')
plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')
if i == (len(sorted_values)-1): # largest bar
adjust_axes(r, t, fig, axes)
else:
plt.barh(range(n_classes), sorted_values, color=plot_color)
"""
Write number on side of bar
"""
fig = plt.gcf() # gcf - get current figure
axes = plt.gca()
r = fig.canvas.get_renderer()
for i, val in enumerate(sorted_values):
str_val = " " + str(val) # add a space before
if val < 1.0:
str_val = " {0:.2f}".format(val)
t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')
# re-set axes to show number inside the figure
if i == (len(sorted_values)-1): # largest bar
adjust_axes(r, t, fig, axes)
# set window title
fig.canvas.set_window_title(window_title)
# write classes in y axis
tick_font_size = 12
plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size)
"""
Re-scale height accordingly
"""
init_height = fig.get_figheight()
# comput the matrix height in points and inches
dpi = fig.dpi
height_pt = n_classes * (tick_font_size * 1.4) # 1.4 (some spacing)
height_in = height_pt / dpi
# compute the required figure height
top_margin = 0.15 # in percentage of the figure height
bottom_margin = 0.05 # in percentage of the figure height
figure_height = height_in / (1 - top_margin - bottom_margin)
# set new height
if figure_height > init_height:
fig.set_figheight(figure_height)
# set plot title
plt.title(plot_title, fontsize=14)
# set axis titles
# plt.xlabel('classes')
plt.xlabel(x_label, fontsize='large')
# adjust size of window
fig.tight_layout()
# save the plot
fig.savefig(output_path)
# show image
if to_show:
plt.show()
# close the plot
plt.close()
| 32.911111 | 123 | 0.603759 |
import glob
import json
import os
import shutil
import operator
import sys
import argparse
import math
import matplotlib.pyplot as plt
import numpy as np
def log_average_miss_rate(prec, rec, num_images):
if prec.size == 0:
lamr = 0
mr = 1
fppi = 0
return lamr, mr, fppi
fppi = (1 - prec)
mr = (1 - rec)
fppi_tmp = np.insert(fppi, 0, -1.0)
mr_tmp = np.insert(mr, 0, 1.0)
ref = np.logspace(-2.0, 0.0, num = 9)
for i, ref_i in enumerate(ref):
j = np.where(fppi_tmp <= ref_i)[-1][-1]
ref[i] = mr_tmp[j]
lamr = math.exp(np.mean(np.log(np.maximum(1e-10, ref))))
return lamr, mr, fppi
def error(msg):
print(msg)
sys.exit(0)
def is_float_between_0_and_1(value):
try:
val = float(value)
if val > 0.0 and val < 1.0:
return True
else:
return False
except ValueError:
return False
def voc_ap(rec, prec):
rec.insert(0, 0.0)
rec.append(1.0)
mrec = rec[:]
prec.insert(0, 0.0)
prec.append(0.0)
mpre = prec[:]
for i in range(len(mpre)-2, -1, -1):
mpre[i] = max(mpre[i], mpre[i+1])
i_list = []
for i in range(1, len(mrec)):
if mrec[i] != mrec[i-1]:
i_list.append(i)
ap = 0.0
for i in i_list:
ap += ((mrec[i]-mrec[i-1])*mpre[i])
return ap, mrec, mpre
def file_lines_to_list(path):
with open(path) as f:
content = f.readlines()
content = [x.strip() for x in content]
return content
def draw_text_in_image(img, text, pos, color, line_width):
font = cv2.FONT_HERSHEY_PLAIN
fontScale = 1
lineType = 1
bottomLeftCornerOfText = pos
cv2.putText(img, text,
bottomLeftCornerOfText,
font,
fontScale,
color,
lineType)
text_width, _ = cv2.getTextSize(text, font, fontScale, lineType)[0]
return img, (line_width + text_width)
def adjust_axes(r, t, fig, axes):
bb = t.get_window_extent(renderer=r)
text_width_inches = bb.width / fig.dpi
current_fig_width = fig.get_figwidth()
new_fig_width = current_fig_width + text_width_inches
propotion = new_fig_width / current_fig_width
x_lim = axes.get_xlim()
axes.set_xlim([x_lim[0], x_lim[1]*propotion])
def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color, true_p_bar):
sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))
sorted_keys, sorted_values = zip(*sorted_dic_by_value)
if true_p_bar != "":
fp_sorted = []
tp_sorted = []
for key in sorted_keys:
fp_sorted.append(dictionary[key] - true_p_bar[key])
tp_sorted.append(true_p_bar[key])
plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Positive')
plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Positive', left=fp_sorted)
plt.legend(loc='lower right')
fig = plt.gcf()
axes = plt.gca()
r = fig.canvas.get_renderer()
for i, val in enumerate(sorted_values):
fp_val = fp_sorted[i]
tp_val = tp_sorted[i]
fp_str_val = " " + str(fp_val)
tp_str_val = fp_str_val + " " + str(tp_val)
t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')
plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')
if i == (len(sorted_values)-1):
adjust_axes(r, t, fig, axes)
else:
plt.barh(range(n_classes), sorted_values, color=plot_color)
"""
Write number on side of bar
"""
fig = plt.gcf()
axes = plt.gca()
r = fig.canvas.get_renderer()
for i, val in enumerate(sorted_values):
str_val = " " + str(val)
if val < 1.0:
str_val = " {0:.2f}".format(val)
t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')
if i == (len(sorted_values)-1):
adjust_axes(r, t, fig, axes)
fig.canvas.set_window_title(window_title)
tick_font_size = 12
plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size)
init_height = fig.get_figheight()
dpi = fig.dpi
height_pt = n_classes * (tick_font_size * 1.4)
height_in = height_pt / dpi
top_margin = 0.15
bottom_margin = 0.05
figure_height = height_in / (1 - top_margin - bottom_margin)
if figure_height > init_height:
fig.set_figheight(figure_height)
plt.title(plot_title, fontsize=14)
plt.xlabel(x_label, fontsize='large')
fig.tight_layout()
fig.savefig(output_path)
if to_show:
plt.show()
plt.close()
| true | true |
1c322bb27857c36f63ac501a96730df8cd2d72c3 | 877 | py | Python | alien_invasion/alien.py | MRNIU/PythonCrashCourse | c3aadf34a862d47fbe2dbf790b07a2439b225649 | [
"MIT"
] | null | null | null | alien_invasion/alien.py | MRNIU/PythonCrashCourse | c3aadf34a862d47fbe2dbf790b07a2439b225649 | [
"MIT"
] | null | null | null | alien_invasion/alien.py | MRNIU/PythonCrashCourse | c3aadf34a862d47fbe2dbf790b07a2439b225649 | [
"MIT"
] | null | null | null | import pygame
from pygame.sprite import Sprite
class Alien(Sprite):
def __init__(self, ai_settings, screen):
super().__init__()
self.screen = screen
self.ai_settings=ai_settings
self.image=pygame.image.load('images/alien.bmp')
self.rect=self.image.get_rect()
self.rect.x = self.rect.width
self.rect.y = self.rect.height
self.rect.y = self.rect.height
self.x = float(self.rect.x)
def blitme(self):
self.screen.blit(self.image, self.rect)
def update(self):
self.x += (self.ai_settings.alien_speed_factor * self.ai_settings.fleet_direction)
self.rect.x = self.x
def check_edges(self):
screen_rect = self.screen.get_rect()
if self.rect.right >= screen_rect.right:
return True
elif self.rect.left <= 0:
return True
| 27.40625 | 90 | 0.623717 | import pygame
from pygame.sprite import Sprite
class Alien(Sprite):
def __init__(self, ai_settings, screen):
super().__init__()
self.screen = screen
self.ai_settings=ai_settings
self.image=pygame.image.load('images/alien.bmp')
self.rect=self.image.get_rect()
self.rect.x = self.rect.width
self.rect.y = self.rect.height
self.rect.y = self.rect.height
self.x = float(self.rect.x)
def blitme(self):
self.screen.blit(self.image, self.rect)
def update(self):
self.x += (self.ai_settings.alien_speed_factor * self.ai_settings.fleet_direction)
self.rect.x = self.x
def check_edges(self):
screen_rect = self.screen.get_rect()
if self.rect.right >= screen_rect.right:
return True
elif self.rect.left <= 0:
return True
| true | true |
1c322d07c5fc9d23baf1e23a47c7e7a71abd791c | 801 | py | Python | src/paper/task_paper.py | maxxxbb/replication_ar2018 | 5c3da961664af0ff5a2d2b6f6a2baa3271cf2a57 | [
"MIT"
] | null | null | null | src/paper/task_paper.py | maxxxbb/replication_ar2018 | 5c3da961664af0ff5a2d2b6f6a2baa3271cf2a57 | [
"MIT"
] | null | null | null | src/paper/task_paper.py | maxxxbb/replication_ar2018 | 5c3da961664af0ff5a2d2b6f6a2baa3271cf2a57 | [
"MIT"
] | null | null | null | import shutil
import pytask
from src.config import BLD
from src.config import ROOT
from src.config import SRC
documents = ["replication_paper"]
@pytask.mark.latex(
[
"--pdf",
"--interaction=nonstopmode",
"--synctex=1",
"--cd",
"--quiet",
"--shell-escape",
]
)
@pytask.mark.parametrize(
"depends_on, produces",
[
(SRC / "paper" / f"{document}.tex", BLD / "paper" / f"{document}.pdf")
for document in documents
],
)
def task_compile_documents():
pass
@pytask.mark.parametrize(
"depends_on, produces",
[
(BLD / "paper" / f"{document}.pdf", ROOT / f"{document}.pdf")
for document in documents
],
)
def task_copy_to_root(depends_on, produces):
shutil.copy(depends_on, produces)
| 18.627907 | 78 | 0.594257 | import shutil
import pytask
from src.config import BLD
from src.config import ROOT
from src.config import SRC
documents = ["replication_paper"]
@pytask.mark.latex(
[
"--pdf",
"--interaction=nonstopmode",
"--synctex=1",
"--cd",
"--quiet",
"--shell-escape",
]
)
@pytask.mark.parametrize(
"depends_on, produces",
[
(SRC / "paper" / f"{document}.tex", BLD / "paper" / f"{document}.pdf")
for document in documents
],
)
def task_compile_documents():
pass
@pytask.mark.parametrize(
    "depends_on, produces",
    [(BLD / "paper" / f"{doc}.pdf", ROOT / f"{doc}.pdf")
     for doc in documents],
)
def task_copy_to_root(depends_on, produces):
    """Mirror each compiled PDF from the build tree to the project root."""
    shutil.copy(depends_on, produces)
| true | true |
1c322d7fa0ce60f6ed26d7df0b2f10a6f07d44e5 | 3,322 | py | Python | features/steps/ps_platform_throttle_report_msg.py | PolySync/core-python-api | a753863eca820954f5b8f7502c38c5a7d8db5a15 | [
"MIT"
] | null | null | null | features/steps/ps_platform_throttle_report_msg.py | PolySync/core-python-api | a753863eca820954f5b8f7502c38c5a7d8db5a15 | [
"MIT"
] | null | null | null | features/steps/ps_platform_throttle_report_msg.py | PolySync/core-python-api | a753863eca820954f5b8f7502c38c5a7d8db5a15 | [
"MIT"
] | 2 | 2018-07-22T21:07:23.000Z | 2019-03-09T14:31:09.000Z | # WARNING: Auto-generated file. Any changes are subject to being overwritten
# by setup.py build script.
#!/usr/bin/python
import time
from behave import given
from behave import when
from behave import then
from hamcrest import assert_that, equal_to
try:
import polysync.node as ps_node
from polysync.data_model.types import Py_ps_platform_throttle_report_msg
from polysync.data_model._internal.compare import ps_platform_throttle_report_msg_type_convert_testable, Py_ps_platform_throttle_report_msg_initialize_random
from polysync.data_model.message_support.ps_platform_throttle_report_msg import publish, subscribe
except ImportError:
raise ImportError(
'Py_ps_platform_throttle_report_msg module dependencies \
missing for tests, is the project built?')
@given('I have a Py_ps_platform_throttle_report_msg object')
def step_impl(context):
    """No setup needed; the @then step constructs its own randomized message."""
@when('I convert it to its C API equivalent a ps_platform_throttle_report_msg')
def step_impl(context):
    """No-op: the conversion is exercised inside the @then round-trip check."""
@when('I convert the ps_platform_throttle_report_msg back to a Py_ps_platform_throttle_report_msg')
def step_impl(context):
    """No-op: the reverse conversion happens inside the @then round-trip check."""
@then('the ps_platform_throttle_report_msg values are equivalent to each Py_ps_platform_throttle_report_msg value')
def step_impl(context):
    """Round-trip a randomized message through the C type and expect no diffs."""
    random_msg = Py_ps_platform_throttle_report_msg_initialize_random()
    mismatch = ps_platform_throttle_report_msg_type_convert_testable(random_msg)
    assert not mismatch, mismatch
@given('a ps_platform_throttle_report_msg.publish function exists')
def step_impl(context):
    # publish is bound at module import; just confirm it is callable.
    assert callable(publish)
@when('I try to publish something that is not of type Py_ps_platform_throttle_report_msg')
def step_impl(context):
    """Publish a plainly wrong type and stash whatever exception it raises."""
    context.exception = None
    wrong_typed = "not the right type of object!"
    try:
        publish(wrong_typed)
    except Exception as err:
        context.exception = err
@then('a {exeption} indicates the type was not Py_ps_platform_throttle_report_msg')
def step_impl(context, exeption):
    # NOTE(review): eval() resolves the exception class named in the feature
    # file; acceptable only because feature files are trusted input.
    assert isinstance(context.exception, eval(exeption)), \
        "Invalid exception %s - expected %s" \
        % (type(context.exception).__name__, exeption)
# Timestamp captured by the subscription handler; stays None until a message
# published by this node (header.src_guid == GLOBAL_GUID) is received.
GLOBAL_TIMESTAMP = None
# GUID of the node under test; populated by the licensed-node @given step.
GLOBAL_GUID = None
def Py_ps_platform_throttle_report_msg_handler(msg):
    """Record the timestamp of report messages published by our own node."""
    global GLOBAL_TIMESTAMP
    if msg.header.src_guid != GLOBAL_GUID:
        return
    GLOBAL_TIMESTAMP = msg.header.timestamp
@given(u'I have a licensed PsNode for publishing Py_ps_platform_throttle_report_msg')
def step_impl(context):
    """Remember this node's GUID so the handler can filter loopback traffic."""
    global GLOBAL_GUID
    assert context.node_ref
    GLOBAL_GUID = context.my_guid
@given(u'I have a Py_ps_platform_throttle_report_msg')
def step_impl(context):
    """Build a report message carrying a recognizable sentinel timestamp."""
    report = Py_ps_platform_throttle_report_msg()
    report.header.timestamp = 0xFFFF
    context.msg = report
@given(u'I have a handler for Py_ps_platform_throttle_report_msg subscription')
def step_impl(context):
    """Register the module-level handler for incoming report messages."""
    callback = Py_ps_platform_throttle_report_msg_handler
    assert callback
    subscribe(handler=callback)
@when(u'I publish my Py_ps_platform_throttle_report_msg')
def step_impl(context):
    """Send the prepared message through the node's publish helper."""
    publish(context.msg)
@then(u'I receive the corresponding Py_ps_platform_throttle_report_msg in my handler')
def step_impl(context):
    """Wait (bounded) for the handler to see our message, then compare timestamps.

    The original `while not GLOBAL_TIMESTAMP: time.sleep(1)` loop spun forever
    if the message never arrived, hanging the whole behave run; poll with a
    deadline instead so a lost message fails the step promptly.
    """
    global GLOBAL_TIMESTAMP
    deadline = time.time() + 30
    while not GLOBAL_TIMESTAMP and time.time() < deadline:
        time.sleep(0.1)
    assert GLOBAL_TIMESTAMP, "timed out waiting for the published message"
    assert_that(context.msg.header.timestamp, equal_to(GLOBAL_TIMESTAMP))
| 34.968421 | 161 | 0.792595 |
import time
from behave import given
from behave import when
from behave import then
from hamcrest import assert_that, equal_to
try:
import polysync.node as ps_node
from polysync.data_model.types import Py_ps_platform_throttle_report_msg
from polysync.data_model._internal.compare import ps_platform_throttle_report_msg_type_convert_testable, Py_ps_platform_throttle_report_msg_initialize_random
from polysync.data_model.message_support.ps_platform_throttle_report_msg import publish, subscribe
except ImportError:
raise ImportError(
'Py_ps_platform_throttle_report_msg module dependencies \
missing for tests, is the project built?')
@given('I have a Py_ps_platform_throttle_report_msg object')
def step_impl(context):
    """No setup needed; the @then step constructs its own randomized message."""
@when('I convert it to its C API equivalent a ps_platform_throttle_report_msg')
def step_impl(context):
    """No-op: the conversion is exercised inside the @then round-trip check."""
@when('I convert the ps_platform_throttle_report_msg back to a Py_ps_platform_throttle_report_msg')
def step_impl(context):
    """No-op: the reverse conversion happens inside the @then round-trip check."""
@then('the ps_platform_throttle_report_msg values are equivalent to each Py_ps_platform_throttle_report_msg value')
def step_impl(context):
    """Round-trip a randomized message through the C type and expect no diffs."""
    random_msg = Py_ps_platform_throttle_report_msg_initialize_random()
    mismatch = ps_platform_throttle_report_msg_type_convert_testable(random_msg)
    assert not mismatch, mismatch
@given('a ps_platform_throttle_report_msg.publish function exists')
def step_impl(context):
    # publish is bound at module import; just confirm it is callable.
    assert callable(publish)
@when('I try to publish something that is not of type Py_ps_platform_throttle_report_msg')
def step_impl(context):
    """Publish a plainly wrong type and stash whatever exception it raises."""
    context.exception = None
    wrong_typed = "not the right type of object!"
    try:
        publish(wrong_typed)
    except Exception as err:
        context.exception = err
@then('a {exeption} indicates the type was not Py_ps_platform_throttle_report_msg')
def step_impl(context, exeption):
    # NOTE(review): eval() resolves the exception class named in the feature
    # file; acceptable only because feature files are trusted input.
    assert isinstance(context.exception, eval(exeption)), \
        "Invalid exception %s - expected %s" \
        % (type(context.exception).__name__, exeption)
# Timestamp captured by the subscription handler; stays None until a message
# published by this node (header.src_guid == GLOBAL_GUID) is received.
GLOBAL_TIMESTAMP = None
# GUID of the node under test; populated by the licensed-node @given step.
GLOBAL_GUID = None
def Py_ps_platform_throttle_report_msg_handler(msg):
    """Record the timestamp of report messages published by our own node."""
    global GLOBAL_TIMESTAMP
    if msg.header.src_guid != GLOBAL_GUID:
        return
    GLOBAL_TIMESTAMP = msg.header.timestamp
@given(u'I have a licensed PsNode for publishing Py_ps_platform_throttle_report_msg')
def step_impl(context):
    """Remember this node's GUID so the handler can filter loopback traffic."""
    global GLOBAL_GUID
    assert context.node_ref
    GLOBAL_GUID = context.my_guid
@given(u'I have a Py_ps_platform_throttle_report_msg')
def step_impl(context):
    """Build a report message carrying a recognizable sentinel timestamp."""
    report = Py_ps_platform_throttle_report_msg()
    report.header.timestamp = 0xFFFF
    context.msg = report
@given(u'I have a handler for Py_ps_platform_throttle_report_msg subscription')
def step_impl(context):
    """Register the module-level handler for incoming report messages."""
    callback = Py_ps_platform_throttle_report_msg_handler
    assert callback
    subscribe(handler=callback)
@when(u'I publish my Py_ps_platform_throttle_report_msg')
def step_impl(context):
    """Send the prepared message through the node's publish helper."""
    publish(context.msg)
@then(u'I receive the corresponding Py_ps_platform_throttle_report_msg in my handler')
def step_impl(context):
    """Wait (bounded) for the handler to see our message, then compare timestamps.

    The original `while not GLOBAL_TIMESTAMP: time.sleep(1)` loop spun forever
    if the message never arrived, hanging the whole behave run; poll with a
    deadline instead so a lost message fails the step promptly.
    """
    global GLOBAL_TIMESTAMP
    deadline = time.time() + 30
    while not GLOBAL_TIMESTAMP and time.time() < deadline:
        time.sleep(0.1)
    assert GLOBAL_TIMESTAMP, "timed out waiting for the published message"
    assert_that(context.msg.header.timestamp, equal_to(GLOBAL_TIMESTAMP))
| true | true |
1c322d85258eb1b6d3e37e446ed35edfbd6a3ba7 | 24,617 | py | Python | plugins/trezor/qt_generic.py | lionzeye/reddelectrum | e39497aee08b08bed89efa10072d17fb1e37920c | [
"MIT"
] | null | null | null | plugins/trezor/qt_generic.py | lionzeye/reddelectrum | e39497aee08b08bed89efa10072d17fb1e37920c | [
"MIT"
] | null | null | null | plugins/trezor/qt_generic.py | lionzeye/reddelectrum | e39497aee08b08bed89efa10072d17fb1e37920c | [
"MIT"
] | null | null | null | from functools import partial
import threading
from PyQt4.Qt import Qt
from PyQt4.Qt import QGridLayout, QInputDialog, QPushButton
from PyQt4.Qt import QVBoxLayout, QLabel, SIGNAL
from reddelectrum_gui.qt.util import *
from .plugin import TIM_NEW, TIM_RECOVER, TIM_MNEMONIC
from ..hw_wallet.qt import QtHandlerBase, QtPluginBase
from reddelectrum.i18n import _
from reddelectrum.plugins import hook, DeviceMgr
from reddelectrum.util import PrintError, UserCancelled
from reddelectrum.wallet import Wallet, Standard_Wallet
# One-line passphrase summary shown by the initialization wizard.
PASSPHRASE_HELP_SHORT =_(
    "Passphrases allow you to access new wallets, each "
    "hidden behind a particular case-sensitive passphrase.")
# Longer explanation used on the settings dialog's Advanced tab.
PASSPHRASE_HELP = PASSPHRASE_HELP_SHORT + " " + _(
    "You need to create a separate Electrum wallet for each passphrase "
    "you use as they each generate different addresses. Changing "
    "your passphrase does not lose other wallets, each is still "
    "accessible behind its own passphrase.")
# Shown by the wizard next to the "Enable PIN protection" checkbox.
RECOMMEND_PIN = _(
    "You should enable PIN protection. Your PIN is the only protection "
    "for your reddcoins if your device is lost or stolen.")
# Red warning shown wherever passphrases can be toggled.
PASSPHRASE_NOT_PIN = _(
    "If you forget a passphrase you will be unable to access any "
    "reddcoins in the wallet behind it. A passphrase is not a PIN. "
    "Only change this if you are sure you understand it.")
# Instructions displayed at the top of the KeepKey seed-recovery dialog.
CHARACTER_RECOVERY = (
    "Use the recovery cipher shown on your device to input your seed words. "
    "The cipher changes with every keypress.\n"
    "After at most 4 letters the device will auto-complete a word.\n"
    "Press SPACE or the Accept Word button to accept the device's auto-"
    "completed word and advance to the next one.\n"
    "Press BACKSPACE to go back a character or word.\n"
    "Press ENTER or the Seed Entered button once the last word in your "
    "seed is auto-completed.")
class CharacterButton(QPushButton):
    """Button that declines key events so Enter/Space reach the dialog."""
    def __init__(self, text=None):
        QPushButton.__init__(self, text)
    def keyPressEvent(self, event):
        # Mark the event unaccepted so the dialog's own key handling sees it.
        event.setAccepted(False)
class CharacterDialog(WindowModalDialog):
    """Modal dialog driving KeepKey's cipher-based seed recovery.

    get_char() spins a local QEventLoop until process_key() records the
    user's input in self.data (a character, a delete, or a 'done' marker),
    or until the dialog is rejected (loop exits non-zero -> cancelled).
    """
    def __init__(self, parent):
        super(CharacterDialog, self).__init__(parent)
        self.setWindowTitle(_("KeepKey Seed Recovery"))
        self.character_pos = 0
        self.word_pos = 0
        self.loop = QEventLoop()
        self.word_help = QLabel()
        self.char_buttons = []
        vbox = QVBoxLayout(self)
        vbox.addWidget(WWLabel(CHARACTER_RECOVERY))
        hbox = QHBoxLayout()
        hbox.addWidget(self.word_help)
        # One placeholder button per character position (4 max per word).
        for i in range(4):
            char_button = CharacterButton('*')
            char_button.setMaximumWidth(36)
            self.char_buttons.append(char_button)
            hbox.addWidget(char_button)
        self.accept_button = CharacterButton(_("Accept Word"))
        self.accept_button.clicked.connect(partial(self.process_key, 32))
        # A non-zero loop exit code signals user cancellation to get_char().
        self.rejected.connect(partial(self.loop.exit, 1))
        hbox.addWidget(self.accept_button)
        hbox.addStretch(1)
        vbox.addLayout(hbox)
        self.finished_button = QPushButton(_("Seed Entered"))
        self.cancel_button = QPushButton(_("Cancel"))
        self.finished_button.clicked.connect(partial(self.process_key,
                                                     Qt.Key_Return))
        self.cancel_button.clicked.connect(self.rejected)
        buttons = Buttons(self.finished_button, self.cancel_button)
        vbox.addSpacing(40)
        vbox.addLayout(buttons)
        self.refresh()
        self.show()
    def refresh(self):
        """Update prompts and enable/disable buttons for the current position."""
        self.word_help.setText("Enter seed word %2d:" % (self.word_pos + 1))
        self.accept_button.setEnabled(self.character_pos >= 3)
        # Seeds are 12, 18 or 24 words, so finishing is only offered there.
        self.finished_button.setEnabled((self.word_pos in (11, 17, 23)
                                         and self.character_pos >= 3))
        for n, button in enumerate(self.char_buttons):
            button.setEnabled(n == self.character_pos)
            if n == self.character_pos:
                button.setFocus()
    def is_valid_alpha_space(self, key):
        """Return whether key is acceptable input at the current position."""
        # Auto-completion requires at least 3 characters
        if key == ord(' ') and self.character_pos >= 3:
            return True
        # Firmware aborts protocol if the 5th character is non-space
        if self.character_pos >= 4:
            return False
        return (key >= ord('a') and key <= ord('z')
                or (key >= ord('A') and key <= ord('Z')))
    def process_key(self, key):
        """Translate a keypress into self.data and wake the waiting loop."""
        self.data = None
        if key == Qt.Key_Return and self.finished_button.isEnabled():
            self.data = {'done': True}
        elif key == Qt.Key_Backspace and (self.word_pos or self.character_pos):
            self.data = {'delete': True}
        elif self.is_valid_alpha_space(key):
            self.data = {'character': chr(key).lower()}
        if self.data:
            self.loop.exit(0)
    def keyPressEvent(self, event):
        self.process_key(event.key())
        # Unhandled keys fall through to the default dialog behaviour.
        if not self.data:
            QDialog.keyPressEvent(self, event)
    def get_char(self, word_pos, character_pos):
        """Block in a local event loop until the user supplies input."""
        self.word_pos = word_pos
        self.character_pos = character_pos
        self.refresh()
        if self.loop.exec_():
            self.data = None # User cancelled
class QtHandler(QtHandlerBase):
    """Qt handler bridging the device thread and the GUI thread.

    Blocking get_* calls (run on the device thread) emit a signal, wait on
    self.done, and then read back the result produced by the corresponding
    dialog slot executed on the GUI thread.
    """
    char_signal = pyqtSignal(object)
    pin_signal = pyqtSignal(object)
    def __init__(self, win, pin_matrix_widget_class, device):
        super(QtHandler, self).__init__(win, device)
        self.char_signal.connect(self.update_character_dialog)
        self.pin_signal.connect(self.pin_dialog)
        self.pin_matrix_widget_class = pin_matrix_widget_class
        self.character_dialog = None
    def get_char(self, msg):
        """Block until the user supplies a seed character (or cancels)."""
        self.done.clear()
        self.char_signal.emit(msg)
        self.done.wait()
        data = self.character_dialog.data
        # Close the dialog once the seed is complete or the user cancelled.
        if not data or 'done' in data:
            self.character_dialog.accept()
            self.character_dialog = None
        return data
    def get_pin(self, msg):
        """Block until the user enters a PIN on the matrix widget."""
        self.done.clear()
        self.pin_signal.emit(msg)
        self.done.wait()
        return self.response
    def pin_dialog(self, msg):
        """GUI-thread slot: show a modal PIN matrix and record its value."""
        # Needed e.g. when resetting a device
        self.clear_dialog()
        dialog = WindowModalDialog(self.top_level_window(), _("Enter PIN"))
        matrix = self.pin_matrix_widget_class()
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(msg))
        vbox.addWidget(matrix)
        vbox.addLayout(Buttons(CancelButton(dialog), OkButton(dialog)))
        dialog.setLayout(vbox)
        dialog.exec_()
        self.response = str(matrix.get_value())
        self.done.set()
    def update_character_dialog(self, msg):
        """GUI-thread slot: (lazily create and) advance the recovery dialog."""
        if not self.character_dialog:
            self.character_dialog = CharacterDialog(self.top_level_window())
        self.character_dialog.get_char(msg.word_pos, msg.character_pos)
        self.done.set()
class QtPlugin(QtPluginBase):
    # Derived classes must provide the following class-static variables:
    #   icon_file
    #   pin_matrix_widget_class
    def create_handler(self, window):
        '''Create the Qt handler that marshals device prompts onto the GUI.'''
        return QtHandler(window, self.pin_matrix_widget_class(), self.device)
    @hook
    def receive_menu(self, menu, addrs, wallet):
        '''Add a "Show on <device>" action when exactly one address of a
        standard wallet backed by this plugin's keystore type is selected.'''
        if type(wallet) is not Standard_Wallet:
            return
        keystore = wallet.get_keystore()
        if type(keystore) == self.keystore_class and len(addrs) == 1:
            def show_address():
                keystore.thread.add(partial(self.show_address, wallet, addrs[0]))
            menu.addAction(_("Show on %s") % self.device, show_address)
    def show_settings_dialog(self, window, keystore):
        '''Open the per-device settings dialog once a device is chosen.'''
        device_id = self.choose_device(window, keystore)
        if device_id:
            SettingsDialog(window, self, keystore, device_id).exec_()
    def request_trezor_init_settings(self, wizard, method, device):
        '''Collect device-initialization settings from the wizard.
        Returns (item, label, pin, passphrase_enabled): item is the selected
        seed-length index for TIM_NEW/TIM_RECOVER, otherwise the cleaned
        mnemonic/xprv text; pin is a bool (new/recover) or the PIN digits.'''
        vbox = QVBoxLayout()
        next_enabled = True
        label = QLabel(_("Enter a label to name your device:"))
        name = QLineEdit()
        hl = QHBoxLayout()
        hl.addWidget(label)
        hl.addWidget(name)
        hl.addStretch(1)
        vbox.addLayout(hl)
        def clean_text(widget):
            '''Collapse all whitespace in the widget's text to single spaces.'''
            text = unicode(widget.toPlainText()).strip()
            return ' '.join(text.split())
        if method in [TIM_NEW, TIM_RECOVER]:
            gb = QGroupBox()
            hbox1 = QHBoxLayout()
            gb.setLayout(hbox1)
            # KeepKey recovery doesn't need a word count
            if method == TIM_NEW or self.device == 'TREZOR':
                vbox.addWidget(gb)
            gb.setTitle(_("Select your seed length:"))
            bg = QButtonGroup()
            for i, count in enumerate([12, 18, 24]):
                rb = QRadioButton(gb)
                rb.setText(_("%d words") % count)
                bg.addButton(rb)
                bg.setId(rb, i)
                hbox1.addWidget(rb)
                rb.setChecked(True)
            cb_pin = QCheckBox(_('Enable PIN protection'))
            cb_pin.setChecked(True)
        else:
            text = QTextEdit()
            text.setMaximumHeight(60)
            if method == TIM_MNEMONIC:
                msg = _("Enter your BIP39 mnemonic:")
            else:
                msg = _("Enter the master private key beginning with xprv:")
                def set_enabled():
                    # Gate the wizard's Next button on a valid xprv.
                    from reddelectrum.keystore import is_xprv
                    wizard.next_button.setEnabled(is_xprv(clean_text(text)))
                text.textChanged.connect(set_enabled)
                next_enabled = False
            vbox.addWidget(QLabel(msg))
            vbox.addWidget(text)
            pin = QLineEdit()
            pin.setValidator(QRegExpValidator(QRegExp('[1-9]{0,10}')))
            pin.setMaximumWidth(100)
            hbox_pin = QHBoxLayout()
            hbox_pin.addWidget(QLabel(_("Enter your PIN (digits 1-9):")))
            hbox_pin.addWidget(pin)
            hbox_pin.addStretch(1)
        if method in [TIM_NEW, TIM_RECOVER]:
            vbox.addWidget(WWLabel(RECOMMEND_PIN))
            vbox.addWidget(cb_pin)
        else:
            vbox.addLayout(hbox_pin)
        passphrase_msg = WWLabel(PASSPHRASE_HELP_SHORT)
        passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
        passphrase_warning.setStyleSheet("color: red")
        cb_phrase = QCheckBox(_('Enable passphrases'))
        cb_phrase.setChecked(False)
        vbox.addWidget(passphrase_msg)
        vbox.addWidget(passphrase_warning)
        vbox.addWidget(cb_phrase)
        wizard.exec_layout(vbox, next_enabled=next_enabled)
        if method in [TIM_NEW, TIM_RECOVER]:
            item = bg.checkedId()
            pin = cb_pin.isChecked()
        else:
            item = ' '.join(str(clean_text(text)).split())
            pin = str(pin.text())
        return (item, unicode(name.text()), pin, cb_phrase.isChecked())
class SettingsDialog(WindowModalDialog):
    '''This dialog doesn't require a device be paired with a wallet.
    We want users to be able to wipe a device even if they've forgotten
    their PIN.'''
    def __init__(self, window, plugin, keystore, device_id):
        '''Build the Information / Settings / Advanced tabs and kick off an
        initial features query so the widgets get populated.'''
        title = _("%s Settings") % plugin.device
        super(SettingsDialog, self).__init__(window, title)
        self.setMaximumWidth(540)
        devmgr = plugin.device_manager()
        config = devmgr.config
        handler = keystore.handler
        thread = keystore.thread
        # Homescreen bitmaps are 128 columns by 64 rows.
        hs_rows, hs_cols = (64, 128)
        def invoke_client(method, *args, **kw_args):
            '''Run a client method on the keystore thread; update() refreshes
            the dialog from the device's features when it completes.'''
            unpair_after = kw_args.pop('unpair_after', False)
            def task():
                client = devmgr.client_by_id(device_id)
                if not client:
                    raise RuntimeError("Device not connected")
                if method:
                    getattr(client, method)(*args, **kw_args)
                if unpair_after:
                    devmgr.unpair_id(device_id)
                return client.features
            thread.add(task, on_success=update)
        def update(features):
            '''Refresh every widget from a freshly fetched features object.
            Closes over widgets created further down in __init__; it only
            runs (via on_success) after they have been built.'''
            self.features = features
            set_label_enabled()
            bl_hash = features.bootloader_hash.encode('hex')
            bl_hash = "\n".join([bl_hash[:32], bl_hash[32:]])
            noyes = [_("No"), _("Yes")]
            endis = [_("Enable Passphrases"), _("Disable Passphrases")]
            disen = [_("Disabled"), _("Enabled")]
            setchange = [_("Set a PIN"), _("Change PIN")]
            version = "%d.%d.%d" % (features.major_version,
                                    features.minor_version,
                                    features.patch_version)
            coins = ", ".join(coin.coin_name for coin in features.coins)
            device_label.setText(features.label)
            pin_set_label.setText(noyes[features.pin_protection])
            passphrases_label.setText(disen[features.passphrase_protection])
            bl_hash_label.setText(bl_hash)
            label_edit.setText(features.label)
            device_id_label.setText(features.device_id)
            initialized_label.setText(noyes[features.initialized])
            version_label.setText(version)
            coins_label.setText(coins)
            clear_pin_button.setVisible(features.pin_protection)
            clear_pin_warning.setVisible(features.pin_protection)
            pin_button.setText(setchange[features.pin_protection])
            pin_msg.setVisible(not features.pin_protection)
            passphrase_button.setText(endis[features.passphrase_protection])
            language_label.setText(features.language)
        def set_label_enabled():
            '''Only enable Apply when the edited label actually differs.'''
            label_apply.setEnabled(label_edit.text() != self.features.label)
        def rename():
            invoke_client('change_label', unicode(label_edit.text()))
        def toggle_passphrase():
            '''Flip passphrase protection after an explicit confirmation.'''
            title = _("Confirm Toggle Passphrase Protection")
            currently_enabled = self.features.passphrase_protection
            if currently_enabled:
                msg = _("After disabling passphrases, you can only pair this "
                        "Electrum wallet if it had an empty passphrase. "
                        "If its passphrase was not empty, you will need to "
                        "create a new wallet with the install wizard. You "
                        "can use this wallet again at any time by re-enabling "
                        "passphrases and entering its passphrase.")
            else:
                msg = _("Your current Electrum wallet can only be used with "
                        "an empty passphrase. You must create a separate "
                        "wallet with the install wizard for other passphrases "
                        "as each one generates a new set of addresses.")
            msg += "\n\n" + _("Are you sure you want to proceed?")
            if not self.question(msg, title=title):
                return
            invoke_client('toggle_passphrase', unpair_after=currently_enabled)
        def change_homescreen():
            '''Let the user pick an image and upload it as the homescreen.'''
            from PIL import Image # FIXME
            dialog = QFileDialog(self, _("Choose Homescreen"))
            filename = dialog.getOpenFileName()
            if filename:
                im = Image.open(str(filename))
                if im.size != (hs_cols, hs_rows):
                    raise Exception('Image must be 64 x 128 pixels')
                im = im.convert('1')
                pix = im.load()
                img = ''
                # Serialize the bitmap row-major, one bit per pixel, then
                # pack each run of 8 bits into a byte.
                for j in range(hs_rows):
                    for i in range(hs_cols):
                        img += '1' if pix[i, j] else '0'
                img = ''.join(chr(int(img[i:i + 8], 2))
                              for i in range(0, len(img), 8))
                invoke_client('change_homescreen', img)
        def clear_homescreen():
            invoke_client('change_homescreen', '\x00')
        def set_pin():
            invoke_client('set_pin', remove=False)
        def clear_pin():
            invoke_client('set_pin', remove=True)
        def wipe_device():
            '''Wipe the device, asking first if the open wallet has funds.'''
            wallet = window.wallet
            if wallet and sum(wallet.get_balance()):
                title = _("Confirm Device Wipe")
                msg = _("Are you SURE you want to wipe the device?\n"
                        "Your wallet still has reddcoins in it!")
                if not self.question(msg, title=title,
                                     icon=QMessageBox.Critical):
                    return
            invoke_client('wipe_device', unpair_after=True)
        def slider_moved():
            mins = timeout_slider.sliderPosition()
            timeout_minutes.setText(_("%2d minutes") % mins)
        def slider_released():
            # The slider position is minutes; the config stores seconds.
            config.set_session_timeout(timeout_slider.sliderPosition() * 60)
        # Information tab
        info_tab = QWidget()
        info_layout = QVBoxLayout(info_tab)
        info_glayout = QGridLayout()
        info_glayout.setColumnStretch(2, 1)
        device_label = QLabel()
        pin_set_label = QLabel()
        passphrases_label = QLabel()
        version_label = QLabel()
        device_id_label = QLabel()
        bl_hash_label = QLabel()
        bl_hash_label.setWordWrap(True)
        coins_label = QLabel()
        coins_label.setWordWrap(True)
        language_label = QLabel()
        initialized_label = QLabel()
        rows = [
            (_("Device Label"), device_label),
            (_("PIN set"), pin_set_label),
            (_("Passphrases"), passphrases_label),
            (_("Firmware Version"), version_label),
            (_("Device ID"), device_id_label),
            (_("Bootloader Hash"), bl_hash_label),
            (_("Supported Coins"), coins_label),
            (_("Language"), language_label),
            (_("Initialized"), initialized_label),
        ]
        for row_num, (label, widget) in enumerate(rows):
            info_glayout.addWidget(QLabel(label), row_num, 0)
            info_glayout.addWidget(widget, row_num, 1)
        info_layout.addLayout(info_glayout)
        # Settings tab
        settings_tab = QWidget()
        settings_layout = QVBoxLayout(settings_tab)
        settings_glayout = QGridLayout()
        # Settings tab - Label
        label_msg = QLabel(_("Name this %s. If you have mutiple devices "
                             "their labels help distinguish them.")
                           % plugin.device)
        label_msg.setWordWrap(True)
        label_label = QLabel(_("Device Label"))
        label_edit = QLineEdit()
        label_edit.setMinimumWidth(150)
        label_edit.setMaxLength(plugin.MAX_LABEL_LEN)
        label_apply = QPushButton(_("Apply"))
        label_apply.clicked.connect(rename)
        label_edit.textChanged.connect(set_label_enabled)
        settings_glayout.addWidget(label_label, 0, 0)
        settings_glayout.addWidget(label_edit, 0, 1, 1, 2)
        settings_glayout.addWidget(label_apply, 0, 3)
        settings_glayout.addWidget(label_msg, 1, 1, 1, -1)
        # Settings tab - PIN
        pin_label = QLabel(_("PIN Protection"))
        pin_button = QPushButton()
        pin_button.clicked.connect(set_pin)
        settings_glayout.addWidget(pin_label, 2, 0)
        settings_glayout.addWidget(pin_button, 2, 1)
        pin_msg = QLabel(_("PIN protection is strongly recommended. "
                           "A PIN is your only protection against someone "
                           "stealing your reddcoins if they obtain physical "
                           "access to your %s.") % plugin.device)
        pin_msg.setWordWrap(True)
        pin_msg.setStyleSheet("color: red")
        settings_glayout.addWidget(pin_msg, 3, 1, 1, -1)
        # Settings tab - Homescreen
        if plugin.device != 'KeepKey': # Not yet supported by KK firmware
            homescreen_layout = QHBoxLayout()
            homescreen_label = QLabel(_("Homescreen"))
            homescreen_change_button = QPushButton(_("Change..."))
            homescreen_clear_button = QPushButton(_("Reset"))
            homescreen_change_button.clicked.connect(change_homescreen)
            homescreen_clear_button.clicked.connect(clear_homescreen)
            homescreen_msg = QLabel(_("You can set the homescreen on your "
                                      "device to personalize it. You must "
                                      "choose a %d x %d monochrome black and "
                                      "white image.") % (hs_rows, hs_cols))
            homescreen_msg.setWordWrap(True)
            settings_glayout.addWidget(homescreen_label, 4, 0)
            settings_glayout.addWidget(homescreen_change_button, 4, 1)
            settings_glayout.addWidget(homescreen_clear_button, 4, 2)
            settings_glayout.addWidget(homescreen_msg, 5, 1, 1, -1)
        # Settings tab - Session Timeout
        timeout_label = QLabel(_("Session Timeout"))
        timeout_minutes = QLabel()
        timeout_slider = QSlider(Qt.Horizontal)
        timeout_slider.setRange(1, 60)
        timeout_slider.setSingleStep(1)
        timeout_slider.setTickInterval(5)
        timeout_slider.setTickPosition(QSlider.TicksBelow)
        timeout_slider.setTracking(True)
        timeout_msg = QLabel(
            _("Clear the session after the specified period "
              "of inactivity. Once a session has timed out, "
              "your PIN and passphrase (if enabled) must be "
              "re-entered to use the device."))
        timeout_msg.setWordWrap(True)
        timeout_slider.setSliderPosition(config.get_session_timeout() // 60)
        slider_moved()
        timeout_slider.valueChanged.connect(slider_moved)
        timeout_slider.sliderReleased.connect(slider_released)
        settings_glayout.addWidget(timeout_label, 6, 0)
        settings_glayout.addWidget(timeout_slider, 6, 1, 1, 3)
        settings_glayout.addWidget(timeout_minutes, 6, 4)
        settings_glayout.addWidget(timeout_msg, 7, 1, 1, -1)
        settings_layout.addLayout(settings_glayout)
        settings_layout.addStretch(1)
        # Advanced tab
        advanced_tab = QWidget()
        advanced_layout = QVBoxLayout(advanced_tab)
        advanced_glayout = QGridLayout()
        # Advanced tab - clear PIN
        clear_pin_button = QPushButton(_("Disable PIN"))
        clear_pin_button.clicked.connect(clear_pin)
        clear_pin_warning = QLabel(
            _("If you disable your PIN, anyone with physical access to your "
              "%s device can spend your reddcoins.") % plugin.device)
        clear_pin_warning.setWordWrap(True)
        clear_pin_warning.setStyleSheet("color: red")
        advanced_glayout.addWidget(clear_pin_button, 0, 2)
        advanced_glayout.addWidget(clear_pin_warning, 1, 0, 1, 5)
        # Advanced tab - toggle passphrase protection
        passphrase_button = QPushButton()
        passphrase_button.clicked.connect(toggle_passphrase)
        passphrase_msg = WWLabel(PASSPHRASE_HELP)
        passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
        passphrase_warning.setStyleSheet("color: red")
        advanced_glayout.addWidget(passphrase_button, 3, 2)
        advanced_glayout.addWidget(passphrase_msg, 4, 0, 1, 5)
        advanced_glayout.addWidget(passphrase_warning, 5, 0, 1, 5)
        # Advanced tab - wipe device
        wipe_device_button = QPushButton(_("Wipe Device"))
        wipe_device_button.clicked.connect(wipe_device)
        wipe_device_msg = QLabel(
            _("Wipe the device, removing all data from it. The firmware "
              "is left unchanged."))
        wipe_device_msg.setWordWrap(True)
        wipe_device_warning = QLabel(
            _("Only wipe a device if you have the recovery seed written down "
              "and the device wallet(s) are empty, otherwise the reddcoins "
              "will be lost forever."))
        wipe_device_warning.setWordWrap(True)
        wipe_device_warning.setStyleSheet("color: red")
        advanced_glayout.addWidget(wipe_device_button, 6, 2)
        advanced_glayout.addWidget(wipe_device_msg, 7, 0, 1, 5)
        advanced_glayout.addWidget(wipe_device_warning, 8, 0, 1, 5)
        advanced_layout.addLayout(advanced_glayout)
        advanced_layout.addStretch(1)
        tabs = QTabWidget(self)
        tabs.addTab(info_tab, _("Information"))
        tabs.addTab(settings_tab, _("Settings"))
        tabs.addTab(advanced_tab, _("Advanced"))
        dialog_vbox = QVBoxLayout(self)
        dialog_vbox.addWidget(tabs)
        dialog_vbox.addLayout(Buttons(CloseButton(self)))
        # Update information
        invoke_client(None)
| 41.794567 | 81 | 0.613966 | from functools import partial
import threading
from PyQt4.Qt import Qt
from PyQt4.Qt import QGridLayout, QInputDialog, QPushButton
from PyQt4.Qt import QVBoxLayout, QLabel, SIGNAL
from reddelectrum_gui.qt.util import *
from .plugin import TIM_NEW, TIM_RECOVER, TIM_MNEMONIC
from ..hw_wallet.qt import QtHandlerBase, QtPluginBase
from reddelectrum.i18n import _
from reddelectrum.plugins import hook, DeviceMgr
from reddelectrum.util import PrintError, UserCancelled
from reddelectrum.wallet import Wallet, Standard_Wallet
# One-line passphrase summary shown by the initialization wizard.
PASSPHRASE_HELP_SHORT =_(
    "Passphrases allow you to access new wallets, each "
    "hidden behind a particular case-sensitive passphrase.")
# Longer explanation used on the settings dialog's Advanced tab.
PASSPHRASE_HELP = PASSPHRASE_HELP_SHORT + " " + _(
    "You need to create a separate Electrum wallet for each passphrase "
    "you use as they each generate different addresses. Changing "
    "your passphrase does not lose other wallets, each is still "
    "accessible behind its own passphrase.")
# Shown by the wizard next to the "Enable PIN protection" checkbox.
RECOMMEND_PIN = _(
    "You should enable PIN protection. Your PIN is the only protection "
    "for your reddcoins if your device is lost or stolen.")
# Red warning shown wherever passphrases can be toggled.
PASSPHRASE_NOT_PIN = _(
    "If you forget a passphrase you will be unable to access any "
    "reddcoins in the wallet behind it. A passphrase is not a PIN. "
    "Only change this if you are sure you understand it.")
# Instructions displayed at the top of the KeepKey seed-recovery dialog.
CHARACTER_RECOVERY = (
    "Use the recovery cipher shown on your device to input your seed words. "
    "The cipher changes with every keypress.\n"
    "After at most 4 letters the device will auto-complete a word.\n"
    "Press SPACE or the Accept Word button to accept the device's auto-"
    "completed word and advance to the next one.\n"
    "Press BACKSPACE to go back a character or word.\n"
    "Press ENTER or the Seed Entered button once the last word in your "
    "seed is auto-completed.")
class CharacterButton(QPushButton):
    """Button that declines key events so Enter/Space reach the dialog."""
    def __init__(self, text=None):
        QPushButton.__init__(self, text)
    def keyPressEvent(self, event):
        # Mark the event unaccepted so the dialog's own key handling sees it.
        event.setAccepted(False)
class CharacterDialog(WindowModalDialog):
    """Modal dialog driving KeepKey's cipher-based seed recovery.

    get_char() spins a local QEventLoop until process_key() records the
    user's input in self.data (a character, a delete, or a 'done' marker),
    or until the dialog is rejected (loop exits non-zero -> cancelled).
    """
    def __init__(self, parent):
        super(CharacterDialog, self).__init__(parent)
        self.setWindowTitle(_("KeepKey Seed Recovery"))
        self.character_pos = 0
        self.word_pos = 0
        self.loop = QEventLoop()
        self.word_help = QLabel()
        self.char_buttons = []
        vbox = QVBoxLayout(self)
        vbox.addWidget(WWLabel(CHARACTER_RECOVERY))
        hbox = QHBoxLayout()
        hbox.addWidget(self.word_help)
        # One placeholder button per character position (4 max per word).
        for i in range(4):
            char_button = CharacterButton('*')
            char_button.setMaximumWidth(36)
            self.char_buttons.append(char_button)
            hbox.addWidget(char_button)
        self.accept_button = CharacterButton(_("Accept Word"))
        self.accept_button.clicked.connect(partial(self.process_key, 32))
        # A non-zero loop exit code signals user cancellation to get_char().
        self.rejected.connect(partial(self.loop.exit, 1))
        hbox.addWidget(self.accept_button)
        hbox.addStretch(1)
        vbox.addLayout(hbox)
        self.finished_button = QPushButton(_("Seed Entered"))
        self.cancel_button = QPushButton(_("Cancel"))
        self.finished_button.clicked.connect(partial(self.process_key,
                                                     Qt.Key_Return))
        self.cancel_button.clicked.connect(self.rejected)
        buttons = Buttons(self.finished_button, self.cancel_button)
        vbox.addSpacing(40)
        vbox.addLayout(buttons)
        self.refresh()
        self.show()
    def refresh(self):
        """Update prompts and enable/disable buttons for the current position."""
        self.word_help.setText("Enter seed word %2d:" % (self.word_pos + 1))
        self.accept_button.setEnabled(self.character_pos >= 3)
        # Seeds are 12, 18 or 24 words, so finishing is only offered there.
        self.finished_button.setEnabled((self.word_pos in (11, 17, 23)
                                         and self.character_pos >= 3))
        for n, button in enumerate(self.char_buttons):
            button.setEnabled(n == self.character_pos)
            if n == self.character_pos:
                button.setFocus()
    def is_valid_alpha_space(self, key):
        """Return whether key is acceptable input at the current position."""
        # Auto-completion requires at least 3 characters
        if key == ord(' ') and self.character_pos >= 3:
            return True
        # Firmware aborts protocol if the 5th character is non-space
        if self.character_pos >= 4:
            return False
        return (key >= ord('a') and key <= ord('z')
                or (key >= ord('A') and key <= ord('Z')))
    def process_key(self, key):
        """Translate a keypress into self.data and wake the waiting loop."""
        self.data = None
        if key == Qt.Key_Return and self.finished_button.isEnabled():
            self.data = {'done': True}
        elif key == Qt.Key_Backspace and (self.word_pos or self.character_pos):
            self.data = {'delete': True}
        elif self.is_valid_alpha_space(key):
            self.data = {'character': chr(key).lower()}
        if self.data:
            self.loop.exit(0)
    def keyPressEvent(self, event):
        self.process_key(event.key())
        # Unhandled keys fall through to the default dialog behaviour.
        if not self.data:
            QDialog.keyPressEvent(self, event)
    def get_char(self, word_pos, character_pos):
        """Block in a local event loop until the user supplies input."""
        self.word_pos = word_pos
        self.character_pos = character_pos
        self.refresh()
        if self.loop.exec_():
            self.data = None # User cancelled
class QtHandler(QtHandlerBase):
    """Qt handler bridging the device thread and the GUI thread.

    Blocking get_* calls (run on the device thread) emit a signal, wait on
    self.done, and then read back the result produced by the corresponding
    dialog slot executed on the GUI thread.
    """
    char_signal = pyqtSignal(object)
    pin_signal = pyqtSignal(object)
    def __init__(self, win, pin_matrix_widget_class, device):
        super(QtHandler, self).__init__(win, device)
        self.char_signal.connect(self.update_character_dialog)
        self.pin_signal.connect(self.pin_dialog)
        self.pin_matrix_widget_class = pin_matrix_widget_class
        self.character_dialog = None
    def get_char(self, msg):
        """Block until the user supplies a seed character (or cancels)."""
        self.done.clear()
        self.char_signal.emit(msg)
        self.done.wait()
        data = self.character_dialog.data
        # Close the dialog once the seed is complete or the user cancelled.
        if not data or 'done' in data:
            self.character_dialog.accept()
            self.character_dialog = None
        return data
    def get_pin(self, msg):
        """Block until the user enters a PIN on the matrix widget."""
        self.done.clear()
        self.pin_signal.emit(msg)
        self.done.wait()
        return self.response
    def pin_dialog(self, msg):
        """GUI-thread slot: show a modal PIN matrix and record its value."""
        # Needed e.g. when resetting a device
        self.clear_dialog()
        dialog = WindowModalDialog(self.top_level_window(), _("Enter PIN"))
        matrix = self.pin_matrix_widget_class()
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(msg))
        vbox.addWidget(matrix)
        vbox.addLayout(Buttons(CancelButton(dialog), OkButton(dialog)))
        dialog.setLayout(vbox)
        dialog.exec_()
        self.response = str(matrix.get_value())
        self.done.set()
    def update_character_dialog(self, msg):
        """GUI-thread slot: (lazily create and) advance the recovery dialog."""
        if not self.character_dialog:
            self.character_dialog = CharacterDialog(self.top_level_window())
        self.character_dialog.get_char(msg.word_pos, msg.character_pos)
        self.done.set()
class QtPlugin(QtPluginBase):
    """Qt-specific plugin base shared by Trezor-compatible devices.

    Derived classes must provide the following class-static variables:
      icon_file
      pin_matrix_widget_class
    """
    def create_handler(self, window):
        """Return the GUI handler the device thread uses for this window."""
        return QtHandler(window, self.pin_matrix_widget_class(), self.device)
    @hook
    def receive_menu(self, menu, addrs, wallet):
        """Add a 'Show on <device>' entry when exactly one address of a
        standard wallet backed by this keystore type is selected."""
        if type(wallet) is not Standard_Wallet:
            return
        keystore = wallet.get_keystore()
        if type(keystore) == self.keystore_class and len(addrs) == 1:
            def show_address():
                keystore.thread.add(partial(self.show_address, wallet, addrs[0]))
            menu.addAction(_("Show on %s") % self.device, show_address)
    def show_settings_dialog(self, window, keystore):
        """Open the settings dialog for the chosen (connected) device."""
        device_id = self.choose_device(window, keystore)
        if device_id:
            SettingsDialog(window, self, keystore, device_id).exec_()
    def request_trezor_init_settings(self, wizard, method, device):
        """Collect device-initialization settings from the user.

        Returns a tuple ``(item, label, pin, use_passphrase)``.  For
        TIM_NEW/TIM_RECOVER, ``item`` is the seed-length radio index and
        ``pin`` a bool (enable PIN protection); otherwise ``item`` is the
        cleaned mnemonic/xprv text and ``pin`` the PIN string entered.
        """
        vbox = QVBoxLayout()
        next_enabled = True
        label = QLabel(_("Enter a label to name your device:"))
        name = QLineEdit()
        hl = QHBoxLayout()
        hl.addWidget(label)
        hl.addWidget(name)
        hl.addStretch(1)
        vbox.addLayout(hl)
        def clean_text(widget):
            # Normalize whitespace in user-entered seed/xprv text.
            text = unicode(widget.toPlainText()).strip()
            return ' '.join(text.split())
        if method in [TIM_NEW, TIM_RECOVER]:
            gb = QGroupBox()
            hbox1 = QHBoxLayout()
            gb.setLayout(hbox1)
            # KeepKey recovery doesn't need a word count, so the group box
            # is only *shown* for new seeds or TREZOR devices.  The button
            # group itself must exist unconditionally: bg.checkedId() is
            # read below for every TIM_NEW/TIM_RECOVER flow (previously a
            # NameError during KeepKey seed recovery).
            if method == TIM_NEW or self.device == 'TREZOR':
                vbox.addWidget(gb)
            gb.setTitle(_("Select your seed length:"))
            bg = QButtonGroup()
            for i, count in enumerate([12, 18, 24]):
                rb = QRadioButton(gb)
                rb.setText(_("%d words") % count)
                bg.addButton(rb)
                bg.setId(rb, i)
                hbox1.addWidget(rb)
                rb.setChecked(True)
            cb_pin = QCheckBox(_('Enable PIN protection'))
            cb_pin.setChecked(True)
        else:
            text = QTextEdit()
            text.setMaximumHeight(60)
            if method == TIM_MNEMONIC:
                msg = _("Enter your BIP39 mnemonic:")
            else:
                msg = _("Enter the master private key beginning with xprv:")
                def set_enabled():
                    from reddelectrum.keystore import is_xprv
                    wizard.next_button.setEnabled(is_xprv(clean_text(text)))
                text.textChanged.connect(set_enabled)
                next_enabled = False
            vbox.addWidget(QLabel(msg))
            vbox.addWidget(text)
            pin = QLineEdit()
            pin.setValidator(QRegExpValidator(QRegExp('[1-9]{0,10}')))
            pin.setMaximumWidth(100)
            hbox_pin = QHBoxLayout()
            hbox_pin.addWidget(QLabel(_("Enter your PIN (digits 1-9):")))
            hbox_pin.addWidget(pin)
            hbox_pin.addStretch(1)
        if method in [TIM_NEW, TIM_RECOVER]:
            vbox.addWidget(WWLabel(RECOMMEND_PIN))
            vbox.addWidget(cb_pin)
        else:
            vbox.addLayout(hbox_pin)
        passphrase_msg = WWLabel(PASSPHRASE_HELP_SHORT)
        passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
        passphrase_warning.setStyleSheet("color: red")
        cb_phrase = QCheckBox(_('Enable passphrases'))
        cb_phrase.setChecked(False)
        vbox.addWidget(passphrase_msg)
        vbox.addWidget(passphrase_warning)
        vbox.addWidget(cb_phrase)
        wizard.exec_layout(vbox, next_enabled=next_enabled)
        if method in [TIM_NEW, TIM_RECOVER]:
            item = bg.checkedId()
            pin = cb_pin.isChecked()
        else:
            item = ' '.join(str(clean_text(text)).split())
            pin = str(pin.text())
        return (item, unicode(name.text()), pin, cb_phrase.isChecked())
class SettingsDialog(WindowModalDialog):
    """Device settings dialog with Information / Settings / Advanced tabs.

    All device calls go through the local ``invoke_client()`` closure,
    which runs them on the keystore's device thread and refreshes the
    displayed feature values on success.
    """
    def __init__(self, window, plugin, keystore, device_id):
        title = _("%s Settings") % plugin.device
        super(SettingsDialog, self).__init__(window, title)
        self.setMaximumWidth(540)
        devmgr = plugin.device_manager()
        config = devmgr.config
        handler = keystore.handler  # (not used below)
        thread = keystore.thread
        # Homescreen bitmap dimensions; change_homescreen() below requires
        # PIL image size (width, height) == (hs_cols, hs_rows).
        hs_rows, hs_cols = (64, 128)
        def invoke_client(method, *args, **kw_args):
            # Run a client method on the device thread.  method=None only
            # fetches the features.  unpair_after=True drops the pairing
            # after the call (used for wipe and passphrase toggling).
            unpair_after = kw_args.pop('unpair_after', False)
            def task():
                client = devmgr.client_by_id(device_id)
                if not client:
                    raise RuntimeError("Device not connected")
                if method:
                    getattr(client, method)(*args, **kw_args)
                if unpair_after:
                    devmgr.unpair_id(device_id)
                return client.features
            thread.add(task, on_success=update)
        def update(features):
            # Refresh every label/button from the freshly read features.
            self.features = features
            set_label_enabled()
            # bootloader hash shown as two 32-hex-digit lines
            bl_hash = features.bootloader_hash.encode('hex')
            bl_hash = "\n".join([bl_hash[:32], bl_hash[32:]])
            noyes = [_("No"), _("Yes")]
            endis = [_("Enable Passphrases"), _("Disable Passphrases")]
            disen = [_("Disabled"), _("Enabled")]
            setchange = [_("Set a PIN"), _("Change PIN")]
            version = "%d.%d.%d" % (features.major_version,
                                    features.minor_version,
                                    features.patch_version)
            coins = ", ".join(coin.coin_name for coin in features.coins)
            device_label.setText(features.label)
            pin_set_label.setText(noyes[features.pin_protection])
            passphrases_label.setText(disen[features.passphrase_protection])
            bl_hash_label.setText(bl_hash)
            label_edit.setText(features.label)
            device_id_label.setText(features.device_id)
            initialized_label.setText(noyes[features.initialized])
            version_label.setText(version)
            coins_label.setText(coins)
            clear_pin_button.setVisible(features.pin_protection)
            clear_pin_warning.setVisible(features.pin_protection)
            pin_button.setText(setchange[features.pin_protection])
            pin_msg.setVisible(not features.pin_protection)
            passphrase_button.setText(endis[features.passphrase_protection])
            language_label.setText(features.language)
        def set_label_enabled():
            # Apply button only enabled when the label was actually edited.
            label_apply.setEnabled(label_edit.text() != self.features.label)
        def rename():
            invoke_client('change_label', unicode(label_edit.text()))
        def toggle_passphrase():
            title = _("Confirm Toggle Passphrase Protection")
            currently_enabled = self.features.passphrase_protection
            if currently_enabled:
                msg = _("After disabling passphrases, you can only pair this "
                        "Electrum wallet if it had an empty passphrase. "
                        "If its passphrase was not empty, you will need to "
                        "create a new wallet with the install wizard. You "
                        "can use this wallet again at any time by re-enabling "
                        "passphrases and entering its passphrase.")
            else:
                msg = _("Your current Electrum wallet can only be used with "
                        "an empty passphrase. You must create a separate "
                        "wallet with the install wizard for other passphrases "
                        "as each one generates a new set of addresses.")
            msg += "\n\n" + _("Are you sure you want to proceed?")
            if not self.question(msg, title=title):
                return
            # When disabling, also unpair so the cached session state is
            # discarded.
            invoke_client('toggle_passphrase', unpair_after=currently_enabled)
        def change_homescreen():
            from PIL import Image
            dialog = QFileDialog(self, _("Choose Homescreen"))
            filename = dialog.getOpenFileName()
            if filename:
                im = Image.open(str(filename))
                # PIL size is (width, height) == (hs_cols, hs_rows).
                # NOTE(review): the message reads "64 x 128" while the
                # required width x height is 128 x 64 — confirm wording.
                if im.size != (hs_cols, hs_rows):
                    raise Exception('Image must be 64 x 128 pixels')
                # Convert to 1-bit, serialize row-major as a bit string,
                # then pack 8 pixels per byte for the device.
                im = im.convert('1')
                pix = im.load()
                img = ''
                for j in range(hs_rows):
                    for i in range(hs_cols):
                        img += '1' if pix[i, j] else '0'
                img = ''.join(chr(int(img[i:i + 8], 2))
                              for i in range(0, len(img), 8))
                invoke_client('change_homescreen', img)
        def clear_homescreen():
            invoke_client('change_homescreen', '\x00')
        def set_pin():
            invoke_client('set_pin', remove=False)
        def clear_pin():
            invoke_client('set_pin', remove=True)
        def wipe_device():
            # Extra confirmation when the open wallet still holds funds.
            wallet = window.wallet
            if wallet and sum(wallet.get_balance()):
                title = _("Confirm Device Wipe")
                msg = _("Are you SURE you want to wipe the device?\n"
                        "Your wallet still has reddcoins in it!")
                if not self.question(msg, title=title,
                                     icon=QMessageBox.Critical):
                    return
            invoke_client('wipe_device', unpair_after=True)
        def slider_moved():
            mins = timeout_slider.sliderPosition()
            timeout_minutes.setText(_("%2d minutes") % mins)
        def slider_released():
            # Slider works in minutes; stored timeout is in seconds.
            config.set_session_timeout(timeout_slider.sliderPosition() * 60)
        # --- Information tab: read-only feature values --------------------
        info_tab = QWidget()
        info_layout = QVBoxLayout(info_tab)
        info_glayout = QGridLayout()
        info_glayout.setColumnStretch(2, 1)
        device_label = QLabel()
        pin_set_label = QLabel()
        passphrases_label = QLabel()
        version_label = QLabel()
        device_id_label = QLabel()
        bl_hash_label = QLabel()
        bl_hash_label.setWordWrap(True)
        coins_label = QLabel()
        coins_label.setWordWrap(True)
        language_label = QLabel()
        initialized_label = QLabel()
        rows = [
            (_("Device Label"), device_label),
            (_("PIN set"), pin_set_label),
            (_("Passphrases"), passphrases_label),
            (_("Firmware Version"), version_label),
            (_("Device ID"), device_id_label),
            (_("Bootloader Hash"), bl_hash_label),
            (_("Supported Coins"), coins_label),
            (_("Language"), language_label),
            (_("Initialized"), initialized_label),
        ]
        for row_num, (label, widget) in enumerate(rows):
            info_glayout.addWidget(QLabel(label), row_num, 0)
            info_glayout.addWidget(widget, row_num, 1)
        info_layout.addLayout(info_glayout)
        # --- Settings tab: label, PIN, homescreen, session timeout --------
        settings_tab = QWidget()
        settings_layout = QVBoxLayout(settings_tab)
        settings_glayout = QGridLayout()
        # NOTE(review): "mutiple" typo in this user-facing string; fixing it
        # is a behavior change (translation key), so flagged only.
        label_msg = QLabel(_("Name this %s. If you have mutiple devices "
                             "their labels help distinguish them.")
                           % plugin.device)
        label_msg.setWordWrap(True)
        label_label = QLabel(_("Device Label"))
        label_edit = QLineEdit()
        label_edit.setMinimumWidth(150)
        label_edit.setMaxLength(plugin.MAX_LABEL_LEN)
        label_apply = QPushButton(_("Apply"))
        label_apply.clicked.connect(rename)
        label_edit.textChanged.connect(set_label_enabled)
        settings_glayout.addWidget(label_label, 0, 0)
        settings_glayout.addWidget(label_edit, 0, 1, 1, 2)
        settings_glayout.addWidget(label_apply, 0, 3)
        settings_glayout.addWidget(label_msg, 1, 1, 1, -1)
        pin_label = QLabel(_("PIN Protection"))
        pin_button = QPushButton()
        pin_button.clicked.connect(set_pin)
        settings_glayout.addWidget(pin_label, 2, 0)
        settings_glayout.addWidget(pin_button, 2, 1)
        pin_msg = QLabel(_("PIN protection is strongly recommended. "
                           "A PIN is your only protection against someone "
                           "stealing your reddcoins if they obtain physical "
                           "access to your %s.") % plugin.device)
        pin_msg.setWordWrap(True)
        pin_msg.setStyleSheet("color: red")
        settings_glayout.addWidget(pin_msg, 3, 1, 1, -1)
        # Homescreen controls are hidden for KeepKey devices.
        if plugin.device != 'KeepKey':
            homescreen_layout = QHBoxLayout()
            homescreen_label = QLabel(_("Homescreen"))
            homescreen_change_button = QPushButton(_("Change..."))
            homescreen_clear_button = QPushButton(_("Reset"))
            homescreen_change_button.clicked.connect(change_homescreen)
            homescreen_clear_button.clicked.connect(clear_homescreen)
            homescreen_msg = QLabel(_("You can set the homescreen on your "
                                      "device to personalize it. You must "
                                      "choose a %d x %d monochrome black and "
                                      "white image.") % (hs_rows, hs_cols))
            homescreen_msg.setWordWrap(True)
            settings_glayout.addWidget(homescreen_label, 4, 0)
            settings_glayout.addWidget(homescreen_change_button, 4, 1)
            settings_glayout.addWidget(homescreen_clear_button, 4, 2)
            settings_glayout.addWidget(homescreen_msg, 5, 1, 1, -1)
        timeout_label = QLabel(_("Session Timeout"))
        timeout_minutes = QLabel()
        timeout_slider = QSlider(Qt.Horizontal)
        timeout_slider.setRange(1, 60)
        timeout_slider.setSingleStep(1)
        timeout_slider.setTickInterval(5)
        timeout_slider.setTickPosition(QSlider.TicksBelow)
        timeout_slider.setTracking(True)
        timeout_msg = QLabel(
            _("Clear the session after the specified period "
              "of inactivity. Once a session has timed out, "
              "your PIN and passphrase (if enabled) must be "
              "re-entered to use the device."))
        timeout_msg.setWordWrap(True)
        # Stored timeout is in seconds; the slider position is in minutes.
        timeout_slider.setSliderPosition(config.get_session_timeout() // 60)
        slider_moved()
        timeout_slider.valueChanged.connect(slider_moved)
        timeout_slider.sliderReleased.connect(slider_released)
        settings_glayout.addWidget(timeout_label, 6, 0)
        settings_glayout.addWidget(timeout_slider, 6, 1, 1, 3)
        settings_glayout.addWidget(timeout_minutes, 6, 4)
        settings_glayout.addWidget(timeout_msg, 7, 1, 1, -1)
        settings_layout.addLayout(settings_glayout)
        settings_layout.addStretch(1)
        # --- Advanced tab: destructive / security-sensitive actions -------
        advanced_tab = QWidget()
        advanced_layout = QVBoxLayout(advanced_tab)
        advanced_glayout = QGridLayout()
        clear_pin_button = QPushButton(_("Disable PIN"))
        clear_pin_button.clicked.connect(clear_pin)
        clear_pin_warning = QLabel(
            _("If you disable your PIN, anyone with physical access to your "
              "%s device can spend your reddcoins.") % plugin.device)
        clear_pin_warning.setWordWrap(True)
        clear_pin_warning.setStyleSheet("color: red")
        advanced_glayout.addWidget(clear_pin_button, 0, 2)
        advanced_glayout.addWidget(clear_pin_warning, 1, 0, 1, 5)
        passphrase_button = QPushButton()
        passphrase_button.clicked.connect(toggle_passphrase)
        passphrase_msg = WWLabel(PASSPHRASE_HELP)
        passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
        passphrase_warning.setStyleSheet("color: red")
        advanced_glayout.addWidget(passphrase_button, 3, 2)
        advanced_glayout.addWidget(passphrase_msg, 4, 0, 1, 5)
        advanced_glayout.addWidget(passphrase_warning, 5, 0, 1, 5)
        wipe_device_button = QPushButton(_("Wipe Device"))
        wipe_device_button.clicked.connect(wipe_device)
        wipe_device_msg = QLabel(
            _("Wipe the device, removing all data from it. The firmware "
              "is left unchanged."))
        wipe_device_msg.setWordWrap(True)
        wipe_device_warning = QLabel(
            _("Only wipe a device if you have the recovery seed written down "
              "and the device wallet(s) are empty, otherwise the reddcoins "
              "will be lost forever."))
        wipe_device_warning.setWordWrap(True)
        wipe_device_warning.setStyleSheet("color: red")
        advanced_glayout.addWidget(wipe_device_button, 6, 2)
        advanced_glayout.addWidget(wipe_device_msg, 7, 0, 1, 5)
        advanced_glayout.addWidget(wipe_device_warning, 8, 0, 1, 5)
        advanced_layout.addLayout(advanced_glayout)
        advanced_layout.addStretch(1)
        tabs = QTabWidget(self)
        tabs.addTab(info_tab, _("Information"))
        tabs.addTab(settings_tab, _("Settings"))
        tabs.addTab(advanced_tab, _("Advanced"))
        dialog_vbox = QVBoxLayout(self)
        dialog_vbox.addWidget(tabs)
        dialog_vbox.addLayout(Buttons(CloseButton(self)))
        # Populate the dialog by fetching the current features (no method).
        invoke_client(None)
| true | true |
1c322e67190af7fee7a252a4111c49a37fc343b6 | 1,301 | py | Python | PythonSkripts/bisection.py | NMarkgraf/Quantitative-Methoden-der-W-Informatik | 0b0be8d832eadce774a01047cd978f9599d29ca5 | [
"CC0-1.0"
] | null | null | null | PythonSkripts/bisection.py | NMarkgraf/Quantitative-Methoden-der-W-Informatik | 0b0be8d832eadce774a01047cd978f9599d29ca5 | [
"CC0-1.0"
] | null | null | null | PythonSkripts/bisection.py | NMarkgraf/Quantitative-Methoden-der-W-Informatik | 0b0be8d832eadce774a01047cd978f9599d29ca5 | [
"CC0-1.0"
] | null | null | null | # ========================================================================
# Bisection-Verfahren in Python Rev. 2.0 (13. Apr. 2020)
# =============================-------------------------------------------
# (C)opyleft in 2020 by N. Markgraf (nmarkgraf@hotmail.com)
#
# ========================================================================
from math import exp, fabs
def print_iter_info(i, a, b, c, f):
    """Print the state of one bisection iteration (teaching output)."""
    print(f'Iter. {i}: a={a:.8F} f(a)={f(a):.8F} c=(a+b)/2={c:.8F} '
          f'f(c)={f(c):.8F} b={b:.8F} f(b)={f(b):.8F}')
def bisection(f, a, b, max_iter=1000, epsilon=0.0001):
    """Bracket a root of ``f`` in [a, b] by interval bisection.

    Args:
        f: continuous function whose root is sought.
        a, b: interval bounds (either order); f(a) and f(b) must not
            have the same sign (their product must be <= 0).
        max_iter: maximum number of halving steps.
        epsilon: stop once the interval is narrower than this.

    Returns:
        (a, b): the final interval bracketing the root.

    Raises:
        ArithmeticError: if f(a) * f(b) > 0 (no guaranteed sign change).
    """
    if f(a) * f(b) > 0:
        raise ArithmeticError("Das Produkt der Intervallgrenzen muss "
                              "ein Vorzeichenwechsel haben!")
    if a > b:
        a, b = b, a
    # Halve the interval, keeping the half that brackets the sign change.
    # range(1, max_iter + 1) performs up to max_iter halvings; the
    # previous bound of range(1, max_iter) ran one iteration too few.
    for i in range(1, max_iter + 1):
        if b - a < epsilon:
            break
        c = (a + b) / 2.0
        print_iter_info(i, a, b, c, f)
        if f(a) * f(c) <= 0:
            b = c
        else:
            a = c
    return a, b
def fkt(x):
    """Example function f(x) = exp(-x**2) - x used by the demo below;
    it has a single sign change on the interval [0, 1]."""
    value = exp(-x**2)
    return value - x
if __name__ == "__main__":
    # Demo: bracket the root of fkt on [0, 1] and report the interval.
    links, rechts = bisection(fkt, 0, 1)
    print(f'Der x-Wert liegt zwischen {links:.10F} '
          f'und {rechts:.10F}')
| 29.568182 | 74 | 0.420446 |
from math import exp, fabs
def print_iter_info(i, a, b, c, f):
print(f'Iter. {i}: a={a:.8F} f(a)={f(a):.8F} c=(a+b)/2={c:.8F} '
f'f(c)={f(c):.8F} b={b:.8F} f(b)={f(b):.8F}')
def bisection(f, a, b, max_iter=1000, epsilon=0.0001):
if f(a) * f(b) > 0:
raise ArithmeticError("Das Produkt der Intervallgrenzen muss "
"ein Vorzeichenwechsel haben!")
if a > b:
a, b = b, a
iw = b - a
for i in range(1, max_iter):
if iw < epsilon:
break
c = (a + b) / 2.0
print_iter_info(i, a, b, c, f)
if f(a)*f(c) <= 0:
b = c
else:
a = c
iw = b - a
return a, b
def fkt(x):
return exp(-x**2)-x
if __name__ == "__main__":
intervall_links, intervall_rechts = bisection(fkt, 0, 1)
print(f'Der x-Wert liegt zwischen {intervall_links:.10F} '
f'und {intervall_rechts:.10F}')
| true | true |
1c322e8d00d4637a3069f21b0e334e01caf84026 | 1,763 | py | Python | reviews/admin.py | shockflash/reviews | f6cf2727e56f190e48f08d5da7932ff9d7b12936 | [
"BSD-3-Clause"
] | 1 | 2015-03-01T10:39:22.000Z | 2015-03-01T10:39:22.000Z | reviews/admin.py | shockflash/reviews | f6cf2727e56f190e48f08d5da7932ff9d7b12936 | [
"BSD-3-Clause"
] | null | null | null | reviews/admin.py | shockflash/reviews | f6cf2727e56f190e48f08d5da7932ff9d7b12936 | [
"BSD-3-Clause"
] | null | null | null | from django.core import urlresolvers
from django.utils.translation import ugettext as _
from django.contrib import admin
from reviews.models import Review, ReviewSegment, Category, CategorySegment
class ReviewSegmentInline(admin.TabularInline):
    """Inline editor for a review's segments.

    The segment set is fixed by the form, so adding, deleting, and
    re-categorising segments are all disabled here.
    """
    model = ReviewSegment
    extra = 0                       # no blank extra rows
    can_delete = False              # segments cannot be removed manually
    max_num = 0                     # segments cannot be added manually
    readonly_fields = ('segment',)  # category of existing segments is fixed
class ReviewAdmin(admin.ModelAdmin):
    """Admin for reviews; segments are edited inline on the review page."""
    raw_id_fields = ('user',)
    list_display = ('__unicode__', 'content_object')
    inlines = [ReviewSegmentInline]
class CategoryAdmin(admin.ModelAdmin):
    """Admin for categories, with a shortcut to each category's segments."""
    list_display = ('code', 'segment_link')
    search_fields = ['code']
    def segment_link(self, obj):
        # Link into the CategorySegment changelist, pre-filtered by category.
        href = '../categorysegment/?q=&category__id__exact={0}'.format(obj.id)
        return '<a href="{0}">{1}</a>'.format(href, _('Show all segments'))
    segment_link.allow_tags = True
class CategorySegmentAdmin(admin.ModelAdmin):
    """Admin for category segments, linking each row to its category."""
    list_display = ('title', 'position', 'category_link')
    list_filter = ('title', 'category')
    list_select_related = True
    search_fields = ['title', 'category__code']
    def category_link(self, obj):
        # Link back to the parent category's change form.
        url = urlresolvers.reverse('admin:reviews_category_change',
                                   args=(obj.category.id,))
        return '<a href="{0}">{1}</a>'.format(url, obj.category.code)
    category_link.allow_tags = True
# Wire the admin classes to their models.
for model_cls, model_admin in ((Review, ReviewAdmin),
                               (Category, CategoryAdmin),
                               (CategorySegment, CategorySegmentAdmin)):
    admin.site.register(model_cls, model_admin)
| 32.648148 | 136 | 0.711855 | from django.core import urlresolvers
from django.utils.translation import ugettext as _
from django.contrib import admin
from reviews.models import Review, ReviewSegment, Category, CategorySegment
class ReviewSegmentInline(admin.TabularInline):
model = ReviewSegment
extra = 0
can_delete = False
max_num = 0
readonly_fields = ('segment',)
class ReviewAdmin(admin.ModelAdmin):
list_display = ('__unicode__', 'content_object')
raw_id_fields = ('user',)
inlines = [
ReviewSegmentInline,
]
class CategoryAdmin(admin.ModelAdmin):
list_display = ('code', 'segment_link')
search_fields = ['code']
def segment_link(self, obj):
return '<a href="../categorysegment/?q=&category__id__exact=%s">%s</a>' % (str(obj.id), _('Show all segments'))
segment_link.allow_tags = True
class CategorySegmentAdmin(admin.ModelAdmin):
list_display = ('title', 'position', 'category_link')
list_filter = ('title', 'category')
list_select_related = True
def category_link(self, obj):
return '<a href="%s">%s</a>' % (urlresolvers.reverse('admin:reviews_category_change', args=(obj.category.id,)), obj.category.code)
category_link.allow_tags = True
search_fields = ['title', 'category__code']
admin.site.register(Review, ReviewAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(CategorySegment, CategorySegmentAdmin)
| true | true |
1c322f42afa522705e697dc4fdb80e5c9139a56a | 57,767 | py | Python | sensortoolkit/evaluation_objs/_sensor_eval.py | USEPA/sensortoolkit | a9da32fd4df492154c6e4cc570011d14e933ee83 | [
"MIT"
] | 2 | 2022-02-25T21:59:04.000Z | 2022-03-01T19:37:38.000Z | sensortoolkit/evaluation_objs/_sensor_eval.py | USEPA/sensortoolkit | a9da32fd4df492154c6e4cc570011d14e933ee83 | [
"MIT"
] | null | null | null | sensortoolkit/evaluation_objs/_sensor_eval.py | USEPA/sensortoolkit | a9da32fd4df492154c6e4cc570011d14e933ee83 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Top-level analysis module for the ``sensortoolkit`` library.
Contains the front-facing ``SensorEvaluation`` class for conducting analysis
of sensor data.
===============================================================================
@Author:
| Samuel Frederick, NSSC Contractor (ORAU)
| U.S. EPA / ORD / CEMM / AMCD / SFSB
Created:
Fri Jul 31 08:39:37 2020
Last Updated:
Wed Jul 7 15:01:00 2021
"""
import math
import json
import sys
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sensortoolkit.calculate
import sensortoolkit.datetime_utils
import sensortoolkit.deploy
import sensortoolkit.lib_utils
import sensortoolkit.model
import sensortoolkit.param
import sensortoolkit.plotting
import sensortoolkit.qc
import sensortoolkit.reference
import sensortoolkit.ingest
from sensortoolkit import presets as _presets
class SensorEvaluation:
"""Evaluate air sensor performance for use in NSIM applications.
A class for conducting analysis for air sensors deployed at ambient,
outdoor, fixed monitoring sites using U.S. EPA's performance metrics and
targets for sensors measuring PM2.5 or O3. U.S. EPA's testing protocols and
performance metrics are intended for use with devices deployed for
non-regulatory supplemental and informational monitoring (NSIM)
applications.
Args:
sensor (sensortoolkit.AirSensor object):
The air sensor object containing datasets with parameter
measurements that will be evaluated.
param (sensortoolkit.Parameter object):
The parameter (measured environmental quantity) object containing
parameter-specific attributes as well as metrics and targets for
evaluating sensor performance.
reference (sensortoolkit.ReferenceMethod object):
The FRM/FEM reference instrument object containing datasets with
parameter measurements against which air sensor data will be
evaluated.
write_to_file (bool):
If true, evaluation statistics will be written to the
``/data/eval_stats`` sensor subdirectory. Figures will also be
written to the appropriate figures subdirectory.
**kwargs:
Keyword arguments (currently unused).
Attributes:
path (str): The project path in which data, figures, and reports
relevant to the sensor evaluation are stored.
serials (dict): A dictionary of sensor serial identifiers for each
unit in the base testing deployment.
figure_path (str): The full directory path to figures for a given
sensor make and model.
stats_path: The full directory path to evaluation statistics for a
given sensor make and model.
full_df_list (list of pandas DataFrames): List of sensor data frames
of length N (where N is the number of sensor units in a testing
group). DataFrames indexed by ``DateTime`` at recorded sampling
frequency.
hourly_df_list (list of pandas DataFrames): List of sensor data frames
of length N (where N is the number of sensor units in a testing
group). DataFrames indexed by ``DateTime`` at 1-hour averaged
sampling frequency.
daily_df_list (list of pandas DataFrames): List of sensor data frames
of length N (where N is the number of sensor units in a testing
group). DataFrames indexed by ``DateTime`` at 24-hour averaged
sampling frequency.
deploy_period_df (pandas DataFrame): A data frame containing the start
time (‘Begin’), end time (‘End’), and total duration of evaluation
period for each sensor in a deployment group.
deploy_dict (dict): A dictionary containing descriptive statistics and
textual information about the deployment (testing agency, site,
time period, etc.), sensors tested, and site conditions during the
evaluation.
deploy_bdate (pandas timestamp object): Overall start date of
deployment. Determined by selecting the earliest recorded timestamp
in sensor data frames.
deploy_edate (pandas timestamp object): Overall end date of deployment.
Determined by selecting the latest recorded timestamp in sensor
data frames.
ref_dict (dict):
A dictionary container for reference data objects at varying
averaging intervals and parameter classifications.
hourly_ref_df (pandas DataFrame):
Dataset containing reference data at 1-hour averaging intervals
for methods measuring parameters matching the parameter
classification of the parameter object passed to the
            ``SensorEvaluation`` class during instantiation.
daily_ref_df (pandas DataFrame):
Dataset containing reference data at 24-hour averaging intervals
for methods measuring parameters matching the parameter
classification of the parameter object passed to the
            ``SensorEvaluation`` class during instantiation.
pm_hourly_ref_df (pandas DataFrame):
Dataset containing reference data at 1-hour averaging intervals
for methods measuring particulate matter parameters.
pm_daily_ref_df (pandas DataFrame):
Dataset containing reference data at 24-hour averaging intervals
for methods measuring particulate matter parameters.
gas_hourly_ref_df (pandas DataFrame):
Dataset containing reference data at 1-hour averaging intervals
for methods measuring gaseous parameters.
gas_daily_ref_df (pandas DataFrame):
Dataset containing reference data at 24-hour averaging intervals
for methods measuring gaseous parameters.
met_hourly_ref_df (pandas DataFrame):
Dataset containing reference data at 1-hour averaging intervals
for methods measuring meteorological parameters.
met_daily_ref_df (pandas DataFrame):
Dataset containing reference data at 24-hour averaging intervals
for methods measuring meteorological parameters.
ref_name (str): The make and model of the FRM/FEM instrument used as
reference for the selected evaluation parameter. Both AirNowTech
and AQS return the AQS method code, and the AQS Sampling Methods
Reference table is used to determine the instrument name associated
with this code. AirNow does not return method codes or instrument
names. When the name and type of the FRM/FEM instrument are
unknown, ref_name takes the value ‘unknown_reference’.
avg_hrly_df (pandas DataFrame): Data frame containing the inter-sensor
average for concurrent sensor measurements at 1-hour averaging
intervals.
avg_daily_df (pandas DataFrame): Data frame containing the inter-sensor
average for concurrent sensor measurements at 24-hour averaging
intervals.
stats_df (pandas DataFrame): Data frame with OLS regression (sensor vs
FRM/FEM) statistics, including R2, slope, intercept, RMSE, N
(Number of sensor-FRM/FEM data point pairs), as well as the
minimum, maximum, and the mean sensor concentration.
avg_stats_df (pandas DataFrame): Data frame with OLS regression (sensor
vs intersensor average) statistics, including R2, slope,
intercept, RMSE, N (Number of concurrent sensor measurements during
which all sensors in the testing group reported values), as well as
the minimum, maximum, and the mean sensor concentration.
"""
    def __init__(self, sensor, param, reference, write_to_file=False,
                 **kwargs):
        self.sensor = sensor
        self.name = sensor.name
        self.reference = reference
        # Fail early if sensor data were never loaded.
        # NOTE(review): sys.exit() in a library constructor kills the host
        # process; consider raising instead.
        try:
            self.sensor.data
        except AttributeError as error:
            sys.exit(f'{error}, use the AirSensor.load_data() method to import'
                     f' data')
        self.path = sensor.project_path
        self.serials = sensor.serials
        # Private to avoid confusion between SensorEvaluation attribute and
        # parameter attribute
        self.param = param
        self._param_name = param.name
        if self._param_name not in self.sensor.param_headers:
            raise AttributeError(f'{self._param_name} is not in the list of '
                                 f'parameters measured by {self.name}')
        self.write_to_file = write_to_file
        self.testing_loc = _presets.test_loc
        self.testing_org = _presets.test_org
        # Add keyword arguments (note: this can overwrite attributes set
        # above if a kwarg shares a name with one of them)
        self.__dict__.update(**kwargs)
        self.kwargs = kwargs
        # path to sensor figures
        self.figure_path = os.path.join(self.path, 'figures', self.name, '')
        # path to evaluation statistics
        self.stats_path = os.path.join(self.path, 'data',
                                       'eval_stats', self.name, '')
        # Sensor datasets at the recorded, 1-hour, and 24-hour intervals.
        rec_int = self.sensor.recording_interval
        self.full_df_list = list(self.sensor.data[rec_int].values())
        self.hourly_df_list = list(self.sensor.data['1-hour'].values())
        self.daily_df_list = list(self.sensor.data['24-hour'].values())
        # Compute sensor deployment period and concurrent deployment groups
        self.deploy_period_df = sensortoolkit.deploy.deployment_period(
            self.full_df_list,
            self.name,
            self.serials)
        self.deploy_dict = sensortoolkit.deploy.construct_deploy_dict(
            self.deploy_period_df,
            self.full_df_list,
            self.hourly_df_list,
            self.daily_df_list,
            self.name,
            **self.kwargs)
        # Overall deployment window: earliest start / latest end across
        # groups, unless explicitly overridden via kwargs.
        deploy_grps = self.deploy_dict['Deployment Groups']
        deploy_bdate = min([pd.to_datetime(deploy_grps[grp]['eval_start'])
                            for grp in deploy_grps.keys()])
        self.deploy_bdate = self.kwargs.get('deploy_bdate', deploy_bdate)
        deploy_edate = max([pd.to_datetime(deploy_grps[grp]['eval_end'])
                            for grp in deploy_grps.keys()])
        self.deploy_edate = self.kwargs.get('deploy_edate', deploy_edate)
        # Sets hourly/daily reference datasets and self.ref_name.
        self._assign_refdata_objs()
        # Compute normalized param values
        # NOTE(review): the daily normalization also receives the *hourly*
        # reference dataset — confirm this is intended.
        self.hourly_df_list = sensortoolkit.calculate.normalize(
            self.hourly_df_list,
            self.hourly_ref_df,
            param=self._param_name,
            ref_name=self.ref_name)
        self.daily_df_list = sensortoolkit.calculate.normalize(
            self.daily_df_list,
            self.hourly_ref_df,
            param=self._param_name,
            ref_name=self.ref_name)
        # Compute inter-sensor averaged parameter dataframes
        self.avg_hrly_df = sensortoolkit.calculate.intersensor_mean(
            self.hourly_df_list,
            self.deploy_dict)
        self.avg_daily_df = sensortoolkit.calculate.intersensor_mean(
            self.daily_df_list,
            self.deploy_dict)
        # Filled in later by the statistics methods.
        self.stats_df = pd.DataFrame()
        self.avg_stats_df = pd.DataFrame()
def _assign_refdata_objs(self):
# Retrieve reference data
self.ref_dict = self.reference.data
# Set reference dataframe based on evaluation parameter classification
self.hourly_ref_df = self.ref_dict[self.param.classifier]['1-hour']
hourly_ref_idx = self.hourly_ref_df.index
ref_param_cols = ['_Value', '_Unit', '_QAQC_Code', '_Param_Code',
'_Method', '_Method_Code', '_Method_POC']
site_cols = ['Agency', 'Site_Name', 'Site_AQS',
'Site_Lat', 'Site_Lon', 'Data_Source',
'Data_Acquisition_Date_Time']
# Unpack the ref data into dataframes. If no reference data found,
# return a dataframe backfilled with nulls.
if not self.ref_dict['PM']['1-hour'].empty:
self.pm_hourly_ref_df = self.ref_dict['PM']['1-hour']
self.pm_daily_ref_df = self.ref_dict['PM']['24-hour']
else:
cols = ['PM25' + col for col in ref_param_cols]
cols = cols + site_cols
self.pm_hourly_ref_df = pd.DataFrame(np.nan,
index=hourly_ref_idx,
columns=cols,
dtype=object)
# Replace null method names with 'Unspecified Reference'
for col_name in [col for col in cols if col.endswith('_Method')]:
self.pm_hourly_ref_df[col_name] = 'Unknown Reference'
self.pm_daily_ref_df = sensortoolkit.datetime_utils.interval_averaging(
self.pm_hourly_ref_df,
freq='D',
interval_count=24,
thres=0.75)
if not self.ref_dict['Gases']['1-hour'].empty:
self.gas_hourly_ref_df = self.ref_dict['Gases']['1-hour']
self.gas_daily_ref_df = self.ref_dict['Gases']['24-hour']
else:
cols = ['O3' + col for col in ref_param_cols]
cols = cols + site_cols
self.gas_hourly_ref_df = pd.DataFrame(np.nan,
index=hourly_ref_idx,
columns=cols,
dtype=object)
# Replace null method names with 'Unspecified Reference'
for col_name in [col for col in cols if col.endswith('_Method')]:
self.gas_hourly_ref_df[col_name] = 'Unknown Reference'
self.gas_daily_ref_df = sensortoolkit.datetime_utils.interval_averaging(
self.gas_hourly_ref_df,
freq='D',
interval_count=24,
thres=0.75)
if not self.ref_dict['Met']['1-hour'].empty:
self.met_hourly_ref_df = self.ref_dict['Met']['1-hour']
self.met_daily_ref_df = self.ref_dict['Met']['24-hour']
else:
cols = [met_param + col for col in ref_param_cols
for met_param in ['RH', 'Temp']]
cols = cols + site_cols
self.met_hourly_ref_df = pd.DataFrame(np.nan,
index=hourly_ref_idx,
columns=cols,
dtype=object)
# Replace null method names with 'Unspecified Reference'
for col_name in [col for col in cols if col.endswith('_Method')]:
self.met_hourly_ref_df[col_name] = 'Unknown Reference'
self.met_daily_ref_df = sensortoolkit.datetime_utils.interval_averaging(
self.met_hourly_ref_df,
freq='D',
interval_count=24,
thres=0.75)
# Get the name of the reference monitor
self.ref_name = self.reference.get_method_name(self.param.name)
self.daily_ref_df = self.ref_dict[self.param.classifier]['24-hour']
def add_deploy_dict_stats(self):
"""Populate deployment dictionary with statistical metrics.
Add precision and error performance targets metrics, include details
about reference (for selected evaluation parameter) and monitor
statistics for meteorological parameters (Temp, RH).
Calculates:
- CV for 1-hour averaged sensor datasets
- CV for 24-hour averaged sensor datasets
- RMSE for 1-hour averaged sensor datasets
- RMSE for 24-hour averaged sensor datasets
- Reference monitor concentration range, mean concentration during
testing period for 1-hour averaged measurements
- Reference monitor concentration range, mean concentration during
testing period for 24-hour averaged measurements
- Meteorological monitor measurement range, mean value for temperature
and/or relative humidity measurements at 1-hour intervals
- Meteorological monitor measurement range, mean value for temperature
and/or relative humidity measurements at 24-hour intervals
Populates:
- ``SensorEvaluation.deploy_dict``
Writes Files:
- Deployment dictionary
Returns:
None.
"""
# Compute inter-sensor precision and error metric values
# CV: 1-hour averaged sensor param
self.deploy_dict = sensortoolkit.calculate.cv(
self.hourly_df_list,
self.deploy_dict,
param=self._param_name)
# CV: 24-hour averaged sensor param
self.deploy_dict = sensortoolkit.calculate.cv(
self.daily_df_list,
self.deploy_dict,
param=self._param_name)
# RMSE: 1-hour averaged sensor param
self.deploy_dict = sensortoolkit.calculate.rmse(
self.hourly_df_list,
self.hourly_ref_df,
self.deploy_dict,
param=self._param_name)
# RMSE: 24-hour averaged sensor param
self.deploy_dict = sensortoolkit.calculate.rmse(
self.daily_df_list,
self.daily_ref_df,
self.deploy_dict,
param=self._param_name)
# Reference details for param evaluation (hourly data)
self.deploy_dict = sensortoolkit.deploy.deploy_ref_stats(
self.deploy_dict,
self.hourly_ref_df,
param=self._param_name,
ref_name=self.ref_name)
# Reference details for param evaluation (daily data)
self.deploy_dict = sensortoolkit.deploy.deploy_ref_stats(
self.deploy_dict,
self.daily_ref_df,
param=self._param_name,
ref_name=self.ref_name)
# Reference details for meteorological data (1-hr averages)
self.deploy_dict = sensortoolkit.deploy.deploy_met_stats(
self.deploy_dict,
self.hourly_df_list,
self.met_hourly_ref_df)
# Reference details for meteorological data (24-hr averages)
self.deploy_dict = sensortoolkit.deploy.deploy_met_stats(
self.deploy_dict,
self.daily_df_list,
self.met_daily_ref_df)
if self.write_to_file is True:
today = sensortoolkit.datetime_utils.get_todays_date()
# check if sensor-specific subfolder exists
if not os.path.exists(self.stats_path):
os.makedirs(self.stats_path)
with open(self.stats_path + self.name + '_' +
self._param_name + "_Evaluation_" + today +
".json", "w") as outfile:
deploy_json = json.dumps(self.deploy_dict, indent=4)
outfile.write(deploy_json)
def calculate_metrics(self):
"""Compute hourly, daily, and inter-sensor statistics dataframes.
.. note::
``calculate_metrics()`` will check whether
``SensorEvaluation.deploy_dict`` has been populated with statistics
via the ``add_deploy_dict_stats()`` method and will call this method
if the dictionary has not been populated yet.
Calculates:
- 1-hour averaged sensor vs. reference regression statistics for each
sensor
- 24-hour averaged sensor vs. reference regression statistics for each
sensor
- 1-hour averaged sensor vs. intersensor average regression statistics
for each sensor
- 24-hour averaged sensor vs. intersensor average regression statistics
for each sensor
Populates:
- ``SensorEvaluation.stats_df``
- ``SensorEvaluation.avg_stats_df``
Writes Files:
- Statistics DataFrame - Sensor vs. FRM/FEM
- Statistics DataFrame - Sensor vs. Intersensor Average
Returns:
None.
"""
try:
self.deploy_dict['Deployment Groups']['Group 1'][self._param_name]
except KeyError:
print('Populating deployment dataframe with evaluation statistics')
self.add_deploy_dict_stats()
hourly_stats = sensortoolkit.calculate.regression_stats(
sensor_df_obj=self.hourly_df_list,
ref_df_obj=self.hourly_ref_df,
deploy_dict=self.deploy_dict,
param=self._param_name,
serials=self.serials
)
daily_stats = sensortoolkit.calculate.regression_stats(
sensor_df_obj=self.daily_df_list,
ref_df_obj=self.daily_ref_df,
deploy_dict=self.deploy_dict,
param=self._param_name,
serials=self.serials
)
# Combine the statistics dataframes into one
self.stats_df = sensortoolkit.calculate.join_stats(
hourly_stats,
daily_stats,
stats_path=self.stats_path,
stats_type='individual',
write_to_file=self.write_to_file)
avg_hourly_stats = sensortoolkit.calculate.regression_stats(
sensor_df_obj=self.hourly_df_list,
ref_df_obj=self.hourly_ref_df,
deploy_dict=self.deploy_dict,
param=self._param_name,
serials=self.serials
)
avg_daily_stats = sensortoolkit.calculate.regression_stats(
sensor_df_obj=self.daily_df_list,
ref_df_obj=self.daily_ref_df,
deploy_dict=self.deploy_dict,
param=self._param_name,
serials=self.serials
)
# Combine the statistics dataframes into one
self.avg_stats_df = sensortoolkit.calculate.join_stats(
avg_hourly_stats,
avg_daily_stats,
stats_path=self.stats_path,
stats_type='average',
write_to_file=self.write_to_file)
    def plot_timeseries(self, report_fmt=True, **kwargs):
        """Plot sensor and FRM/FEM reference measurements over time.

        Sensor measurements are indicated by distinct colors in a discrete
        color palette. FRM/FEM measurements are shown as black lines. The
        x-axis indicates the date in 5-day increments (default, although
        customizable). Measurement values are plotted along the y-axis.

        Args:
            report_fmt (bool, optional):
                If true, format figure for inclusion in a performance report.
                Defaults to True.
            **kwargs (dict): Plotting keyword arguments. ``param`` and
                ``averaging_interval`` are consumed here; the rest are
                forwarded to ``sensortoolkit.plotting.sensor_timeplot()``.

        Returns:
            None.
        """
        timestamp_fmt = '%Y-%m-%d %H:%M:%S'
        # Pad the plotted range by one day on either side of the first/last
        # hourly timestamps containing any sensor data
        t_start = (self.avg_hrly_df.dropna(how='all', axis=0).index[0] -
                   pd.Timedelta('1D')).strftime(timestamp_fmt)
        t_end = (self.avg_hrly_df.dropna(how='all', axis=0).index[-1] +
                 pd.Timedelta('1D')).strftime(timestamp_fmt)
        avg_list = self.param.averaging
        # Allow callers to plot a parameter other than the evaluation param;
        # pop it so it is not passed twice to the plotting routine
        param = kwargs.get('param', self._param_name)
        kwargs.pop('param', None)
        if len(avg_list) == 2 and report_fmt is True:
            # Report format: stacked subplots, one per averaging interval
            fig, axs = plt.subplots(2, 1, figsize=(10.15, 4.1))
            fig.subplots_adjust(hspace=0.7)
            for i, averaging_interval in enumerate(avg_list):
                if averaging_interval == '1-hour':
                    sensor_data = self.hourly_df_list
                if averaging_interval == '24-hour':
                    sensor_data = self.daily_df_list
                ref_data = self.ref_dict[sensortoolkit.Parameter(param).classifier][averaging_interval]
                ref_name = self.reference.get_method_name(self.param.name)
                # Prevent Sensor_Timeplot from writing to file on first
                # iteration of loop
                if i == 0:
                    write_to_file = False
                if i == len(avg_list) - 1:
                    write_to_file = self.write_to_file
                axs[i] = sensortoolkit.plotting.sensor_timeplot(
                    sensor_data,
                    ref_data,
                    sensor_serials=self.serials,
                    param=param,
                    figure_path=self.figure_path,
                    sensor_name=self.name,
                    ref_name=ref_name,
                    bdate=t_start,
                    edate=t_end,
                    averaging_interval=averaging_interval,
                    report_fmt=report_fmt,
                    write_to_file=write_to_file,
                    ax=axs[i],
                    fig=fig,
                    **kwargs)
                # Only the lower subplot keeps its legend
                if i == 0:
                    axs[i].get_legend().remove()
        else:
            # Single-panel plot at the requested averaging interval
            averaging_interval = kwargs.get('averaging_interval', '1-hour')
            kwargs.pop('averaging_interval', None)
            if '1-hour' in avg_list and averaging_interval == '1-hour':
                sensor_data = self.hourly_df_list
            if '24-hour' in avg_list and averaging_interval == '24-hour':
                sensor_data = self.daily_df_list
            ref_data = self.ref_dict[sensortoolkit.Parameter(param).classifier][averaging_interval]
            ref_name = ref_data[f'{param}_Method'].unique()[0]
            # sensor_data is unassigned when the interval was invalid for
            # this parameter; exit with the NameError message in that case
            try:
                sensor_data
            except NameError as error:
                sys.exit(error)
            sensortoolkit.plotting.sensor_timeplot(
                sensor_data,
                ref_data,
                sensor_serials=self.serials,
                param=param,
                figure_path=self.figure_path,
                sensor_name=self.name,
                ref_name=ref_name,
                bdate=t_start,
                edate=t_end,
                averaging_interval=averaging_interval,
                report_fmt=report_fmt,
                write_to_file=self.write_to_file,
                **kwargs)
def plot_metrics(self, **kwargs):
"""Regression dot/boxplots for U.S EPA performance metrics and targets
developed for PM2.5 and O3 sensor evaluations.
Results for the following metrics are shown:
- Linearity:
- :math:`R^2`: The coefficient of determination, which is a measure
of linearity between sensor
and reference measurement pairs.
- Bias:
- Slope: The slope of the ordinary least-squares regression between
sensor (y-axis) and
reference (x-axis) measurements.
- Intercept: The intercept term of the ordinary least-squares
regression between sensor (y-axis) and
reference (x-axis) measurements.
- Error:
- :math:`RMSE`: The root mean square error between sensor and
reference measurements.
- :math:`NRMSE`: The normalized root mean square error between sensor
and reference measurements, where RMSE has been normalized by the
mean reference concentration during the testing period.
- Precision:
- :math:`CV`: The coefficient of variation of concurrently recorded
sensor measurements.
- :math:`SD`: The standard deviation of concurrently recorded sensor
measurements.
Results are shown as either colored dots (if the number of sensors is
less than four) or as boxplots (if the number of sensors exceeds
three). Target ranges are indicated by gray shaded regions, and target
goals are indicated by dark gray lines. Results are grouped by data
averaging interval, including 1-hour and 24-hour intervals (note that
some pollutants such as O3 are analyzed only at 1-hour intervals due to
significant diurnal variability, so the formatting of the figure will
depend on which averaging interval(s) are indicated for the parameter
via the ``sensortoolkit.Parameter.averaging`` attribute).
Args:
**kwargs (dict): Plotting keyword arguments.
Returns:
None.
"""
try:
self.deploy_dict['Deployment Groups']['Group 1'][self._param_name]
except KeyError:
print('Populating deployment dataframe with evaluation statistics')
self.add_deploy_dict_stats()
if self.stats_df.empty:
print('Calculating OLS regression statistics for 1-hr and 24-hr '
'sensor vs. reference measurements')
self.calculate_metrics()
sensortoolkit.plotting.performance_metrics(
self.stats_df,
self.deploy_dict,
param=self._param_name,
param_averaging=self.param.averaging,
path=self.figure_path,
sensor_name=self.name,
write_to_file=self.write_to_file,
**kwargs)
    def plot_sensor_scatter(self, averaging_interval='24-hour',
                            plot_subset=None, **kwargs):
        """Plot sensor vs FRM/FEM reference measurement pairs as scatter.

        FRM/FEM reference concentrations are plotted along the x-axis, and
        sensor concentrations are plotted along the y-axis. Measurement pairs
        (i.e., concentration values for sensor and reference datasets recorded
        at matching timestamp entries) are colored by the relative humidity
        recorded by an independent meteorological instrument at the monitoring
        site if RH data are located within the ``reference_object.data['Met']``
        DataFrame.

        Args:
            averaging_interval (str, optional):
                The measurement averaging intervals commonly utilized for
                analyzing data corresponding the the selected parameter.
                Defaults to '24-hour'.
            plot_subset (list, optional):
                A list of either sensor serial IDs or the keys associated with
                the serial IDs in the serial dictionary. Defaults to None.

        **Keyword Arguments**

        :param dict report_fmt:
            For displaying scatter plots on the
            first page of the performance report included alongside U.S. EPA's
            documents outlining recommended testing protocols, performance
            metrics, and target values. Defaults to False.
        :param **kwargs:
            Additional keyword arguments passed to the underlying
            ``sensortoolkit.plotting.scatter_plotter()`` method.

        Returns:
            None.
        """
        report_fmt = kwargs.get('report_fmt', False)
        # Avoids multiple args passed to same param
        kwargs.pop('report_fmt', None)
        # Lazily populate the deployment dictionary on first use
        try:
            self.deploy_dict['Deployment Groups']['Group 1'][self._param_name]
        except KeyError:
            print('Populating deployment dataframe with evaluation statistics')
            self.add_deploy_dict_stats()
        # Lazily compute regression statistics on first use
        if self.stats_df.empty:
            print('Calculating OLS regression statistics for 1-hr and 24-hr '
                  'sensor vs. reference measurements')
            self.calculate_metrics()
        avg_list = self.param.averaging
        # Figuring out averaging intervals is done if report_fmt true, no
        # need to check for invalid intervals passed (will be ignored in favor
        # of intervals specified by Parameter.averaging)
        if not report_fmt and averaging_interval not in avg_list:
            txt = ('Invalid averaging interval, choose from the following: '
                   + ', '.join(avg_list))
            sys.exit(txt)
        if (report_fmt is True and plot_subset is not None):
            if len(avg_list) == 2:
                # Create a 1x2 subplot, 1-hr scatter on left and 24-hr scatter
                # on right for a single sensor unit (performance report page
                # 1 plot)
                figsize = (5.29, 3.17)
            elif len(avg_list) == 1:
                # Create a 1x1 subplot, 1-hr scatter with vertical colorbar
                figsize = (4.3, 3.91)
            else:
                sys.exit('Reporting template formatted '
                         'figure not specified for ' + self._param_name)
            fig, axs = plt.subplots(1, len(avg_list), figsize=figsize)
            fig.subplots_adjust(hspace=0.7)
            for i, averaging_interval in enumerate(self.param.averaging):
                if averaging_interval == '1-hour':
                    sensor_data = self.hourly_df_list
                    ref_data = self.hourly_ref_df
                    met_data = self.met_hourly_ref_df
                if averaging_interval == '24-hour':
                    sensor_data = self.daily_df_list
                    ref_data = self.daily_ref_df
                    met_data = self.met_daily_ref_df
                # Prevent sub-routine from writing to file on first
                # iteration of loop, also dont draw cbar on first loop
                if i == 0:
                    write_to_file = False
                    kwargs['draw_cbar'] = False
                if i == len(self.param.averaging) - 1:
                    write_to_file = self.write_to_file
                    kwargs['draw_cbar'] = True
                # plt.subplots returns a bare Axes (not an array) when only
                # one subplot was created; handle both shapes
                if isinstance(axs, np.ndarray):
                    ax = axs[i]
                    multiplot = True
                else:
                    ax = axs
                    multiplot = False
                ax = sensortoolkit.plotting.scatter_plotter(
                    sensor_data,
                    ref_data,
                    self.stats_df,
                    deploy_dict=self.deploy_dict,
                    met_ref_df=met_data,
                    sensor_serials=self.serials,
                    param=self._param_name,
                    figure_path=self.figure_path,
                    sensor_name=self.name,
                    ref_name=self.ref_name,
                    averaging_interval=averaging_interval,
                    plot_subset=plot_subset,
                    write_to_file=write_to_file,
                    report_fmt=True,
                    ax=ax,
                    fig=fig,
                    **kwargs)
                if multiplot:
                    axs[i] = ax
                else:
                    axs = ax
        # Create scatter for all sensors in an evaluation at a specified
        # averaging interval
        else:
            report_fmt = False
            # Assuming avg_list contains either only 1-hour or 24-hour
            if '1-hour' in avg_list and averaging_interval == '1-hour':
                sensor_data = self.hourly_df_list
                ref_data = self.hourly_ref_df
            if '24-hour' in avg_list and averaging_interval == '24-hour':
                sensor_data = self.daily_df_list
                ref_data = self.daily_ref_df
            # sensor_data is unassigned when the interval was invalid for
            # this parameter; exit with the NameError message in that case
            try:
                sensor_data
            except NameError as error:
                sys.exit(error)
            sensortoolkit.plotting.scatter_plotter(
                sensor_data,
                ref_data,
                self.stats_df,
                deploy_dict=self.deploy_dict,
                met_ref_df=self.met_hourly_ref_df,
                sensor_serials=self.serials,
                param=self._param_name,
                figure_path=self.figure_path,
                sensor_name=self.name,
                ref_name=self.ref_name,
                averaging_interval=averaging_interval,
                plot_subset=plot_subset,
                report_fmt=report_fmt,
                write_to_file=self.write_to_file,
                **kwargs)
def plot_met_dist(self):
"""Plot the distribution of temperature and RH recorded by
meterological instruments at the collocation site.
Displays the relative frequency of meteorological measurements recorded
during the testing period. Temperature (left) and relative humidity
(right) measurements are displayed on separate subplots. Measurements
are grouped into 15 bins, and the frequency of measurements within bin
is normalized by the total number of measurements (i.e., the relative
frequency) is displayed as a histogram. Additionally, a polynomial
estimating the kernel density of measurements is shown for each subplot
and indicates the general distribution of measurements over the range
of recorded values.
This method will prioritize plotting meteorological measurements made
by reference instruments, as sensor measurements are commonly biased
warmer and drier than ambient conditions if measurements are made by
an onboard sensing component within the housing of the air sensor. If
no meteorological reference measurements are available, the method will
use sensor measurements; however, a disclaimer will displayed above
subplots indicating that sensor measurements are shown in the figure.
Returns:
None.
"""
met_params = ['Temp_Value', 'RH_Value']
sensortoolkit.plotting.met_distrib(self.met_hourly_ref_df[met_params],
self.avg_hrly_df,
figure_path=self.figure_path,
sensor_name=self.name,
write_to_file=self.write_to_file)
    def plot_met_influence(self, met_param='Temp', report_fmt=True,
                           **kwargs):
        """Plot the influence meteorological parameters (temperature or
        relative humidity) on sensor measurements.

        Sensor measurements that have been normalized by reference measurement
        values for the corresponding timestamp and are plotted along the
        y-axis. Meteorological measurements as measured by temperature or
        relative humidity monitors (rather than onboard sensor measurements)
        are plotted along the x-axis. Scatter for each sensor are displayed as
        separate colors to indicate the unique response of each sensor unit.
        A gray 1:1 line indicates ideal agreement between sensor and reference
        measurements over the range of meteorological conditions: scatter
        below the line indicates underestimation bias, scatter above it
        indicates overestimation bias.

        Args:
            met_param (str, optional):
                Either ``'Temp'`` for displaying the influence of temperature
                or ``'RH'`` for displaying the influence of relative humidity.
                Ignored when ``report_fmt`` is True (both are plotted).
                Defaults to 'Temp'.
            report_fmt (bool, optional):
                If true, format figure for inclusion in a performance report.
                Defaults to True.
            **kwargs (dict): Plotting keyword arguments.

        Returns:
            None.
        """
        # Reference data header names for met data
        valid_met_params = ['Temp', 'RH']
        if report_fmt is True:
            # Report format: Temp and RH scatter shown side by side
            fig, axs = plt.subplots(1, 2, figsize=(8.1, 3.8))
            fig.subplots_adjust(hspace=0.7)
            kwargs['fontsize'] = kwargs.get('fontsize', 10)
            kwargs['ylims'] = kwargs.get('ylims', (-.3, 4))
            for i, m_param in enumerate(valid_met_params):
                # Prevent writing to file on first iteration of loop
                if i == 0:
                    write_to_file = False
                if i == 1:
                    write_to_file = self.write_to_file
                axs[i] = sensortoolkit.plotting.normalized_met_scatter(
                    self.hourly_df_list,
                    self.hourly_ref_df,
                    self.avg_hrly_df,
                    self.met_hourly_ref_df,
                    self.figure_path,
                    param=self._param_name,
                    sensor_serials=self.serials,
                    sensor_name=self.name,
                    met_param=m_param,
                    ref_name=self.ref_name,
                    write_to_file=write_to_file,
                    report_fmt=report_fmt,
                    fig=fig,
                    ax=axs[i],
                    **kwargs)
                # Only the right (RH) subplot keeps its legend
                if i == 0:
                    axs[i].get_legend().remove()
        else:
            # Either Temp or RH must be passed to met_param if not using report
            # formatting. Report formatted plots dont require a value for
            # met_param as both Temp and RH scatter are automatically plotted.
            if met_param not in valid_met_params:
                sys.exit(f'Invalid parameter name: {met_param}')
            sensortoolkit.plotting.normalized_met_scatter(
                self.hourly_df_list,
                self.hourly_ref_df,
                self.avg_hrly_df,
                self.met_hourly_ref_df,
                self.figure_path,
                param=self._param_name,
                sensor_serials=self.serials,
                sensor_name=self.name,
                met_param=met_param,
                ref_name=self.ref_name,
                write_to_file=self.write_to_file,
                **kwargs)
def plot_sensor_met_scatter(self, averaging_interval='1-hour',
met_param='Temp',
**kwargs):
"""Plot internal sensor temp or RH measurements against collocated
reference monitor measurements.
Plots generated by this method:
* Internal sensor RH vs Reference monitor RH
* Internal sensor Temp vs Reference monitor Temp
Sensor measurements are plotted along the y-axis with reference
measurements along the x-axis. Statistical quantities are displayed
for each scatter plot including the ordinary least-squares (OLS)
regression equation, R^2, RMSE, and N (the number of measurement
pairs). The one-to-one line (indicating ideal agreement between
sensor and reference measurements) is shown as a dashed gray line.
Args:
averaging_interval (str, optional):
The measurement averaging intervals commonly utilized for
analyzing data corresponding the the selected parameter.
Defaults to '1-hour'.
met_param (str, optional):
The meteorological parameter to display. Defaults to None.
**kwargs (dict):
Plotting keyword arguments.
Returns:
None.
"""
# Data header names for met data
met_params = ['Temp', 'RH']
if met_param not in met_params:
sys.exit('Invalid parameter name: ' + str(met_param))
if averaging_interval not in self.param.averaging:
txt = ('Invalid averaging interval, choose from the following: '
+ ', '.join(self.param.averaging))
sys.exit(txt)
if averaging_interval == '1-hour':
sensor_data = self.hourly_df_list
ref_data = self.met_hourly_ref_df
if averaging_interval == '24-hour':
sensor_data = self.daily_df_list
ref_data = self.met_daily_ref_df
ref_name = ref_data[met_param + '_Method'].unique()[0]
ymin = math.floor(self.avg_hrly_df[
'mean_' + met_param + '_Value'].min())
ymax = round(self.avg_hrly_df[
'mean_' + met_param + '_Value'].max(), -1)
xmin, xmax = ymin, ymax
try:
self.deploy_dict['Deployment Groups']['Group 1'][self._param_name]
except KeyError:
print('Populating deployment dataframe with evaluation statistics')
self.add_deploy_dict_stats()
try:
self.stats_df
except AttributeError:
print('Calculating OLS regression statistics for 1-hr and 24-hr '
'sensor vs. reference measurements')
self.calculate_metrics()
fontsize = sensortoolkit.plotting.set_fontsize(self.serials)
# Set keyword argument values to defaults or passed values
kwargs['fontsize'] = kwargs.get('fontsize', fontsize)
kwargs['ylims'] = kwargs.get('ylims', (ymin, ymax))
kwargs['xlims'] = kwargs.get('xlims', (xmin, xmax))
kwargs['param_class'] = 'Met'
kwargs['tick_spacing'] = kwargs.get('tick_spacing', 10)
kwargs['show_colorbar'] = False
sensortoolkit.plotting.scatter_plotter(
sensor_data,
ref_data,
deploy_dict=self.deploy_dict,
param=met_param,
sensor_name=self.name,
ref_name=ref_name,
averaging_interval=averaging_interval,
figure_path=self.figure_path,
write_to_file=self.write_to_file,
sensor_serials=self.serials,
**kwargs)
    def print_eval_metrics(self, averaging_interval='24-hour'):
        """Display a summary of performance evaluation results using
        EPA’s recommended performance metrics (‘PM25’ and ‘O3’).

        The coefficient of variation, sensor vs FRM/FEM OLS regression slope,
        intercept, and R2, and RMSE are displayed. Regression statistics
        are computed for each sensor, and the mean metric value is
        presented alongside the range (min to max).

        Args:
            averaging_interval (str, optional):
                The measurement averaging intervals commonly utilized for
                analyzing data corresponding the the selected parameter.
                Defaults to '24-hour'.

        Returns:
            None.
        """
        # Lazily populate the deployment dictionary and regression stats
        try:
            self.deploy_dict['Deployment Groups']['Group 1'][self._param_name]
        except KeyError:
            print('Populating deployment dataframe with evaluation statistics')
            self.add_deploy_dict_stats()
        if self.stats_df.empty:
            self.calculate_metrics()
        param = self._param_name
        deploy_dic = self.deploy_dict
        # Restrict stats to the requested averaging interval (non-matching
        # rows become NaN and are ignored by mean/min/max)
        deploy_stats = self.stats_df.where(
            self.stats_df['Averaging Interval'] == averaging_interval)
        # 88-character-wide console table
        print(88*'-')
        print('{:^88s}'.format(self.name + ' '
                               + averaging_interval +
                               ' Performance Evaluation Results'))
        print('{:^88s}'.format('Reference Method: ' + self.ref_name))
        print(88*'-')
        print('{:^6s}|{:^24s}|{:^24s}|{:^24s}|{:^6s}'.format('CV', 'Slope',
              'Intercept', 'R^2', 'RMSE'))
        print(88*'-')
        # CV and RMSE come from the deployment dictionary, one entry per
        # deployment group (only the first group is printed below)
        cv_data = [(deploy_dic['Deployment Groups'][group]
                    [param]['Precision']['cv_' + averaging_interval])
                   for group in deploy_dic['Deployment Groups']]
        slope_avg = deploy_stats.Slope.mean()
        slope_min = deploy_stats.Slope.min()
        slope_max = deploy_stats.Slope.max()
        intercept_avg = deploy_stats.Intercept.mean()
        intercept_min = deploy_stats.Intercept.min()
        # NOTE(review): double underscore in 'intercept__max' looks like a
        # typo-style local name; harmless but worth renaming in a code change
        intercept__max = deploy_stats.Intercept.max()
        linearity_avg = deploy_stats['R$^2$'].mean()
        linearity_min = deploy_stats['R$^2$'].min()
        linearity_max = deploy_stats['R$^2$'].max()
        rmse_data = [(deploy_dic['Deployment Groups'][group]
                      [param]['Error']['rmse_' + averaging_interval])
                     for group in deploy_dic['Deployment Groups']]
        # Row of mean metric values
        print(('{:^6.1f}|{:^24.2f}|'
               '{:^24.2f}|{:^24.2f}|{:^6.1f}').format(cv_data[0],
                                                      slope_avg,
                                                      intercept_avg,
                                                      linearity_avg,
                                                      rmse_data[0]))
        # Row of (min to max) ranges beneath the means
        print(5*' ',
              ('| ({:4.2f} to {:4.2f}) '
               '| ({:4.2f} to {:4.2f}) '
               '| ({:4.2f} to {:4.2f}) |').format(slope_min,
                                                  slope_max,
                                                  intercept_min,
                                                  intercept__max,
                                                  linearity_min,
                                                  linearity_max),
              5*' ')
    def print_eval_conditions(self, averaging_interval='24-hour'):
        """Display conditions for the evaluation parameter and meteorological
        conditions during the testing period.

        Values for the evaluation parameter recorded by the sensor, FRM/FEM
        instrument, and temperature and relative humidity values are
        displayed by the mean of 1-hour or 24-hour averages during the
        testing period. The range (min to max) of each parameter is listed
        below the mean in parentheses.

        Args:
            averaging_interval (str, optional):
                The measurement averaging intervals commonly utilized for
                analyzing data corresponding the the selected parameter.
                Defaults to '24-hour'.

        Returns:
            None.
        """
        # Lazily populate the deployment dictionary and regression stats
        try:
            self.deploy_dict['Deployment Groups']['Group 1'][self._param_name]
        except KeyError:
            print('Populating deployment dataframe with evaluation statistics')
            self.add_deploy_dict_stats()
        if self.stats_df.empty:
            self.calculate_metrics()
        # Select the reference and met datasets matching the interval
        if averaging_interval == '1-hour':
            ref_df = self.hourly_ref_df
            met_ref_df = self.met_hourly_ref_df
        if averaging_interval == '24-hour':
            ref_df = self.daily_ref_df
            met_ref_df = self.met_daily_ref_df
        deploy_dict = self.deploy_dict
        # Keep only stats rows for the requested averaging interval
        deploy_stats = self.stats_df.where(
            self.stats_df['Averaging Interval'] == averaging_interval
            ).dropna(how='all', axis=0)
        n_sensors = len(self.serials)
        # 88-character-wide console table
        print(88*'-')
        print('{:^88s}'.format(self.name + ' (' + str(n_sensors) + ') '
                               + averaging_interval +
                               ' Evaluation Conditions'))
        print(88*'-')
        print('{:^14s}|{:^14s}|{:^14s}|{:^14s}|{:^14s}|{:^14s}'.format(
                'Eval period', 'Duration', 'Sensor ' + self._param_name,
                'Ref ' + self._param_name, 'Temp', 'RH'))
        print(88*'-')
        deploy_loc = deploy_dict['Deployment Groups']
        # Start/end/duration per deployment group (only the first group is
        # printed below)
        eval_start = [pd.to_datetime(deploy_loc[group]['eval_start']
                                     ).strftime('%m-%d-%y')
                      for group in deploy_loc]
        eval_end = [pd.to_datetime(deploy_loc[group]['eval_end']
                                   ).strftime('%m-%d-%y')
                    for group in deploy_loc]
        eval_duration = [str(pd.to_timedelta(
                                deploy_loc[group]['eval_duration']
                                ).round('D').days) + ' days'
                         for group in deploy_dict['Deployment Groups']]
        # Mean and range of sensor, reference, and met measurements
        sensor_min = format(deploy_stats.Sensor_Min.min(), '3.1f')
        sensor_max = format(deploy_stats.Sensor_Max.max(), '3.1f')
        sensor_mean = format(deploy_stats.Sensor_Mean.mean(), '3.1f')
        ref_min = format(ref_df[self._param_name + '_Value'].min(), '3.1f')
        ref_max = format(ref_df[self._param_name + '_Value'].max(), '3.1f')
        ref_mean = format(ref_df[self._param_name + '_Value'].mean(), '3.1f')
        temp_min = format(met_ref_df['Temp_Value'].min(), '2.0f')
        temp_max = format(met_ref_df['Temp_Value'].max(), '2.0f')
        temp_mean = format(met_ref_df['Temp_Value'].mean(), '2.0f')
        rh_min = format(met_ref_df['RH_Value'].min(), '2.0f')
        rh_max = format(met_ref_df['RH_Value'].max(), '2.0f')
        rh_mean = format(met_ref_df['RH_Value'].mean(), '2.0f')
        # First row: start date, duration, and mean values
        print(('{:^14s}|{:^14s}|{:^14s}|{:^14s}|{:^14s}|{:^14s}'
               ).format(eval_start[0]+'-',
                        eval_duration[0],
                        sensor_mean,
                        ref_mean,
                        temp_mean,
                        rh_mean))
        # Second row: end date and (min to max) ranges
        print(('{:^14s}|{:^14s}|{:^14s}|{:^14s}|{:^14s}|{:^14s}'
               ).format(eval_end[0],
                        '',
                        '(' + sensor_min + ' to ' + sensor_max + ')',
                        '(' + ref_min + ' to ' + ref_max + ')',
                        '(' + temp_min + ' to ' + temp_max + ')',
                        '(' + rh_min + ' to ' + rh_max + ')'))
| 45.846825 | 103 | 0.540741 |
import math
import json
import sys
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sensortoolkit.calculate
import sensortoolkit.datetime_utils
import sensortoolkit.deploy
import sensortoolkit.lib_utils
import sensortoolkit.model
import sensortoolkit.param
import sensortoolkit.plotting
import sensortoolkit.qc
import sensortoolkit.reference
import sensortoolkit.ingest
from sensortoolkit import presets as _presets
class SensorEvaluation:
    def __init__(self, sensor, param, reference, write_to_file=False,
                 **kwargs):
        """Set up a sensor performance evaluation against a reference monitor.

        Args:
            sensor: Air sensor object; must expose ``name``, ``project_path``,
                ``serials``, ``param_headers``, ``recording_interval``, and
                loaded ``data`` (use ``AirSensor.load_data()`` beforehand).
            param: Evaluation parameter object exposing ``name``, and
                ``classifier``/``averaging`` used by sibling methods.
            reference: Reference monitor object exposing ``data`` and
                ``get_method_name()``.
            write_to_file (bool, optional): If True, sibling methods write
                statistics/figures to file. Defaults to False.
            **kwargs: Stored as instance attributes and forwarded to
                ``construct_deploy_dict``; may include ``deploy_bdate`` /
                ``deploy_edate`` overrides.
        """
        self.sensor = sensor
        self.name = sensor.name
        self.reference = reference
        # Fail fast with a hint if sensor datasets were never loaded
        try:
            self.sensor.data
        except AttributeError as error:
            sys.exit(f'{error}, use the AirSensor.load_data() method to import'
                     f' data')
        self.path = sensor.project_path
        self.serials = sensor.serials
        self.param = param
        self._param_name = param.name
        if self._param_name not in self.sensor.param_headers:
            raise AttributeError(f'{self._param_name} is not in the list of '
                                 f'parameters measured by {self.name}')
        self.write_to_file = write_to_file
        self.testing_loc = _presets.test_loc
        self.testing_org = _presets.test_org
        # Expose all passed kwargs as attributes, and keep the raw dict
        self.__dict__.update(**kwargs)
        self.kwargs = kwargs
        # Output locations (trailing '' yields a path ending in a separator)
        self.figure_path = os.path.join(self.path, 'figures', self.name, '')
        self.stats_path = os.path.join(self.path, 'data',
                                       'eval_stats', self.name, '')
        # Sensor datasets at recorded, 1-hour, and 24-hour intervals
        rec_int = self.sensor.recording_interval
        self.full_df_list = list(self.sensor.data[rec_int].values())
        self.hourly_df_list = list(self.sensor.data['1-hour'].values())
        self.daily_df_list = list(self.sensor.data['24-hour'].values())
        self.deploy_period_df = sensortoolkit.deploy.deployment_period(
            self.full_df_list,
            self.name,
            self.serials)
        self.deploy_dict = sensortoolkit.deploy.construct_deploy_dict(
            self.deploy_period_df,
            self.full_df_list,
            self.hourly_df_list,
            self.daily_df_list,
            self.name,
            **self.kwargs)
        # Overall deployment window: earliest start / latest end across groups
        deploy_grps = self.deploy_dict['Deployment Groups']
        deploy_bdate = min([pd.to_datetime(deploy_grps[grp]['eval_start'])
                            for grp in deploy_grps.keys()])
        self.deploy_bdate = self.kwargs.get('deploy_bdate', deploy_bdate)
        deploy_edate = max([pd.to_datetime(deploy_grps[grp]['eval_end'])
                            for grp in deploy_grps.keys()])
        self.deploy_edate = self.kwargs.get('deploy_edate', deploy_edate)
        self._assign_refdata_objs()
        # Normalize sensor measurements by the reference dataset
        self.hourly_df_list = sensortoolkit.calculate.normalize(
            self.hourly_df_list,
            self.hourly_ref_df,
            param=self._param_name,
            ref_name=self.ref_name)
        # NOTE(review): daily data are normalized against the *hourly*
        # reference dataframe here — confirm this is intended
        self.daily_df_list = sensortoolkit.calculate.normalize(
            self.daily_df_list,
            self.hourly_ref_df,
            param=self._param_name,
            ref_name=self.ref_name)
        # Intersensor-average datasets at both intervals
        self.avg_hrly_df = sensortoolkit.calculate.intersensor_mean(
            self.hourly_df_list,
            self.deploy_dict)
        self.avg_daily_df = sensortoolkit.calculate.intersensor_mean(
            self.daily_df_list,
            self.deploy_dict)
        # Placeholders filled by calculate_metrics()
        self.stats_df = pd.DataFrame()
        self.avg_stats_df = pd.DataFrame()
    def _assign_refdata_objs(self):
        """Assign reference dataset attributes for PM, gas, and met data.

        Pulls 1-hour and 24-hour reference dataframes from
        ``self.reference.data``. For each classifier ('PM', 'Gases', 'Met')
        whose 1-hour dataset is empty, builds a placeholder NaN dataframe
        with the expected columns (with '_Method' columns set to
        'Unknown Reference') and derives the 24-hour version by interval
        averaging.
        """
        self.ref_dict = self.reference.data
        # Hourly reference for the evaluation parameter's classifier; its
        # index supplies the timestamps for any placeholder dataframes
        self.hourly_ref_df = self.ref_dict[self.param.classifier]['1-hour']
        hourly_ref_idx = self.hourly_ref_df.index
        # Column-name suffixes for per-parameter reference columns
        ref_param_cols = ['_Value', '_Unit', '_QAQC_Code', '_Param_Code',
                          '_Method', '_Method_Code', '_Method_POC']
        site_cols = ['Agency', 'Site_Name', 'Site_AQS',
                     'Site_Lat', 'Site_Lon', 'Data_Source',
                     'Data_Acquisition_Date_Time']
        if not self.ref_dict['PM']['1-hour'].empty:
            self.pm_hourly_ref_df = self.ref_dict['PM']['1-hour']
            self.pm_daily_ref_df = self.ref_dict['PM']['24-hour']
        else:
            # Placeholder PM25 dataframe when no PM reference data exist
            cols = ['PM25' + col for col in ref_param_cols]
            cols = cols + site_cols
            self.pm_hourly_ref_df = pd.DataFrame(np.nan,
                                                 index=hourly_ref_idx,
                                                 columns=cols,
                                                 dtype=object)
            # Replace null method names with a recognizable placeholder
            for col_name in [col for col in cols if col.endswith('_Method')]:
                self.pm_hourly_ref_df[col_name] = 'Unknown Reference'
            self.pm_daily_ref_df = sensortoolkit.datetime_utils.interval_averaging(
                                                        self.pm_hourly_ref_df,
                                                        freq='D',
                                                        interval_count=24,
                                                        thres=0.75)
        if not self.ref_dict['Gases']['1-hour'].empty:
            self.gas_hourly_ref_df = self.ref_dict['Gases']['1-hour']
            self.gas_daily_ref_df = self.ref_dict['Gases']['24-hour']
        else:
            # Placeholder O3 dataframe when no gas reference data exist
            cols = ['O3' + col for col in ref_param_cols]
            cols = cols + site_cols
            self.gas_hourly_ref_df = pd.DataFrame(np.nan,
                                                  index=hourly_ref_idx,
                                                  columns=cols,
                                                  dtype=object)
            # Replace null method names with a recognizable placeholder
            for col_name in [col for col in cols if col.endswith('_Method')]:
                self.gas_hourly_ref_df[col_name] = 'Unknown Reference'
            self.gas_daily_ref_df = sensortoolkit.datetime_utils.interval_averaging(
                                                        self.gas_hourly_ref_df,
                                                        freq='D',
                                                        interval_count=24,
                                                        thres=0.75)
        if not self.ref_dict['Met']['1-hour'].empty:
            self.met_hourly_ref_df = self.ref_dict['Met']['1-hour']
            self.met_daily_ref_df = self.ref_dict['Met']['24-hour']
        else:
            # Placeholder RH/Temp dataframe when no met reference data exist
            cols = [met_param + col for col in ref_param_cols
                    for met_param in ['RH', 'Temp']]
            cols = cols + site_cols
            self.met_hourly_ref_df = pd.DataFrame(np.nan,
                                                  index=hourly_ref_idx,
                                                  columns=cols,
                                                  dtype=object)
            # Replace null method names with a recognizable placeholder
            for col_name in [col for col in cols if col.endswith('_Method')]:
                self.met_hourly_ref_df[col_name] = 'Unknown Reference'
            self.met_daily_ref_df = sensortoolkit.datetime_utils.interval_averaging(
                                                        self.met_hourly_ref_df,
                                                        freq='D',
                                                        interval_count=24,
                                                        thres=0.75)
        # Name of the reference monitor for the evaluation parameter
        self.ref_name = self.reference.get_method_name(self.param.name)
        self.daily_ref_df = self.ref_dict[self.param.classifier]['24-hour']
    def add_deploy_dict_stats(self):
        """Populate ``self.deploy_dict`` with precision, error, reference,
        and meteorological statistics at 1-hour and 24-hour intervals.

        Each ``sensortoolkit`` helper returns an updated copy of the
        deployment dictionary, so the result is reassigned after every call
        (call order matters). When ``self.write_to_file`` is True, the
        finished dictionary is also written to a dated JSON file under
        ``self.stats_path``.
        """
        # Coefficient of variation (precision) at hourly and daily intervals.
        self.deploy_dict = sensortoolkit.calculate.cv(
            self.hourly_df_list,
            self.deploy_dict,
            param=self._param_name)
        self.deploy_dict = sensortoolkit.calculate.cv(
            self.daily_df_list,
            self.deploy_dict,
            param=self._param_name)
        # Root-mean-square error vs. the collocated reference monitor.
        self.deploy_dict = sensortoolkit.calculate.rmse(
            self.hourly_df_list,
            self.hourly_ref_df,
            self.deploy_dict,
            param=self._param_name)
        self.deploy_dict = sensortoolkit.calculate.rmse(
            self.daily_df_list,
            self.daily_ref_df,
            self.deploy_dict,
            param=self._param_name)
        # Reference measurement summary statistics.
        self.deploy_dict = sensortoolkit.deploy.deploy_ref_stats(
            self.deploy_dict,
            self.hourly_ref_df,
            param=self._param_name,
            ref_name=self.ref_name)
        self.deploy_dict = sensortoolkit.deploy.deploy_ref_stats(
            self.deploy_dict,
            self.daily_ref_df,
            param=self._param_name,
            ref_name=self.ref_name)
        # Meteorological (temperature / relative humidity) statistics.
        self.deploy_dict = sensortoolkit.deploy.deploy_met_stats(
            self.deploy_dict,
            self.hourly_df_list,
            self.met_hourly_ref_df)
        self.deploy_dict = sensortoolkit.deploy.deploy_met_stats(
            self.deploy_dict,
            self.daily_df_list,
            self.met_daily_ref_df)
        if self.write_to_file is True:
            today = sensortoolkit.datetime_utils.get_todays_date()
            if not os.path.exists(self.stats_path):
                os.makedirs(self.stats_path)
            # NOTE(review): the filename is built by string concatenation --
            # assumes self.stats_path ends with a path separator; confirm.
            with open(self.stats_path + self.name + '_' +
                      self._param_name + "_Evaluation_" + today +
                      ".json", "w") as outfile:
                deploy_json = json.dumps(self.deploy_dict, indent=4)
                outfile.write(deploy_json)
    def calculate_metrics(self):
        """Compute OLS regression statistics for sensor vs. reference
        measurements at 1-hour and 24-hour intervals.

        Populates ``self.stats_df`` (per-sensor, 'individual' statistics)
        and ``self.avg_stats_df`` ('average' statistics), writing both to
        file via ``join_stats`` when ``self.write_to_file`` is enabled.
        """
        try:
            # Deployment statistics must exist before regression statistics.
            self.deploy_dict['Deployment Groups']['Group 1'][self._param_name]
        except KeyError:
            print('Populating deployment dataframe with evaluation statistics')
            self.add_deploy_dict_stats()
        hourly_stats = sensortoolkit.calculate.regression_stats(
            sensor_df_obj=self.hourly_df_list,
            ref_df_obj=self.hourly_ref_df,
            deploy_dict=self.deploy_dict,
            param=self._param_name,
            serials=self.serials
            )
        daily_stats = sensortoolkit.calculate.regression_stats(
            sensor_df_obj=self.daily_df_list,
            ref_df_obj=self.daily_ref_df,
            deploy_dict=self.deploy_dict,
            param=self._param_name,
            serials=self.serials
            )
        # Combine the hourly and daily tables into one per-sensor table.
        self.stats_df = sensortoolkit.calculate.join_stats(
            hourly_stats,
            daily_stats,
            stats_path=self.stats_path,
            stats_type='individual',
            write_to_file=self.write_to_file)
        # NOTE(review): the "average" statistics below are computed from the
        # exact same inputs as the individual statistics above (not from an
        # intersensor-average dataframe) -- confirm this is intentional.
        avg_hourly_stats = sensortoolkit.calculate.regression_stats(
            sensor_df_obj=self.hourly_df_list,
            ref_df_obj=self.hourly_ref_df,
            deploy_dict=self.deploy_dict,
            param=self._param_name,
            serials=self.serials
            )
        avg_daily_stats = sensortoolkit.calculate.regression_stats(
            sensor_df_obj=self.daily_df_list,
            ref_df_obj=self.daily_ref_df,
            deploy_dict=self.deploy_dict,
            param=self._param_name,
            serials=self.serials
            )
        self.avg_stats_df = sensortoolkit.calculate.join_stats(
            avg_hourly_stats,
            avg_daily_stats,
            stats_path=self.stats_path,
            stats_type='average',
            write_to_file=self.write_to_file)
    def plot_timeseries(self, report_fmt=True, **kwargs):
        """Plot sensor and reference concentrations as a time series.

        With ``report_fmt=True`` and two configured averaging intervals,
        draws a stacked two-panel (1-hour / 24-hour) reporting-template
        figure; otherwise draws a single figure for the interval given via
        ``kwargs['averaging_interval']`` (default '1-hour').
        """
        timestamp_fmt = '%Y-%m-%d %H:%M:%S'
        # Pad the plotted window by one day on each side of the data span.
        t_start = (self.avg_hrly_df.dropna(how='all', axis=0).index[0] -
                   pd.Timedelta('1D')).strftime(timestamp_fmt)
        t_end = (self.avg_hrly_df.dropna(how='all', axis=0).index[-1] +
                 pd.Timedelta('1D')).strftime(timestamp_fmt)
        avg_list = self.param.averaging
        # 'param' may be overridden by the caller; remove it from kwargs so
        # it is not passed twice to sensor_timeplot.
        param = kwargs.get('param', self._param_name)
        kwargs.pop('param', None)
        if len(avg_list) == 2 and report_fmt is True:
            fig, axs = plt.subplots(2, 1, figsize=(10.15, 4.1))
            fig.subplots_adjust(hspace=0.7)
            for i, averaging_interval in enumerate(avg_list):
                if averaging_interval == '1-hour':
                    sensor_data = self.hourly_df_list
                if averaging_interval == '24-hour':
                    sensor_data = self.daily_df_list
                ref_data = self.ref_dict[sensortoolkit.Parameter(param).classifier][averaging_interval]
                ref_name = self.reference.get_method_name(self.param.name)
                # Only the final panel writes the figure to file.
                if i == 0:
                    write_to_file = False
                if i == len(avg_list) - 1:
                    write_to_file = self.write_to_file
                axs[i] = sensortoolkit.plotting.sensor_timeplot(
                    sensor_data,
                    ref_data,
                    sensor_serials=self.serials,
                    param=param,
                    figure_path=self.figure_path,
                    sensor_name=self.name,
                    ref_name=ref_name,
                    bdate=t_start,
                    edate=t_end,
                    averaging_interval=averaging_interval,
                    report_fmt=report_fmt,
                    write_to_file=write_to_file,
                    ax=axs[i],
                    fig=fig,
                    **kwargs)
                if i == 0:
                    # Keep a single legend (on the second panel).
                    axs[i].get_legend().remove()
        else:
            averaging_interval = kwargs.get('averaging_interval', '1-hour')
            kwargs.pop('averaging_interval', None)
            if '1-hour' in avg_list and averaging_interval == '1-hour':
                sensor_data = self.hourly_df_list
            if '24-hour' in avg_list and averaging_interval == '24-hour':
                sensor_data = self.daily_df_list
            ref_data = self.ref_dict[sensortoolkit.Parameter(param).classifier][averaging_interval]
            ref_name = ref_data[f'{param}_Method'].unique()[0]
            try:
                # Raises UnboundLocalError (a NameError subclass) when the
                # requested interval matched no branch above.
                sensor_data
            except NameError as error:
                sys.exit(error)
            sensortoolkit.plotting.sensor_timeplot(
                sensor_data,
                ref_data,
                sensor_serials=self.serials,
                param=param,
                figure_path=self.figure_path,
                sensor_name=self.name,
                ref_name=ref_name,
                bdate=t_start,
                edate=t_end,
                averaging_interval=averaging_interval,
                report_fmt=report_fmt,
                write_to_file=self.write_to_file,
                **kwargs)
    def plot_metrics(self, **kwargs):
        """Display the performance-metrics summary figure for this parameter.

        Lazily builds the deployment statistics dictionary and the OLS
        regression statistics table if either is missing before plotting.
        """
        try:
            # Probe for previously computed deployment statistics.
            self.deploy_dict['Deployment Groups']['Group 1'][self._param_name]
        except KeyError:
            print('Populating deployment dataframe with evaluation statistics')
            self.add_deploy_dict_stats()
        if self.stats_df.empty:
            print('Calculating OLS regression statistics for 1-hr and 24-hr '
                  'sensor vs. reference measurements')
            self.calculate_metrics()
        sensortoolkit.plotting.performance_metrics(
            self.stats_df,
            self.deploy_dict,
            param=self._param_name,
            param_averaging=self.param.averaging,
            path=self.figure_path,
            sensor_name=self.name,
            write_to_file=self.write_to_file,
            **kwargs)
    def plot_sensor_scatter(self, averaging_interval='24-hour',
                            plot_subset=None, **kwargs):
        """Plot sensor vs. reference scatter plots.

        Lazily computes deployment and regression statistics if missing.
        When ``report_fmt`` is passed via kwargs and ``plot_subset`` is
        given, a multi-panel reporting-template figure is drawn covering
        every configured averaging interval; otherwise a single figure is
        drawn for ``averaging_interval``.
        """
        report_fmt = kwargs.get('report_fmt', False)
        kwargs.pop('report_fmt', None)
        try:
            self.deploy_dict['Deployment Groups']['Group 1'][self._param_name]
        except KeyError:
            print('Populating deployment dataframe with evaluation statistics')
            self.add_deploy_dict_stats()
        if self.stats_df.empty:
            print('Calculating OLS regression statistics for 1-hr and 24-hr '
                  'sensor vs. reference measurements')
            self.calculate_metrics()
        avg_list = self.param.averaging
        if not report_fmt and averaging_interval not in avg_list:
            txt = ('Invalid averaging interval, choose from the following: '
                   + ', '.join(avg_list))
            sys.exit(txt)
        if (report_fmt is True and plot_subset is not None):
            # Reporting-template layout: one panel per averaging interval.
            if len(avg_list) == 2:
                figsize = (5.29, 3.17)
            elif len(avg_list) == 1:
                figsize = (4.3, 3.91)
            else:
                sys.exit('Reporting template formatted '
                         'figure not specified for ' + self._param_name)
            fig, axs = plt.subplots(1, len(avg_list), figsize=figsize)
            fig.subplots_adjust(hspace=0.7)
            for i, averaging_interval in enumerate(self.param.averaging):
                if averaging_interval == '1-hour':
                    sensor_data = self.hourly_df_list
                    ref_data = self.hourly_ref_df
                    met_data = self.met_hourly_ref_df
                if averaging_interval == '24-hour':
                    sensor_data = self.daily_df_list
                    ref_data = self.daily_ref_df
                    met_data = self.met_daily_ref_df
                # Only the final panel writes the figure and draws the colorbar.
                if i == 0:
                    write_to_file = False
                    kwargs['draw_cbar'] = False
                if i == len(self.param.averaging) - 1:
                    write_to_file = self.write_to_file
                    kwargs['draw_cbar'] = True
                # plt.subplots returns a bare Axes (not an array) when there
                # is only one panel.
                if isinstance(axs, np.ndarray):
                    ax = axs[i]
                    multiplot = True
                else:
                    ax = axs
                    multiplot = False
                ax = sensortoolkit.plotting.scatter_plotter(
                    sensor_data,
                    ref_data,
                    self.stats_df,
                    deploy_dict=self.deploy_dict,
                    met_ref_df=met_data,
                    sensor_serials=self.serials,
                    param=self._param_name,
                    figure_path=self.figure_path,
                    sensor_name=self.name,
                    ref_name=self.ref_name,
                    averaging_interval=averaging_interval,
                    plot_subset=plot_subset,
                    write_to_file=write_to_file,
                    report_fmt=True,
                    ax=ax,
                    fig=fig,
                    **kwargs)
                if multiplot:
                    axs[i] = ax
                else:
                    axs = ax
        else:
            report_fmt = False
            if '1-hour' in avg_list and averaging_interval == '1-hour':
                sensor_data = self.hourly_df_list
                ref_data = self.hourly_ref_df
            if '24-hour' in avg_list and averaging_interval == '24-hour':
                sensor_data = self.daily_df_list
                ref_data = self.daily_ref_df
            try:
                # Raises UnboundLocalError (a NameError subclass) when the
                # requested interval matched no branch above.
                sensor_data
            except NameError as error:
                sys.exit(error)
            sensortoolkit.plotting.scatter_plotter(
                sensor_data,
                ref_data,
                self.stats_df,
                deploy_dict=self.deploy_dict,
                met_ref_df=self.met_hourly_ref_df,
                sensor_serials=self.serials,
                param=self._param_name,
                figure_path=self.figure_path,
                sensor_name=self.name,
                ref_name=self.ref_name,
                averaging_interval=averaging_interval,
                plot_subset=plot_subset,
                report_fmt=report_fmt,
                write_to_file=self.write_to_file,
                **kwargs)
def plot_met_dist(self):
met_params = ['Temp_Value', 'RH_Value']
sensortoolkit.plotting.met_distrib(self.met_hourly_ref_df[met_params],
self.avg_hrly_df,
figure_path=self.figure_path,
sensor_name=self.name,
write_to_file=self.write_to_file)
    def plot_met_influence(self, met_param='Temp', report_fmt=True,
                           **kwargs):
        """Plot normalized sensor/reference ratios against meteorology.

        With ``report_fmt=True``, draws side-by-side Temp and RH panels in
        the reporting-template layout; otherwise a single figure for
        ``met_param`` ('Temp' or 'RH').
        """
        valid_met_params = ['Temp', 'RH']
        if report_fmt is True:
            fig, axs = plt.subplots(1, 2, figsize=(8.1, 3.8))
            fig.subplots_adjust(hspace=0.7)
            # Defaults below may be overridden by caller-supplied kwargs.
            kwargs['fontsize'] = kwargs.get('fontsize', 10)
            kwargs['ylims'] = kwargs.get('ylims', (-.3, 4))
            for i, m_param in enumerate(valid_met_params):
                # Only the final panel writes the figure to file.
                if i == 0:
                    write_to_file = False
                if i == 1:
                    write_to_file = self.write_to_file
                axs[i] = sensortoolkit.plotting.normalized_met_scatter(
                    self.hourly_df_list,
                    self.hourly_ref_df,
                    self.avg_hrly_df,
                    self.met_hourly_ref_df,
                    self.figure_path,
                    param=self._param_name,
                    sensor_serials=self.serials,
                    sensor_name=self.name,
                    met_param=m_param,
                    ref_name=self.ref_name,
                    write_to_file=write_to_file,
                    report_fmt=report_fmt,
                    fig=fig,
                    ax=axs[i],
                    **kwargs)
                if i == 0:
                    # Keep a single legend (on the second panel).
                    axs[i].get_legend().remove()
        else:
            if met_param not in valid_met_params:
                sys.exit(f'Invalid parameter name: {met_param}')
            sensortoolkit.plotting.normalized_met_scatter(
                self.hourly_df_list,
                self.hourly_ref_df,
                self.avg_hrly_df,
                self.met_hourly_ref_df,
                self.figure_path,
                param=self._param_name,
                sensor_serials=self.serials,
                sensor_name=self.name,
                met_param=met_param,
                ref_name=self.ref_name,
                write_to_file=self.write_to_file,
                **kwargs)
    def plot_sensor_met_scatter(self, averaging_interval='1-hour',
                                met_param='Temp',
                                **kwargs):
        """Scatter plot of sensor measurements vs. a reference meteorological
        parameter ('Temp' or 'RH') at the chosen averaging interval.
        """
        met_params = ['Temp', 'RH']
        if met_param not in met_params:
            sys.exit('Invalid parameter name: ' + str(met_param))
        if averaging_interval not in self.param.averaging:
            txt = ('Invalid averaging interval, choose from the following: '
                   + ', '.join(self.param.averaging))
            sys.exit(txt)
        if averaging_interval == '1-hour':
            sensor_data = self.hourly_df_list
            ref_data = self.met_hourly_ref_df
        if averaging_interval == '24-hour':
            sensor_data = self.daily_df_list
            ref_data = self.met_daily_ref_df
        ref_name = ref_data[met_param + '_Method'].unique()[0]
        # Square axis limits spanning the observed met-value range.
        ymin = math.floor(self.avg_hrly_df[
            'mean_' + met_param + '_Value'].min())
        ymax = round(self.avg_hrly_df[
            'mean_' + met_param + '_Value'].max(), -1)
        xmin, xmax = ymin, ymax
        try:
            self.deploy_dict['Deployment Groups']['Group 1'][self._param_name]
        except KeyError:
            print('Populating deployment dataframe with evaluation statistics')
            self.add_deploy_dict_stats()
        try:
            self.stats_df
        except AttributeError:
            print('Calculating OLS regression statistics for 1-hr and 24-hr '
                  'sensor vs. reference measurements')
            self.calculate_metrics()
        fontsize = sensortoolkit.plotting.set_fontsize(self.serials)
        # Defaults below may be overridden by caller-supplied kwargs.
        kwargs['fontsize'] = kwargs.get('fontsize', fontsize)
        kwargs['ylims'] = kwargs.get('ylims', (ymin, ymax))
        kwargs['xlims'] = kwargs.get('xlims', (xmin, xmax))
        kwargs['param_class'] = 'Met'
        kwargs['tick_spacing'] = kwargs.get('tick_spacing', 10)
        kwargs['show_colorbar'] = False
        sensortoolkit.plotting.scatter_plotter(
            sensor_data,
            ref_data,
            deploy_dict=self.deploy_dict,
            param=met_param,
            sensor_name=self.name,
            ref_name=ref_name,
            averaging_interval=averaging_interval,
            figure_path=self.figure_path,
            write_to_file=self.write_to_file,
            sensor_serials=self.serials,
            **kwargs)
def print_eval_metrics(self, averaging_interval='24-hour'):
try:
self.deploy_dict['Deployment Groups']['Group 1'][self._param_name]
except KeyError:
print('Populating deployment dataframe with evaluation statistics')
self.add_deploy_dict_stats()
if self.stats_df.empty:
self.calculate_metrics()
param = self._param_name
deploy_dic = self.deploy_dict
deploy_stats = self.stats_df.where(
self.stats_df['Averaging Interval'] == averaging_interval)
print(88*'-')
print('{:^88s}'.format(self.name + ' '
+ averaging_interval +
' Performance Evaluation Results'))
print('{:^88s}'.format('Reference Method: ' + self.ref_name))
print(88*'-')
print('{:^6s}|{:^24s}|{:^24s}|{:^24s}|{:^6s}'.format('CV', 'Slope',
'Intercept', 'R^2', 'RMSE'))
print(88*'-')
cv_data = [(deploy_dic['Deployment Groups'][group]
[param]['Precision']['cv_' + averaging_interval])
for group in deploy_dic['Deployment Groups']]
slope_avg = deploy_stats.Slope.mean()
slope_min = deploy_stats.Slope.min()
slope_max = deploy_stats.Slope.max()
intercept_avg = deploy_stats.Intercept.mean()
intercept_min = deploy_stats.Intercept.min()
intercept__max = deploy_stats.Intercept.max()
linearity_avg = deploy_stats['R$^2$'].mean()
linearity_min = deploy_stats['R$^2$'].min()
linearity_max = deploy_stats['R$^2$'].max()
rmse_data = [(deploy_dic['Deployment Groups'][group]
[param]['Error']['rmse_' + averaging_interval])
for group in deploy_dic['Deployment Groups']]
print(('{:^6.1f}|{:^24.2f}|'
'{:^24.2f}|{:^24.2f}|{:^6.1f}').format(cv_data[0],
slope_avg,
intercept_avg,
linearity_avg,
rmse_data[0]))
print(5*' ',
('| ({:4.2f} to {:4.2f}) '
'| ({:4.2f} to {:4.2f}) '
'| ({:4.2f} to {:4.2f}) |').format(slope_min,
slope_max,
intercept_min,
intercept__max,
linearity_min,
linearity_max),
5*' ')
    def print_eval_conditions(self, averaging_interval='24-hour'):
        """Print a console table summarizing the evaluation conditions
        (period, duration, sensor/reference concentrations, Temp, RH).
        """
        try:
            self.deploy_dict['Deployment Groups']['Group 1'][self._param_name]
        except KeyError:
            print('Populating deployment dataframe with evaluation statistics')
            self.add_deploy_dict_stats()
        if self.stats_df.empty:
            self.calculate_metrics()
        if averaging_interval == '1-hour':
            ref_df = self.hourly_ref_df
            met_ref_df = self.met_hourly_ref_df
        if averaging_interval == '24-hour':
            ref_df = self.daily_ref_df
            met_ref_df = self.met_daily_ref_df
        deploy_dict = self.deploy_dict
        # Keep only rows for the requested averaging interval.
        deploy_stats = self.stats_df.where(
            self.stats_df['Averaging Interval'] == averaging_interval
            ).dropna(how='all', axis=0)
        n_sensors = len(self.serials)
        print(88*'-')
        print('{:^88s}'.format(self.name + ' (' + str(n_sensors) + ') '
                               + averaging_interval +
                               ' Evaluation Conditions'))
        print(88*'-')
        print('{:^14s}|{:^14s}|{:^14s}|{:^14s}|{:^14s}|{:^14s}'.format(
                'Eval period', 'Duration', 'Sensor ' + self._param_name,
                'Ref ' + self._param_name, 'Temp', 'RH'))
        print(88*'-')
        deploy_loc = deploy_dict['Deployment Groups']
        # Start/end dates and rounded duration for each deployment group.
        eval_start = [pd.to_datetime(deploy_loc[group]['eval_start']
                                     ).strftime('%m-%d-%y')
                      for group in deploy_loc]
        eval_end = [pd.to_datetime(deploy_loc[group]['eval_end']
                                   ).strftime('%m-%d-%y')
                    for group in deploy_loc]
        eval_duration = [str(pd.to_timedelta(
                            deploy_loc[group]['eval_duration']
                            ).round('D').days) + ' days'
                         for group in deploy_dict['Deployment Groups']]
        sensor_min = format(deploy_stats.Sensor_Min.min(), '3.1f')
        sensor_max = format(deploy_stats.Sensor_Max.max(), '3.1f')
        sensor_mean = format(deploy_stats.Sensor_Mean.mean(), '3.1f')
        ref_min = format(ref_df[self._param_name + '_Value'].min(), '3.1f')
        ref_max = format(ref_df[self._param_name + '_Value'].max(), '3.1f')
        ref_mean = format(ref_df[self._param_name + '_Value'].mean(), '3.1f')
        temp_min = format(met_ref_df['Temp_Value'].min(), '2.0f')
        temp_max = format(met_ref_df['Temp_Value'].max(), '2.0f')
        temp_mean = format(met_ref_df['Temp_Value'].mean(), '2.0f')
        rh_min = format(met_ref_df['RH_Value'].min(), '2.0f')
        rh_max = format(met_ref_df['RH_Value'].max(), '2.0f')
        rh_mean = format(met_ref_df['RH_Value'].mean(), '2.0f')
        # First row: means; second row: (min to max) ranges.
        print(('{:^14s}|{:^14s}|{:^14s}|{:^14s}|{:^14s}|{:^14s}'
               ).format(eval_start[0]+'-',
                        eval_duration[0],
                        sensor_mean,
                        ref_mean,
                        temp_mean,
                        rh_mean))
        print(('{:^14s}|{:^14s}|{:^14s}|{:^14s}|{:^14s}|{:^14s}'
               ).format(eval_end[0],
                        '',
                        '(' + sensor_min + ' to ' + sensor_max + ')',
                        '(' + ref_min + ' to ' + ref_max + ')',
                        '(' + temp_min + ' to ' + temp_max + ')',
                        '(' + rh_min + ' to ' + rh_max + ')'))
| true | true |
1c322f8ba391c2cbf93916a02b75024c2161394f | 3,085 | py | Python | maskrcnn_benchmark/data/transforms/transforms.py | zhilinghuang/maskrcnn-benchmark | 1127bdd368613f320f7b113320e62994c0baa216 | [
"MIT"
] | 54 | 2020-06-14T15:45:01.000Z | 2022-03-26T07:25:46.000Z | maskrcnn_benchmark/data/transforms/transforms.py | zhilinghuang/maskrcnn-benchmark | 1127bdd368613f320f7b113320e62994c0baa216 | [
"MIT"
] | 25 | 2019-05-21T02:20:27.000Z | 2019-09-13T14:56:17.000Z | maskrcnn_benchmark/data/transforms/transforms.py | zhilinghuang/maskrcnn-benchmark | 1127bdd368613f320f7b113320e62994c0baa216 | [
"MIT"
] | 41 | 2019-09-03T06:51:59.000Z | 2022-01-18T02:40:57.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import random
import torch
import torchvision
from torchvision.transforms import functional as F
class Compose(object):
    """Chain several (image, target) transforms and apply them in order."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        # Each transform consumes and returns the (image, target) pair.
        for transform in self.transforms:
            image, target = transform(image, target)
        return image, target

    def __repr__(self):
        pieces = [self.__class__.__name__ + "("]
        for transform in self.transforms:
            pieces.append("\n")
            pieces.append(" {0}".format(transform))
        pieces.append("\n)")
        return "".join(pieces)
class Resize(object):
    """Resize an image (and its target boxes) so the shorter side matches a
    randomly chosen minimum size, capped so the longer side never exceeds
    ``max_size``."""

    def __init__(self, min_size, max_size):
        # Accept a single int as shorthand for a one-element choice set.
        if not isinstance(min_size, (list, tuple)):
            min_size = (min_size,)
        self.min_size = min_size
        self.max_size = max_size

    # modified from torchvision to add support for max size
    def get_size(self, image_size):
        """Return the (height, width) to resize to, preserving aspect ratio."""
        w, h = image_size
        target = random.choice(self.min_size)
        if self.max_size is not None:
            shorter = float(min((w, h)))
            longer = float(max((w, h)))
            # Shrink the target if the scaled long side would exceed max_size.
            if longer / shorter * target > self.max_size:
                target = int(round(self.max_size * shorter / longer))
        # Short side already matches: keep dimensions as-is.
        if (w <= h and w == target) or (h <= w and h == target):
            return (h, w)
        if w < h:
            return (int(target * h / w), target)
        return (target, int(target * w / h))

    def __call__(self, image, target):
        size = self.get_size(image.size)
        image = F.resize(image, size)
        target = target.resize(image.size)
        return image, target
class RandomHorizontalFlip(object):
    """Horizontally flip the image and its target with probability ``prob``."""

    def __init__(self, prob=0.5):
        self.prob = prob

    def __call__(self, image, target):
        flip = random.random() < self.prob
        if flip:
            image = F.hflip(image)
            target = target.transpose(0)
        return image, target
class ColorJitter(object):
    """Randomly jitter brightness, contrast, saturation, and hue of the
    image; the target is passed through unchanged.

    Thin wrapper delegating to ``torchvision.transforms.ColorJitter``.
    """

    def __init__(self,
                 brightness=None,
                 contrast=None,
                 saturation=None,
                 hue=None,
                 ):
        # NOTE(review): defaults here are None rather than torchvision's
        # numeric defaults -- confirm the installed torchvision version
        # accepts None for these arguments.
        self.color_jitter = torchvision.transforms.ColorJitter(
            brightness=brightness,
            contrast=contrast,
            saturation=saturation,
            hue=hue,)

    def __call__(self, image, target):
        image = self.color_jitter(image)
        return image, target
class ToTensor(object):
    """Convert a PIL image to a ``torch.Tensor``; the target is unchanged."""

    def __call__(self, image, target):
        return F.to_tensor(image), target
class Normalize(object):
    """Normalize an image tensor with per-channel mean/std.

    When ``to_bgr255`` is True, channels are reordered RGB -> BGR and values
    are scaled by 255 before normalization (assumes a 3-channel CxHxW tensor
    -- confirm against the model's expected input format).
    """

    def __init__(self, mean, std, to_bgr255=True):
        self.mean = mean
        self.std = std
        self.to_bgr255 = to_bgr255

    def __call__(self, image, target):
        if self.to_bgr255:
            # Fancy index swaps channels 0 and 2 (RGB -> BGR), then scales.
            image = image[[2, 1, 0]] * 255
        image = F.normalize(image, mean=self.mean, std=self.std)
        return image, target
| 28.302752 | 83 | 0.57893 |
import random
import torch
import torchvision
from torchvision.transforms import functional as F
class Compose(object):
    """Chain several (image, target) transforms and apply them in order."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        # Each transform consumes and returns the (image, target) pair.
        for transform in self.transforms:
            image, target = transform(image, target)
        return image, target

    def __repr__(self):
        pieces = [self.__class__.__name__ + "("]
        for transform in self.transforms:
            pieces.append("\n")
            pieces.append(" {0}".format(transform))
        pieces.append("\n)")
        return "".join(pieces)
class Resize(object):
    """Resize an image (and its target boxes) so the shorter side matches a
    randomly chosen minimum size, capped so the longer side never exceeds
    ``max_size``."""

    def __init__(self, min_size, max_size):
        # Accept a single int as shorthand for a one-element choice set.
        if not isinstance(min_size, (list, tuple)):
            min_size = (min_size,)
        self.min_size = min_size
        self.max_size = max_size

    def get_size(self, image_size):
        """Return the (height, width) to resize to, preserving aspect ratio."""
        w, h = image_size
        target = random.choice(self.min_size)
        if self.max_size is not None:
            shorter = float(min((w, h)))
            longer = float(max((w, h)))
            # Shrink the target if the scaled long side would exceed max_size.
            if longer / shorter * target > self.max_size:
                target = int(round(self.max_size * shorter / longer))
        # Short side already matches: keep dimensions as-is.
        if (w <= h and w == target) or (h <= w and h == target):
            return (h, w)
        if w < h:
            return (int(target * h / w), target)
        return (target, int(target * w / h))

    def __call__(self, image, target):
        size = self.get_size(image.size)
        image = F.resize(image, size)
        target = target.resize(image.size)
        return image, target
class RandomHorizontalFlip(object):
    """Horizontally flip the image and its target with probability ``prob``."""

    def __init__(self, prob=0.5):
        self.prob = prob

    def __call__(self, image, target):
        flip = random.random() < self.prob
        if flip:
            image = F.hflip(image)
            target = target.transpose(0)
        return image, target
class ColorJitter(object):
    """Randomly jitter brightness, contrast, saturation, and hue of the
    image; the target is passed through unchanged.

    Thin wrapper delegating to ``torchvision.transforms.ColorJitter``.
    """

    def __init__(self,
                 brightness=None,
                 contrast=None,
                 saturation=None,
                 hue=None,
                 ):
        # NOTE(review): defaults here are None rather than torchvision's
        # numeric defaults -- confirm the installed torchvision version
        # accepts None for these arguments.
        self.color_jitter = torchvision.transforms.ColorJitter(
            brightness=brightness,
            contrast=contrast,
            saturation=saturation,
            hue=hue,)

    def __call__(self, image, target):
        image = self.color_jitter(image)
        return image, target
class ToTensor(object):
    """Convert a PIL image to a ``torch.Tensor``; the target is unchanged."""

    def __call__(self, image, target):
        return F.to_tensor(image), target
class Normalize(object):
    """Normalize an image tensor with per-channel mean/std.

    When ``to_bgr255`` is True, channels are reordered RGB -> BGR and values
    are scaled by 255 before normalization (assumes a 3-channel CxHxW tensor
    -- confirm against the model's expected input format).
    """

    def __init__(self, mean, std, to_bgr255=True):
        self.mean = mean
        self.std = std
        self.to_bgr255 = to_bgr255

    def __call__(self, image, target):
        if self.to_bgr255:
            # Fancy index swaps channels 0 and 2 (RGB -> BGR), then scales.
            image = image[[2, 1, 0]] * 255
        image = F.normalize(image, mean=self.mean, std=self.std)
        return image, target
| true | true |
1c322fe79d5d09617c72e056b0c23609d1c7f199 | 1,920 | py | Python | thor/orbits/gibbs.py | KatKiker/thor | ffc8ab3fbaa8af046f531e8111907a891998d14b | [
"BSD-3-Clause"
] | 11 | 2019-08-22T18:37:09.000Z | 2022-02-28T22:49:25.000Z | thor/orbits/gibbs.py | KatKiker/thor | ffc8ab3fbaa8af046f531e8111907a891998d14b | [
"BSD-3-Clause"
] | 57 | 2019-08-20T19:57:14.000Z | 2021-09-16T20:54:59.000Z | thor/orbits/gibbs.py | KatKiker/thor | ffc8ab3fbaa8af046f531e8111907a891998d14b | [
"BSD-3-Clause"
] | 7 | 2021-02-09T21:28:43.000Z | 2022-02-01T08:55:29.000Z | import numpy as np
from ..constants import Constants as c
__all__ = ["calcGibbs"]
MU = c.MU
def calcGibbs(r1, r2, r3):
    r"""
    Calculates the velocity vector at the location of the second position vector (r2) using the
    Gibbs method.

    .. math::
        \vec{D} = \vec{r}_1 \times \vec{r}_2 + \vec{r}_2 \times \vec{r}_3 + \vec{r}_3 \times \vec{r}_1

        \vec{N} = r_1 (\vec{r}_2 \times \vec{r}_3) + r_2 (\vec{r}_3 \times \vec{r}_1) + r_3 (\vec{r}_1 \times \vec{r}_2)

        \vec{B} \equiv \vec{D} \times \vec{r}_2

        L_g \equiv \sqrt{\frac{\mu}{ND}}

        \vec{v}_2 = \frac{L_g}{r_2} \vec{B} + L_g \vec{S}

    For more details on theory see Chapter 4 in David A. Vallado's "Fundamentals of Astrodynamics
    and Applications".

    Parameters
    ----------
    r1 : `~numpy.ndarray` (3)
        Heliocentric position vector at time 1 in cartesian coordinates in units
        of AU.
    r2 : `~numpy.ndarray` (3)
        Heliocentric position vector at time 2 in cartesian coordinates in units
        of AU.
    r3 : `~numpy.ndarray` (3)
        Heliocentric position vector at time 3 in cartesian coordinates in units
        of AU.

    Returns
    -------
    v2 : `~numpy.ndarray` (3)
        Velocity of object at position r2 at time t2 in units of AU per day.
    """
    # NOTE: the docstring above is a raw string -- without the r-prefix,
    # sequences such as \times, \frac, and \vec would be interpreted as the
    # escape characters \t, \f, and \v, corrupting the rendered LaTeX.
    r1_mag = np.linalg.norm(r1)
    r2_mag = np.linalg.norm(r2)
    r3_mag = np.linalg.norm(r3)
    # Pairwise cross products of the position vectors.
    Z12 = np.cross(r1, r2)
    Z23 = np.cross(r2, r3)
    Z31 = np.cross(r3, r1)
    N = r1_mag * Z23 + r2_mag * Z31 + r3_mag * Z12
    N_mag = np.linalg.norm(N)
    D = Z12 + Z23 + Z31
    D_mag = np.linalg.norm(D)
    S = (r2_mag - r3_mag) * r1 + (r3_mag - r1_mag) * r2 + (r1_mag - r2_mag) * r3
    B = np.cross(D, r2)
    Lg = np.sqrt(MU / N_mag / D_mag)
    v2 = Lg / r2_mag * B + Lg * S
    return v2
from ..constants import Constants as c
__all__ = ["calcGibbs"]
MU = c.MU
def calcGibbs(r1, r2, r3):
    r"""Return the velocity vector at the location of the second position
    vector (``r2``) using the Gibbs method.

    Parameters
    ----------
    r1, r2, r3 : `~numpy.ndarray` (3)
        Heliocentric position vectors at three successive times, in
        cartesian coordinates (units of AU).

    Returns
    -------
    v2 : `~numpy.ndarray` (3)
        Velocity at position ``r2`` in AU per day.
    """
    r1_mag = np.linalg.norm(r1)
    r2_mag = np.linalg.norm(r2)
    r3_mag = np.linalg.norm(r3)
    # Pairwise cross products of the position vectors.
    Z12 = np.cross(r1, r2)
    Z23 = np.cross(r2, r3)
    Z31 = np.cross(r3, r1)
    # Coplanarity angle of the three position vectors (computed but unused).
    coplanarity = np.arcsin(np.dot(Z23, r1) / (np.linalg.norm(Z23) * r1_mag))
    N = r1_mag * Z23 + r2_mag * Z31 + r3_mag * Z12
    N_mag = np.linalg.norm(N)
    D = Z12 + Z23 + Z31
    D_mag = np.linalg.norm(D)
    S = (r2_mag - r3_mag) * r1 + (r3_mag - r1_mag) * r2 + (r1_mag - r2_mag) * r3
    S_mag = np.linalg.norm(S)
    B = np.cross(D, r2)
    # MU is the module-level gravitational parameter (from ..constants).
    Lg = np.sqrt(MU / N_mag / D_mag)
    v2 = Lg / r2_mag * B + Lg * S
    return v2
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.