from django import forms
from django.contrib.auth import get_user_model
from .hooks import hookset
from .models import Message
class UserModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return hookset.display_name(obj)
class UserModelMultipleChoiceField(forms.ModelMultipleChoiceField):
def label_from_instance(self, obj):
return hookset.display_name(obj)
class NewMessageForm(forms.ModelForm):
subject = forms.CharField()
to_user = UserModelChoiceField(queryset=get_user_model().objects.none())
content = forms.CharField(widget=forms.Textarea)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop("user")
super().__init__(*args, **kwargs)
self.fields["to_user"].queryset = hookset.get_user_choices(self.user)
if self.initial.get("to_user") is not None:
qs = self.fields["to_user"].queryset.filter(pk=self.initial["to_user"])
self.fields["to_user"].queryset = qs
def save(self, commit=True):
data = self.cleaned_data
return Message.new_message(
self.user, [data["to_user"]], data["subject"], data["content"]
)
class Meta:
model = Message
fields = ["to_user", "subject", "content"]
class NewMessageFormMultiple(forms.ModelForm):
subject = forms.CharField()
    to_user = UserModelMultipleChoiceField(queryset=get_user_model().objects.none())
content = forms.CharField(widget=forms.Textarea)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop("user")
super().__init__(*args, **kwargs)
self.fields["to_user"].queryset = hookset.get_user_choices(self.user)
if self.initial.get("to_user") is not None:
qs = self.fields["to_user"].queryset.filter(pk__in=self.initial["to_user"])
self.fields["to_user"].queryset = qs
def save(self, commit=True):
data = self.cleaned_data
return Message.new_message(
self.user, data["to_user"], data["subject"], data["content"]
)
class Meta:
model = Message
fields = ["to_user", "subject", "content"]
class MessageReplyForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.thread = kwargs.pop("thread")
self.user = kwargs.pop("user")
super().__init__(*args, **kwargs)
def save(self, commit=True):
return Message.new_reply(
self.thread, self.user, self.cleaned_data["content"]
)
class Meta:
model = Message
fields = ["content"]
|
"""
Waffle flags and switches for user authn.
"""
from edx_toggles.toggles import LegacyWaffleSwitch, LegacyWaffleSwitchNamespace
_WAFFLE_NAMESPACE = 'user_authn'
_WAFFLE_SWITCH_NAMESPACE = LegacyWaffleSwitchNamespace(name=_WAFFLE_NAMESPACE, log_prefix='UserAuthN: ')
# .. toggle_name: user_authn.enable_login_using_thirdparty_auth_only
# .. toggle_implementation: WaffleSwitch
# .. toggle_default: False
# .. toggle_description: When enabled, users must sign in using their allowed domain SSO account. This includes sign-
#   ins to the Django admin dashboard at "/admin".
# .. toggle_use_cases: temporary
# .. toggle_creation_date: 2019-11-20
# .. toggle_target_removal_date: 2020-01-31
# .. toggle_warnings: Requires THIRD_PARTY_AUTH_ONLY_DOMAIN to also be set.
# .. toggle_tickets: ENT-2461
ENABLE_LOGIN_USING_THIRDPARTY_AUTH_ONLY = LegacyWaffleSwitch(
_WAFFLE_SWITCH_NAMESPACE,
'enable_login_using_thirdparty_auth_only',
__name__
)
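
# A hedged usage sketch: edx_toggles switches expose `is_enabled()`, so callers
# would gate SSO-only login roughly like this (the helper name is illustrative).
def third_party_auth_only_enabled():
    return ENABLE_LOGIN_USING_THIRDPARTY_AUTH_ONLY.is_enabled()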
|
from common.views import AuthenticatedListView, AuthorizedDetailView
from django.db.models import Q
from .models import Book
class BookListView(AuthenticatedListView):
model = Book
context_object_name = 'book_list'
template_name = 'books/book_list.html'
class BookDetailView(AuthorizedDetailView):
model = Book
context_object_name = 'book'
template_name = 'books/book_detail.html'
permission_required = 'books.special_status'
class SearchResultsListView(BookListView):
model = Book
context_object_name = 'book_list'
template_name = 'books/search_results.html'
    def get_queryset(self):
        # Default to an empty string so a missing ?q= parameter does not break the lookup.
        query = self.request.GET.get('q', '')
        return Book.objects.filter(
            Q(title__icontains=query) | Q(author__icontains=query))
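
# A hedged wiring sketch (it would live in urls.py, not here): the search view
# reads the `q` GET parameter, e.g. GET /search/?q=django.
#
#   from django.urls import path
#   urlpatterns = [
#       path('search/', SearchResultsListView.as_view(), name='search_results'),
#   ]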
|
import torch
import torch.nn as nn
import torch.nn.utils
import torch.nn.functional as F
import numpy as np
from torch.nn.init import xavier_normal_
from transformers import *
import random
from helpers import *
class RelationExtractor(nn.Module):
def __init__(self, embedding_dim, relation_dim, num_entities, pretrained_embeddings, device, entdrop, reldrop, scoredrop, l3_reg, model, que_embedding_model, ls, do_batch_norm, freeze=True):
super(RelationExtractor, self).__init__()
self.device = device
self.model = model
self.freeze = freeze
self.label_smoothing = ls
self.l3_reg = l3_reg
self.do_batch_norm = do_batch_norm
if not self.do_batch_norm:
print('Not doing batch norm')
        # Keep the model name as a string; the attribute below is replaced by the model object.
        self.que_embedding_model_name = que_embedding_model
        self.pre_trained_model_name = get_pretrained_model_name(que_embedding_model)
if que_embedding_model == 'RoBERTa':
self.que_embedding_model = RobertaModel.from_pretrained(self.pre_trained_model_name)
elif que_embedding_model == 'XLNet':
self.que_embedding_model = XLNetModel.from_pretrained(self.pre_trained_model_name)
elif que_embedding_model == 'ALBERT':
self.que_embedding_model = AlbertModel.from_pretrained(self.pre_trained_model_name)
elif que_embedding_model == 'SentenceTransformer':
self.que_embedding_model = AutoModel.from_pretrained(self.pre_trained_model_name)
elif que_embedding_model == 'Longformer':
self.que_embedding_model = LongformerModel.from_pretrained(self.pre_trained_model_name)
else:
            print('Incorrect question embedding model specified:', que_embedding_model)
            exit(1)
for param in self.que_embedding_model.parameters():
param.requires_grad = True
if self.model == 'DistMult':
multiplier = 1
self.getScores = self.DistMult
elif self.model == 'SimplE':
multiplier = 2
self.getScores = self.SimplE
elif self.model == 'ComplEx':
multiplier = 2
self.getScores = self.ComplEx
elif self.model == 'TuckER':
# W_torch = torch.from_numpy(np.load(w_matrix))
# self.W = nn.Parameter(
# torch.Tensor(W_torch),
# requires_grad = not self.freeze
# )
            self.W = nn.Parameter(torch.tensor(np.random.uniform(-1, 1, (relation_dim, relation_dim, relation_dim)),
                                               dtype=torch.float, device=self.device, requires_grad=True))
multiplier = 1
self.getScores = self.TuckER
elif self.model == 'RESCAL':
self.getScores = self.RESCAL
multiplier = 1
else:
            print('Incorrect model specified:', self.model)
            exit(1)
print('Model is', self.model)
self.hidden_dim = 768
self.relation_dim = relation_dim * multiplier
if self.model == 'RESCAL':
self.relation_dim = relation_dim * relation_dim
self.num_entities = num_entities
# self.loss = torch.nn.BCELoss(reduction='sum')
self.loss = self.kge_loss
# best: all dropout 0
self.rel_dropout = torch.nn.Dropout(reldrop)
self.ent_dropout = torch.nn.Dropout(entdrop)
self.score_dropout = torch.nn.Dropout(scoredrop)
self.fcnn_dropout = torch.nn.Dropout(0.1)
# self.pretrained_embeddings = pretrained_embeddings
# random.shuffle(pretrained_embeddings)
# print(pretrained_embeddings[0])
print('Frozen:', self.freeze)
self.embedding = nn.Embedding.from_pretrained(torch.stack(pretrained_embeddings, dim=0), freeze=self.freeze)
# self.embedding = nn.Embedding.from_pretrained(torch.FloatTensor(pretrained_embeddings), freeze=self.freeze)
print(self.embedding.weight.shape)
# self.embedding = nn.Embedding(self.num_entities, self.relation_dim)
# self.embedding.weight.requires_grad = False
# xavier_normal_(self.embedding.weight.data)
self.mid1 = 512
self.mid2 = 512
self.mid3 = 512
self.mid4 = 512
# self.lin1 = nn.Linear(self.hidden_dim, self.mid1)
# self.lin2 = nn.Linear(self.mid1, self.mid2)
# self.lin3 = nn.Linear(self.mid2, self.mid3)
# self.lin4 = nn.Linear(self.mid3, self.mid4)
# self.hidden2rel = nn.Linear(self.mid4, self.relation_dim)
self.hidden2rel = nn.Linear(self.hidden_dim, self.relation_dim)
self.hidden2rel_base = nn.Linear(self.mid2, self.relation_dim)
if self.model in ['DistMult', 'TuckER', 'RESCAL', 'SimplE']:
self.bn0 = torch.nn.BatchNorm1d(self.embedding.weight.size(1))
self.bn2 = torch.nn.BatchNorm1d(self.embedding.weight.size(1))
else:
self.bn0 = torch.nn.BatchNorm1d(multiplier)
self.bn2 = torch.nn.BatchNorm1d(multiplier)
self.logsoftmax = torch.nn.LogSoftmax(dim=-1)
self._klloss = torch.nn.KLDivLoss(reduction='sum')
def set_bn_eval(self):
self.bn0.eval()
self.bn2.eval()
def kge_loss(self, scores, targets):
# loss = torch.mean(scores*targets)
return self._klloss(
F.log_softmax(scores, dim=1), F.normalize(targets.float(), p=1, dim=1)
)
def applyNonLinear(self, outputs):
# outputs = self.fcnn_dropout(self.lin1(outputs))
# outputs = F.relu(outputs)
# outputs = self.fcnn_dropout(self.lin2(outputs))
# outputs = F.relu(outputs)
# outputs = self.lin3(outputs)
# outputs = F.relu(outputs)
# outputs = self.lin4(outputs)
# outputs = F.relu(outputs)
outputs = self.hidden2rel(outputs)
# outputs = self.hidden2rel_base(outputs)
return outputs
def TuckER(self, head, relation):
head = self.bn0(head)
head = self.ent_dropout(head)
x = head.view(-1, 1, head.size(1))
W_mat = torch.mm(relation, self.W.view(relation.size(1), -1))
W_mat = W_mat.view(-1, head.size(1), head.size(1))
W_mat = self.rel_dropout(W_mat)
x = torch.bmm(x, W_mat)
x = x.view(-1, head.size(1))
x = self.bn2(x)
x = self.score_dropout(x)
x = torch.mm(x, self.embedding.weight.transpose(1,0))
pred = torch.sigmoid(x)
return pred
def RESCAL(self, head, relation):
head = self.bn0(head)
head = self.ent_dropout(head)
ent_dim = head.size(1)
head = head.view(-1, 1, ent_dim)
relation = relation.view(-1, ent_dim, ent_dim)
relation = self.rel_dropout(relation)
x = torch.bmm(head, relation)
x = x.view(-1, ent_dim)
x = self.bn2(x)
x = self.score_dropout(x)
x = torch.mm(x, self.embedding.weight.transpose(1,0))
pred = torch.sigmoid(x)
return pred
def DistMult(self, head, relation):
head = self.bn0(head)
head = self.ent_dropout(head)
relation = self.rel_dropout(relation)
s = head * relation
s = self.bn2(s)
s = self.score_dropout(s)
ans = torch.mm(s, self.embedding.weight.transpose(1,0))
pred = torch.sigmoid(ans)
return pred
def SimplE(self, head, relation):
head = self.bn0(head)
head = self.ent_dropout(head)
relation = self.rel_dropout(relation)
s = head * relation
s_head, s_tail = torch.chunk(s, 2, dim=1)
s = torch.cat([s_tail, s_head], dim=1)
s = self.bn2(s)
s = self.score_dropout(s)
s = torch.mm(s, self.embedding.weight.transpose(1,0))
s = 0.5 * s
pred = torch.sigmoid(s)
return pred
def ComplEx(self, head, relation):
head = torch.stack(list(torch.chunk(head, 2, dim=1)), dim=1)
if self.do_batch_norm:
head = self.bn0(head)
head = self.ent_dropout(head)
relation = self.rel_dropout(relation)
head = head.permute(1, 0, 2)
re_head = head[0]
im_head = head[1]
re_relation, im_relation = torch.chunk(relation, 2, dim=1)
        re_tail, im_tail = torch.chunk(self.embedding.weight, 2, dim=1)
re_score = re_head * re_relation - im_head * im_relation
im_score = re_head * im_relation + im_head * re_relation
score = torch.stack([re_score, im_score], dim=1)
if self.do_batch_norm:
score = self.bn2(score)
score = self.score_dropout(score)
score = score.permute(1, 0, 2)
re_score = score[0]
im_score = score[1]
score = torch.mm(re_score, re_tail.transpose(1,0)) + torch.mm(im_score, im_tail.transpose(1,0))
# pred = torch.sigmoid(score)
pred = score
return pred
def getQuestionEmbedding(self, question_tokenized, attention_mask):
        if self.que_embedding_model_name == "SentenceTransformer":
with torch.no_grad():
model_output = self.que_embedding_model(question_tokenized, attention_mask)
# model_output = model(**encoded_input)
question_embedding = mean_pooling(model_output, attention_mask)
return question_embedding[0]
else:
last_hidden_states = self.que_embedding_model(
question_tokenized,
attention_mask=attention_mask).last_hidden_state
            # Mean-pool the final hidden states over the token dimension
            # (rather than taking the [CLS] token embedding).
            question_embedding = torch.mean(last_hidden_states, dim=1)
return question_embedding
def forward(self, question_tokenized, attention_mask, p_head, p_tail):
question_embedding = self.getQuestionEmbedding(question_tokenized, attention_mask)
rel_embedding = self.applyNonLinear(question_embedding)
p_head = self.embedding(p_head)
pred = self.getScores(p_head, rel_embedding)
actual = p_tail
if self.label_smoothing:
            actual = ((1.0 - self.label_smoothing) * actual) + (self.label_smoothing / actual.size(1))
loss = self.loss(pred, actual)
if not self.freeze:
if self.l3_reg:
norm = torch.norm(self.embedding.weight, p=3, dim=-1)
loss = loss + self.l3_reg * torch.sum(norm)
return loss
def get_score_ranked(self, head, question_tokenized, attention_mask):
question_embedding = self.getQuestionEmbedding(question_tokenized.unsqueeze(0), attention_mask.unsqueeze(0))
rel_embedding = self.applyNonLinear(question_embedding)
head = self.embedding(head).unsqueeze(0)
scores = self.getScores(head, rel_embedding)
# top2 = torch.topk(scores, k=2, largest=True, sorted=True)
# return top2
return scores
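
# A hedged, self-contained illustration (not part of the model) of the DistMult
# scoring used above: score(h, r, .) = sigmoid((h * r) @ E^T), giving one score
# per candidate tail entity.
if __name__ == '__main__':
    torch.manual_seed(0)
    E = torch.randn(5, 4)          # toy entity table: 5 entities, dim 4
    head = E[0].unsqueeze(0)       # batch of one head entity
    relation = torch.randn(1, 4)   # toy relation embedding
    scores = torch.sigmoid((head * relation) @ E.t())
    print(scores.shape)            # torch.Size([1, 5])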
|
# Generated by Django 2.0.2 on 2018-10-14 21:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workplace', '0016_address_line_2_blank'),
]
operations = [
migrations.AddField(
model_name='historicalreservation',
name='cancelation_date',
field=models.DateTimeField(blank=True, null=True, verbose_name='Cancelation date'),
),
migrations.AddField(
model_name='historicalreservation',
name='cancelation_reason',
field=models.CharField(blank=True, choices=[('U', 'User canceled'), ('TD', 'Timeslot deleted'), ('TM', 'Timeslot modified')], max_length=100, null=True, verbose_name='Cancelation reason'),
),
migrations.AddField(
model_name='reservation',
name='cancelation_date',
field=models.DateTimeField(blank=True, null=True, verbose_name='Cancelation date'),
),
migrations.AddField(
model_name='reservation',
name='cancelation_reason',
field=models.CharField(blank=True, choices=[('U', 'User canceled'), ('TD', 'Timeslot deleted'), ('TM', 'Timeslot modified')], max_length=100, null=True, verbose_name='Cancelation reason'),
),
]
|
from cbse_results_scraper.app import CBSEResultsScraper
|
import logging
import sys
from oslo_config import cfg
import openstack
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
PROJECT_NAME = 'glance-share-image'
CONF = cfg.CONF
opts = [
cfg.BoolOpt('dry-run', help='Do not really do anything', default=False),
cfg.StrOpt('action', required=False, help='Action', default='add'),
cfg.StrOpt('cloud', required=False, help='Managed cloud', default='service'),
cfg.StrOpt('image', required=True, help='Image to share'),
cfg.StrOpt('project-domain', required=False, help='Target project domain', default='default'),
cfg.StrOpt('target', required=True, help='Target project or domain'),
cfg.StrOpt('type', required=True, help='Project or domain', default='project')
]
CONF.register_cli_opts(opts)
def unshare_image_with_project(conn, image, project):
member = conn.image.find_member(project.id, image.id)
if member:
logging.info("del - %s - %s (%s)" % (image.name, project.name, project.domain_id))
if not CONF.dry_run:
conn.image.remove_member(member, image.id)
def share_image_with_project(conn, image, project):
member = conn.image.find_member(project.id, image.id)
if not member:
logging.info("add - %s - %s (%s)" % (image.name, project.name, project.domain_id))
if not CONF.dry_run:
member = conn.image.add_member(image.id, member_id=project.id)
if not CONF.dry_run and member.status != "accepted":
logging.info("accept - %s - %s (%s)" % (image.name, project.name, project.domain_id))
conn.image.update_member(member, image.id, status="accepted")
if __name__ == '__main__':
CONF(sys.argv[1:], project=PROJECT_NAME)
conn = openstack.connect(cloud=CONF.cloud)
image = conn.get_image(CONF.image)
if CONF.type == "project":
domain = conn.get_domain(name_or_id=CONF.project_domain)
project = conn.get_project(CONF.target, domain_id=domain.id)
if CONF.action == "add":
share_image_with_project(conn, image, project)
elif CONF.action == "del":
unshare_image_with_project(conn, image, project)
elif CONF.type == "domain":
domain = conn.get_domain(name_or_id=CONF.target)
projects = conn.list_projects(domain_id=domain.id)
for project in projects:
if CONF.action == "add":
share_image_with_project(conn, image, project)
elif CONF.action == "del":
unshare_image_with_project(conn, image, project)
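
# Hedged example invocations (assuming the script is saved as share_image.py;
# the flag names follow the options registered above):
#   python share_image.py --image cirros --type project --target demo
#   python share_image.py --image cirros --type domain --target default --action del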
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch import GraphConv, HeteroGraphConv
from openhgnn.models.macro_layer.SemanticConv import SemanticAttention
from ..models.layers import homo_layer_dict
class HeteroGeneralLayer(nn.Module):
'''General wrapper for layers'''
def __init__(self, name, rel_names, dim_in, dim_out, dropout, act=None, has_bn=True,
has_l2norm=False, **kwargs):
super(HeteroGeneralLayer, self).__init__()
        self.has_l2norm = has_l2norm
self.layer = RelationConv(name, rel_names, dim_in, dim_out,
bias=not has_bn, **kwargs)
layer_wrapper = []
if has_bn:
layer_wrapper.append(nn.BatchNorm1d(dim_out))
if dropout > 0:
layer_wrapper.append(nn.Dropout(p=dropout))
if act is not None:
layer_wrapper.append(act)
self.post_layer = nn.Sequential(*layer_wrapper)
    def forward(self, g, h_dict):
        h_dict = self.layer(g, h_dict)
        # Apply batch norm / dropout / activation to every node type, and
        # optionally L2-normalize (previously post_layer ran only with l2norm).
        for name, batch_h in h_dict.items():
            batch_h = self.post_layer(batch_h)
            if self.has_l2norm:
                batch_h = F.normalize(batch_h, p=2, dim=-1)
            h_dict[name] = batch_h
        return h_dict
class BatchNorm1dNode(nn.Module):
'''General wrapper for layers'''
def __init__(self, dim_in):
super(BatchNorm1dNode, self).__init__()
self.bn = nn.BatchNorm1d(dim_in)
def forward(self, h):
h = self.bn(h)
return h
class RelationConv(nn.Module):
def __init__(self, name, rel_names, dim_in, dim_out, bias=False, **kwargs):
super(RelationConv, self).__init__()
macro_func = kwargs['macro_func']
if macro_func == 'attention':
macro_func = SemanticAttention(dim_out)
self.model = HeteroGraphConv({
rel: homo_layer_dict[name](dim_in, dim_out, bias=bias)
for rel in rel_names
}, aggregate=macro_func)
def forward(self, g, h_dict):
h_dict = self.model(g, h_dict)
return h_dict
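
# A hedged, minimal sketch (not from the original module) of what RelationConv
# wraps: dgl's HeteroGraphConv dispatching one homogeneous conv per relation.
def _example_hetero_conv():
    g = dgl.heterograph({
        ('user', 'follows', 'user'): (torch.tensor([0, 1]), torch.tensor([1, 2])),
        ('user', 'plays', 'game'): (torch.tensor([0, 2]), torch.tensor([0, 1])),
    })
    conv = HeteroGraphConv({
        'follows': GraphConv(4, 8, allow_zero_in_degree=True),
        'plays': GraphConv(4, 8, allow_zero_in_degree=True),
    }, aggregate='sum')
    h = {'user': torch.randn(3, 4), 'game': torch.randn(2, 4)}
    return conv(g, h)  # dict of outputs keyed by destination node type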
|
import cgi
import datetime
import jinja2
import json
import os
import pickle
import redis
import urllib
import uwsgi
import webapp2
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
# If your application is not hosted on the root of your domain, apply this
# prefix before all URLs:
ROUTE_PREFIX = '/dschat'
DEFAULT_NAME = 'Anon E. Mouse'
DEFAULT_EMAIL = 'not@authenticated.com'
DEFAULT_TOPIC = 'chat'
REDIS_CHANNEL = 'messages'
REDIS_ID_KEY = 'id'
REDIS_MESSAGES_KEY = 'messages'
class Message():
"""A main model for representing an individual sent Message."""
id = 0
name = DEFAULT_NAME
email = DEFAULT_EMAIL
    date = datetime.datetime.min
topic = DEFAULT_TOPIC
content = ""
def encode_messages(messages):
"""Transforms a list of Messages into escaped JSON for passing to HTML."""
messages_to_encode = []
for message in messages:
messages_to_encode.append({
'id': cgi.escape(str(message.id).zfill(10)),
'name': cgi.escape(message.name),
'email': cgi.escape(message.email),
'date': cgi.escape(message.date.strftime('%x %X')),
'topic': cgi.escape(message.topic),
'content': cgi.escape(message.content).replace("\n", "<br>")
})
return json.dumps(messages_to_encode)
class MainPage(webapp2.RequestHandler):
"""Generates the main web page."""
def get(self):
topic = self.request.get('topic', DEFAULT_TOPIC)
user = ''
token = 0
template_values = {
'name': DEFAULT_NAME,
'email': DEFAULT_EMAIL,
'topic': urllib.quote_plus(topic),
'route_prefix': ROUTE_PREFIX
}
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.write(template.render(template_values))
class SendMessage(webapp2.RequestHandler):
"""Handler for the /send POST request."""
def post(self):
# Create a Message and store it in the DataStore.
#
# We set the same parent key on the 'Message' to ensure each Message is
# in the same entity group. Queries across the single entity group will
# be consistent. However, the write rate to a single entity group should
# be limited to ~1/second.
message = Message()
topic = self.request.get('topic', DEFAULT_TOPIC)
message.topic = topic
message.name = self.request.get('name', DEFAULT_NAME)
message.email = self.request.get('email', DEFAULT_EMAIL)
message.content = self.request.get('content')
message.date = datetime.datetime.now()
r = redis.StrictRedis(host='localhost', port=6379, db=0)
# Note: ideally we'd do the increment and rpush under the protection of
# a transaction. If our program stops (crashes, dies, whatever) between
# these two lines or if the rpush fails then the database will be left
# in an inconsistent state and need to be manually corrected so the
# highest message id == the total # of messages.
message.id = r.incr(REDIS_ID_KEY)
r.rpush(REDIS_MESSAGES_KEY, pickle.dumps(message))
# Now that we've recorded the message in Redis, broadcast it to all open
# clients.
r.publish(REDIS_CHANNEL, encode_messages([message]))
class WebSocketConnection(webapp2.RequestHandler):
"""Handles all inbound websocket requests."""
def get(self):
def handle_request(r, msg):
"""Handle request for more messages received from websocket."""
request = json.loads(msg)
first = int(request["first_id"])
last = int(request["last_id"])
# Don't fetch more than 50 messages at once:
if (last > 0 and (last - 50 > first)) or (last < 0):
first = last - 50
pickled_messages = r.lrange(REDIS_MESSAGES_KEY, first, last)
messages = []
for pickled_message in pickled_messages:
message = pickle.loads(pickled_message)
messages.append(message)
uwsgi.websocket_send(encode_messages(messages))
# The first thing we need to do is take what seems like a normal HTTP
# request and upgrade it to be a websocket request:
uwsgi.websocket_handshake(os.getenv('HTTP_SEC_WEBSOCKET_KEY', ''),
os.getenv('HTTP_ORIGIN', ''))
# Open a connection to the Redis server, and ask to be notified of any
# messages on the channel REDIS_CHANNEL:
r = redis.StrictRedis(host='localhost', port=6379, db=0)
channel = r.pubsub()
channel.subscribe(REDIS_CHANNEL)
# We then want to go to sleep and wait for messages either from Redis,
# or from this websocket. So we need to know their file descriptors:
websocket_fd = uwsgi.connection_fd()
redis_fd = channel.connection._sock.fileno()
while True:
# Setup both FDs with epoll so we can wait for messages. Wake up
# every 3 seconds to ensure that ping messages get exchanged on the
# websocket connection to keep it alive:
uwsgi.wait_fd_read(websocket_fd, 3)
uwsgi.wait_fd_read(redis_fd)
# Put thread to sleep until message arrives or timeout. Note that
# if you do not use a suspend engine (such as ugreen) this will just
# immediately return without suspending, nothing will work, and you
# will get horribly confused.
uwsgi.suspend()
fd = uwsgi.ready_fd()
if fd > -1:
if fd == websocket_fd:
try:
msg = uwsgi.websocket_recv_nb()
if msg:
handle_request(r, msg)
                    except IOError:
# Websocket has failed in some way (such as a browser
# reload), just close it and let the app re-open if it
# is still there to do so:
return
elif fd == redis_fd:
# Got a message from Redis, pass it on to the browser
# through the websocket.
msg = channel.parse_response()
# Redis sends both control messages and user messages
# through this fd. Send only user-generated messages to all
# clients:
if msg[0] == b'message':
uwsgi.websocket_send(msg[2])
else:
# We got a timeout. Call websocket_recv_nb again to manage
# ping/pong:
try:
msg = uwsgi.websocket_recv_nb()
if msg:
handle_request(r, msg)
                except IOError:
# Websocket has failed in some way (such as a browser
# reload), just close it and let the app re-open if it is
# still there to do so:
return
real_app = webapp2.WSGIApplication([
(ROUTE_PREFIX, MainPage),
(ROUTE_PREFIX + '/', MainPage),
(ROUTE_PREFIX + '/send', SendMessage),
(ROUTE_PREFIX + '/websocket', WebSocketConnection),
], debug=True)
def fake_start_response(unused1, unused2):
pass
# When a client disconnects from websocket (say, via a page reload in the
# browser) both the webapp2 framework and the uwsgi websocket framework ends up
# calling start_response. This generates an annoying exception. So we provide
# a fake start_response to webapp2 for websocket connections only. (What a
# gross hack, feel free to tell me a better way...)
def app(environ, start_response):
if environ['PATH_INFO'] == ROUTE_PREFIX + '/websocket':
return real_app(environ, fake_start_response)
return real_app(environ, start_response)
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: types.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='types.proto',
package='tensorflow',
serialized_pb=_b('\n\x0btypes.proto\x12\ntensorflow*\xaa\x06\n\x08\x44\x61taType\x12\x0e\n\nDT_INVALID\x10\x00\x12\x0c\n\x08\x44T_FLOAT\x10\x01\x12\r\n\tDT_DOUBLE\x10\x02\x12\x0c\n\x08\x44T_INT32\x10\x03\x12\x0c\n\x08\x44T_UINT8\x10\x04\x12\x0c\n\x08\x44T_INT16\x10\x05\x12\x0b\n\x07\x44T_INT8\x10\x06\x12\r\n\tDT_STRING\x10\x07\x12\x10\n\x0c\x44T_COMPLEX64\x10\x08\x12\x0c\n\x08\x44T_INT64\x10\t\x12\x0b\n\x07\x44T_BOOL\x10\n\x12\x0c\n\x08\x44T_QINT8\x10\x0b\x12\r\n\tDT_QUINT8\x10\x0c\x12\r\n\tDT_QINT32\x10\r\x12\x0f\n\x0b\x44T_BFLOAT16\x10\x0e\x12\r\n\tDT_QINT16\x10\x0f\x12\x0e\n\nDT_QUINT16\x10\x10\x12\r\n\tDT_UINT16\x10\x11\x12\x11\n\rDT_COMPLEX128\x10\x12\x12\x0b\n\x07\x44T_HALF\x10\x13\x12\x0f\n\x0b\x44T_RESOURCE\x10\x14\x12\x0e\n\nDT_VARIANT\x10\x15\x12\r\n\tDT_UINT32\x10\x16\x12\r\n\tDT_UINT64\x10\x17\x12\x10\n\x0c\x44T_FLOAT_REF\x10\x65\x12\x11\n\rDT_DOUBLE_REF\x10\x66\x12\x10\n\x0c\x44T_INT32_REF\x10g\x12\x10\n\x0c\x44T_UINT8_REF\x10h\x12\x10\n\x0c\x44T_INT16_REF\x10i\x12\x0f\n\x0b\x44T_INT8_REF\x10j\x12\x11\n\rDT_STRING_REF\x10k\x12\x14\n\x10\x44T_COMPLEX64_REF\x10l\x12\x10\n\x0c\x44T_INT64_REF\x10m\x12\x0f\n\x0b\x44T_BOOL_REF\x10n\x12\x10\n\x0c\x44T_QINT8_REF\x10o\x12\x11\n\rDT_QUINT8_REF\x10p\x12\x11\n\rDT_QINT32_REF\x10q\x12\x13\n\x0f\x44T_BFLOAT16_REF\x10r\x12\x11\n\rDT_QINT16_REF\x10s\x12\x12\n\x0e\x44T_QUINT16_REF\x10t\x12\x11\n\rDT_UINT16_REF\x10u\x12\x15\n\x11\x44T_COMPLEX128_REF\x10v\x12\x0f\n\x0b\x44T_HALF_REF\x10w\x12\x13\n\x0f\x44T_RESOURCE_REF\x10x\x12\x12\n\x0e\x44T_VARIANT_REF\x10y\x12\x11\n\rDT_UINT32_REF\x10z\x12\x11\n\rDT_UINT64_REF\x10{')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_DATATYPE = _descriptor.EnumDescriptor(
name='DataType',
full_name='tensorflow.DataType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DT_INVALID', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_FLOAT', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_DOUBLE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT32', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT8', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT16', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT8', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_STRING', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_COMPLEX64', index=8, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT64', index=9, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_BOOL', index=10, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QINT8', index=11, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QUINT8', index=12, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QINT32', index=13, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_BFLOAT16', index=14, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QINT16', index=15, number=15,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QUINT16', index=16, number=16,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT16', index=17, number=17,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_COMPLEX128', index=18, number=18,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_HALF', index=19, number=19,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_RESOURCE', index=20, number=20,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_VARIANT', index=21, number=21,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT32', index=22, number=22,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT64', index=23, number=23,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_FLOAT_REF', index=24, number=101,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_DOUBLE_REF', index=25, number=102,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT32_REF', index=26, number=103,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT8_REF', index=27, number=104,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT16_REF', index=28, number=105,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT8_REF', index=29, number=106,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_STRING_REF', index=30, number=107,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_COMPLEX64_REF', index=31, number=108,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT64_REF', index=32, number=109,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_BOOL_REF', index=33, number=110,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QINT8_REF', index=34, number=111,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QUINT8_REF', index=35, number=112,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QINT32_REF', index=36, number=113,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_BFLOAT16_REF', index=37, number=114,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QINT16_REF', index=38, number=115,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QUINT16_REF', index=39, number=116,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT16_REF', index=40, number=117,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_COMPLEX128_REF', index=41, number=118,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_HALF_REF', index=42, number=119,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_RESOURCE_REF', index=43, number=120,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_VARIANT_REF', index=44, number=121,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT32_REF', index=45, number=122,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT64_REF', index=46, number=123,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=28,
serialized_end=838,
)
_sym_db.RegisterEnumDescriptor(_DATATYPE)
DataType = enum_type_wrapper.EnumTypeWrapper(_DATATYPE)
DT_INVALID = 0
DT_FLOAT = 1
DT_DOUBLE = 2
DT_INT32 = 3
DT_UINT8 = 4
DT_INT16 = 5
DT_INT8 = 6
DT_STRING = 7
DT_COMPLEX64 = 8
DT_INT64 = 9
DT_BOOL = 10
DT_QINT8 = 11
DT_QUINT8 = 12
DT_QINT32 = 13
DT_BFLOAT16 = 14
DT_QINT16 = 15
DT_QUINT16 = 16
DT_UINT16 = 17
DT_COMPLEX128 = 18
DT_HALF = 19
DT_RESOURCE = 20
DT_VARIANT = 21
DT_UINT32 = 22
DT_UINT64 = 23
DT_FLOAT_REF = 101
DT_DOUBLE_REF = 102
DT_INT32_REF = 103
DT_UINT8_REF = 104
DT_INT16_REF = 105
DT_INT8_REF = 106
DT_STRING_REF = 107
DT_COMPLEX64_REF = 108
DT_INT64_REF = 109
DT_BOOL_REF = 110
DT_QINT8_REF = 111
DT_QUINT8_REF = 112
DT_QINT32_REF = 113
DT_BFLOAT16_REF = 114
DT_QINT16_REF = 115
DT_QUINT16_REF = 116
DT_UINT16_REF = 117
DT_COMPLEX128_REF = 118
DT_HALF_REF = 119
DT_RESOURCE_REF = 120
DT_VARIANT_REF = 121
DT_UINT32_REF = 122
DT_UINT64_REF = 123
DESCRIPTOR.enum_types_by_name['DataType'] = _DATATYPE
# @@protoc_insertion_point(module_scope)
|
import statsapi
import pandas as pd
# logging
import logging
logger = logging.getLogger('statsapi')
logger.setLevel(logging.DEBUG)
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s - %(levelname)8s - %(name)s(%(thread)s) - %(message)s")
ch.setFormatter(formatter)
rootLogger.addHandler(ch)
rookie_hr_leaders = statsapi.league_leaders('homeRuns', season=2021, playerPool='rookies', limit=15)
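# league_leaders returns a pre-formatted text table by default; printing it is
# an illustrative addition, not part of the original snippet.
print(rookie_hr_leaders)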
|
from telegram import ReplyKeyboardRemove, Update
from telegram.ext import CallbackContext, CallbackQueryHandler, ConversationHandler, Filters, MessageHandler
from bot.settings import settings
from bot.utils import get_log
from bot.commands._states import (
CANCEL,
START,
)
from ._utils import require_user
log = get_log(__name__)
PASSWORD = 0
@require_user
def start(update: Update, context: CallbackContext):
log.debug('Taken command `start`')
user = update.effective_user
if settings.OWNER:
if user.id == settings.OWNER:
update.message.reply_markdown(
'Greetings, my master!',
reply_markup=ReplyKeyboardRemove()
)
return ConversationHandler.END
else:
update.message.reply_markdown(
'You are not my master!',
reply_markup=ReplyKeyboardRemove()
)
return ConversationHandler.END
update.message.reply_markdown(
        'Hi! Bot not configured. Say the password '
        'so I can be sure you are my master.',
reply_markup=ReplyKeyboardRemove()
)
log.debug('Password next step `%s`', PASSWORD)
return PASSWORD
@require_user
def password(update: Update, context: CallbackContext):
log.debug('Process password')
user = update.effective_user
if settings.OWNER:
if user.id == settings.OWNER:
update.message.reply_markdown(
'Greetings, my master!',
reply_markup=ReplyKeyboardRemove()
)
return ConversationHandler.END
else:
update.message.reply_markdown(
'You are not my master!',
reply_markup=ReplyKeyboardRemove()
)
return ConversationHandler.END
text = update.effective_message.text.strip()
if text == settings.PASSWORD:
settings.OWNER = user.id
text = 'Greetings, my master! I\'m at your service!'
if not settings.VK_APP_ID or not settings.VK_APP_TOKEN:
text += '\n\nSend /config to configure me.'
update.message.reply_markdown(
text,
reply_markup=ReplyKeyboardRemove()
)
else:
update.message.reply_markdown(
'Nope! Think better!',
reply_markup=ReplyKeyboardRemove()
)
return ConversationHandler.END
def cancel(update: Update, context: CallbackContext):
user = update.effective_user
log.debug("User %s canceled the conversation.", user.first_name)
update.message.reply_markdown('As you wish 🤷🏻♂️',
reply_markup=ReplyKeyboardRemove())
return ConversationHandler.END
handler = ConversationHandler(
entry_points=[CallbackQueryHandler(start, pattern='^' + str(START) + '$')],
states={
PASSWORD: [MessageHandler(Filters.update.message, password)],
},
fallbacks=[CallbackQueryHandler(cancel, pattern='^' + str(CANCEL) + '$')],
    per_message=False,  # states include a MessageHandler, so per-message tracking cannot be used
)
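
# Hedged wiring sketch (assumes a python-telegram-bot v13 Updater named
# `updater` is created elsewhere in the bot package):
#   updater.dispatcher.add_handler(handler)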
|
# Overall complexity : O(n ** 2)
def unique1(S):
for j in range(len(S)):
for k in range(j+1, len(S)):
if S[j] == S[k]:
return False
return True
# The 1st iteration of the outer loop causes n-1 iterations of the inner loop,
# the 2nd iteration causes n-2 iterations, and so on.
# complexity: (n-1) + (n-2) + (n-3) + ... + 2 + 1 = (n-1)n/2 = O(n ** 2)
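
# For contrast, a hedged sketch of the usual O(n)-expected alternative (an
# illustrative addition): hash the elements into a set and compare sizes.
def unique2(S):
    """Return True if all elements of S are distinct."""
    return len(set(S)) == len(S)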
if __name__ == "__main__":
a1 = [1, 2, 3, 5, 7, 9, 4]
print("The elements of {} are {}".format(a1, 'unique' if unique1(a1) else 'not unique'))
a2 = [1, 2, 3, 5, 7, 9, 1]
print("The elements of {} are {}".format(a2, 'unique' if unique1(a2) else 'not unique'))
|
import django_filters
from django import forms
from django.forms.models import BaseInlineFormSet, inlineformset_factory
from . import models
class CheckFilterForm(forms.Form):
"""Additional filter validations."""
def clean(self):
cleaned_data = self.cleaned_data
start = cleaned_data.get('start')
end = cleaned_data.get('end')
if start is not None and end is not None:
if start > end:
raise forms.ValidationError(
'End date must be greater than start date.')
elif (end - start).total_seconds() > 60 * 60 * 24:
raise forms.ValidationError(
'Start to end must be less than one day.')
return cleaned_data
class CheckResultFilter(django_filters.FilterSet):
"""Filter check results for a time range."""
start = django_filters.DateTimeFilter(
name='checked_on', lookup_type='gte', required=True)
end = django_filters.DateTimeFilter(
name='checked_on', lookup_type='lte', required=True)
class Meta:
model = models.CheckResult
form = CheckFilterForm
order_by = ('-checked_on', )
class BaseDomainCheckFormSet(BaseInlineFormSet):
"""Additional validations for required domain checks."""
def clean(self):
super().clean()
active = 0
for form in self.forms:
if form.is_valid() and form not in self.deleted_forms:
if form.cleaned_data.get('is_active', False):
active += 1
if active == 0:
msg = 'A domain must have at least one active check.'
raise forms.ValidationError(msg)
DomainCheckFormSet = inlineformset_factory(
parent_model=models.Domain, model=models.DomainCheck,
fields=('protocol', 'path', 'method', 'is_active', ),
formset=BaseDomainCheckFormSet,
extra=3, can_delete=False,
max_num=3, validate_max=True,
min_num=1, validate_min=True,
)
class DomainForm(forms.ModelForm):
"""Form to allow users to create/edit their own domains."""
class Meta:
model = models.Domain
fields = ('name', )
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.checks = DomainCheckFormSet(
instance=self.instance, prefix='checks',
data=self.data if self.is_bound else None)
def is_valid(self):
domain_valid = super().is_valid()
checks_valid = self.checks.is_valid()
return domain_valid and checks_valid
def save(self, commit=True):
domain = super().save(commit=commit)
domain._checks = self.checks.save(commit=commit)
return domain
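
# A hedged usage sketch (not part of the original module): a view would feed GET
# parameters into the FilterSet and read the filtered queryset from `.qs`.
def example_check_result_filter(request):
    f = CheckResultFilter(request.GET, queryset=models.CheckResult.objects.all())
    return f.qs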
|
TrainingDays = 6
epochs = 50
avg_days = 5
std_days = 5
avg_window = 5
std_window = 5
|
import huggingface_hub
base_model = "sentence-transformers/paraphrase-mpnet-base-v2"
revision = "a867aefa094c578256b01667f75d841e5b7e0eaf"
model_path = huggingface_hub.snapshot_download(base_model, revision=revision)
print(model_path)
|
#!/usr/bin/env python
import os
import argparse
import pandas
import numpy
pandas.set_option('display.max_columns', None) # or 1000
pandas.set_option('display.max_rows', None) # or 1000
pandas.set_option('display.max_colwidth', 300) # or 199
def main():
parser = argparse.ArgumentParser(description="Process the results of an experiment.")
parser.add_argument("experiment")
arguments = parser.parse_args()
path = f"experiments/{arguments.experiment}"
if not os.path.exists(path):
        raise SystemExit(f"Path {path} does not exist.")
# For efficiency, one should generate the results from the parts without merging them.
files = [file for file in os.listdir(path) if os.path.isfile(os.path.join(path, file))]
frames = []
for file in files:
device, experiment, _ = file.split(".")
frame = pandas.read_csv(
os.path.join(path, file),
usecols=["group_index", "variable", "value_i"], dtype={"value_i": "Int64"}
)
frame["experiment"] = experiment
frames.append(frame)
dataframe = pandas.concat(frames)
frames = None
dataframe = dataframe[pandas.notna(dataframe['group_index'])]
dataframe["variable"], dataframe['iteration'] = zip(*dataframe.apply(lambda row: _split_fun(row), axis=1))
dataframe.set_index(["group_index", "iteration", "variable"], inplace=True)
dataframe = dataframe.groupby(["group_index", "iteration", "variable", "experiment"]).sum()
print(dataframe)
dataframe = dataframe['value_i'].unstack("variable")
dataframe['messaging_influence'] = (dataframe['remote'] - dataframe['processing']) / dataframe['latency']
dataframe['remote_transfer'] = dataframe['remote'] + dataframe['processing']
dataframe = dataframe.groupby("group_index").agg([
numpy.mean,
numpy.std,
"count"
])
dataframe.columns = dataframe.columns.map('_'.join)
dataframe.to_csv(f"{arguments.experiment}.csv")
def _split_fun(row):
if("_" in row.variable):
variable, iteration = row.variable.split("_")
else:
variable = row.variable
iteration = 0
return (variable, int(iteration))
def _percentile_factory(perc):
"""Percentile function usable within a group.
Source: https://stackoverflow.com/a/54593214
"""
def percentile_(values):
return numpy.percentile(values, perc)
percentile_.__name__ = f"percentile_{perc}"
return percentile_
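
# Hedged usage sketch: the factory builds named aggregators that can sit next to
# mean/std in the groupby aggregation above, e.g.
#   dataframe.groupby("group_index").agg([numpy.mean, _percentile_factory(95)])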
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
# @Author: yulidong
# @Date: 2019-07-27 01:06:36
# @Last Modified by: yulidong
# @Last Modified time: 2019-08-04 22:34:24
""" Training perception and control """
import argparse
from os.path import join, exists
from os import mkdir
import matplotlib.pyplot as plt
import torch
import torch.utils.data
from torch import optim
from torch.nn import functional as F
from torchvision import transforms
from torchvision.utils import save_image
import numpy as np
from models.vae import VAE
from models.action_vae import VAE_a
from models.controller import Controller
import visdom
from utils.misc import save_checkpoint
from utils.misc import LSIZE, RED_SIZE
## WARNING : THIS SHOULD BE REPLACED WITH PYTORCH 0.5
from utils.learning import EarlyStopping
from utils.learning import ReduceLROnPlateau
from data.loaders_fuse import RolloutObservationDataset
parser = argparse.ArgumentParser(description='VAE Trainer')
parser.add_argument('--batch-size', type=int, default=64*8, metavar='N',
                    help='input batch size for training (default: 512)')
parser.add_argument('--epochs', type=int, default=5000, metavar='N',
                    help='number of epochs to train (default: 5000)')
parser.add_argument('--logdir', default='log',type=str, help='Directory where results are logged')
parser.add_argument('--noreload', action='store_true',
help='Best model is not reloaded if specified')
parser.add_argument('--nosamples', action='store_true',
help='Does not save samples during training if specified')
args = parser.parse_args()
cuda = torch.cuda.is_available()
learning_rate=1e-3
torch.manual_seed(111)
# Fix numeric divergence due to bug in Cudnn
torch.backends.cudnn.benchmark = True
device = torch.device("cuda" if cuda else "cpu")
transform_train = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((RED_SIZE, RED_SIZE)),
# transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((RED_SIZE, RED_SIZE)),
transforms.ToTensor(),
])
trained=0
#model = VAE(3, LSIZE).to(device)
model=VAE(3, LSIZE)
model=torch.nn.DataParallel(model,device_ids=range(8))
model.cuda()
optimizer = optim.Adam(model.parameters(),lr=learning_rate,betas=(0.9,0.999))
# model_p=VAE_a(7, LSIZE)
# model_p=torch.nn.DataParallel(model_p,device_ids=range(8))
# model_p.cuda()
# optimizer_p = optim.Adam(model_p.parameters(),lr=learning_rate,betas=(0.9,0.999))
controller=Controller(LSIZE,3)
controller=torch.nn.DataParallel(controller,device_ids=range(8))
controller=controller.cuda()
optimizer_a = optim.SGD(controller.parameters(),lr=learning_rate*10)
# scheduler = ReduceLROnPlateau(optimizer, 'min', factor=0.5, patience=5)
# earlystopping = EarlyStopping('min', patience=30)
vis = visdom.Visdom(env='fuse_train')
current_window = vis.image(
np.random.rand(64, 64),
opts=dict(title='current!', caption='current.'),
)
recon_window = vis.image(
np.random.rand(64, 64),
opts=dict(title='Reconstruction!', caption='Reconstruction.'),
)
mask_window = vis.image(
np.random.rand(64, 64),
opts=dict(title='mask!', caption='mask.'),
)
# future_window = vis.image(
# np.random.rand(64, 64),
# opts=dict(title='future!', caption='future.'),
# )
# pre_window = vis.image(
# np.random.rand(64, 64),
# opts=dict(title='prediction!', caption='prediction.'),
# )
loss_window = vis.line(X=torch.zeros((1,)).cpu(),
Y=torch.zeros((1)).cpu(),
opts=dict(xlabel='minibatches',
ylabel='Loss',
title='Training Loss',
legend=['Loss']))
lossc_window = vis.line(X=torch.zeros((1,)).cpu(),
Y=torch.zeros((1)).cpu(),
opts=dict(xlabel='minibatches',
ylabel='Loss',
title='Reconstruction Loss',
legend=['Reconstruction Loss']))
lossa_window = vis.line(X=torch.zeros((1,)).cpu(),
Y=torch.zeros((1)).cpu(),
opts=dict(xlabel='minibatches',
ylabel='Loss',
title='controller Loss',
legend=['controller Loss']))
# lossp_window = vis.line(X=torch.zeros((1,)).cpu(),
# Y=torch.zeros((1)).cpu(),
# opts=dict(xlabel='minibatches',
# ylabel='Loss',
# title='prediction Loss',
# legend=['prediction Loss']))
# Reconstruction + KL divergence losses summed over all elements and batch
def loss_function(recon_x, x, mu, sigma):
""" VAE loss function """
#BCE = torch.mean(torch.sum(torch.pow(recon_x-x,2),dim=(1,2,3)))
mask= torch.sum(x,dim=1,keepdim=True)
mask= (mask<torch.mean(mask.view(mask.shape[0],1,-1))).float().cuda()
mask_distance=mask
#mask_distance[:,:,-16:,:]=mask_distance[:,:,-16:,:]*10
#mask_distance[:,:,-32:-16,:]=mask_distance[:,:,-32:-16,:]*5
#mask_distance[:,:,-48:-32,:]=mask_distance[:,:,-48:-32,:]*2
# BCE = F.mse_loss(recon_x*mask_distance,x*mask_distance,reduction='sum')/torch.sum(mask)+ \
# 0.1*F.mse_loss(recon_x*(1-mask),x*(1-mask),reduction='sum')/torch.sum(1-mask)
BCE = F.mse_loss(recon_x*mask_distance,x*mask_distance,reduction='sum')+ \
0.1*F.mse_loss(recon_x*(1-mask),x*(1-mask),reduction='sum')
BCE=BCE/x.shape[0]
#print(torch.mean(recon_x).item())
# see Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
sigma=torch.max(sigma,1e-4*torch.ones_like(sigma).cuda())
#print(torch.mean(torch.pow(sigma,2)).item(),torch.mean(torch.pow(mu,2)).item())
KLD = -0.5 * torch.sum(1 + torch.log(torch.pow(sigma,2)) - torch.pow(mu,2) - torch.pow(sigma,2),dim=1)
#KLD=torch.max(KLD,torch.ones_like(KLD).cuda()*LSIZE*0.5)
KLD=torch.mean(KLD)
# print(KLD.shape,sigma.shape,mu.shape)
# exit()
print(BCE.item(),KLD.item())
return BCE + KLD
def train(epoch,vae_dir,training_sample):
""" One training epoch """
model.train()
# model_p.train()
controller.train()
train_loss = []
for batch_idx, [data,action,pre] in enumerate(train_loader):
#torch.autograd.set_detect_anomaly(True)
data = data.cuda()
action=action.cuda()
pre=pre.cuda()
optimizer.zero_grad()
# optimizer_p.zero_grad()
optimizer_a.zero_grad()
recon_c, mu_c, var_c = model(data)
loss_c = loss_function(recon_c, data, mu_c, var_c)
recon_f, mu_f, var_f = model(pre)
loss_f = loss_function(recon_f, pre, mu_f, var_f)
# recon_p, mu_p, var_p = model_p(torch.cat([data,action],dim=1))
# loss_p = loss_function(recon_p, pre, mu_p, var_p)
mu, sigma = mu_c.detach().cuda(), var_c.detach().cuda()
#sigma = torch.exp(sigma/2.0)
epsilon = torch.randn_like(sigma)
z=mu+sigma*epsilon
z=z.cuda().view(data.shape[0],-1).detach()
action_p=controller(z)
#print(action[:,:,0,0])
loss_a=F.mse_loss(action_p,action[:,:3,11,11],reduction='mean')
#action_pr=torch.cat([action_p.detach().view(action_p.shape[0],3,1,1).expand(action_p.shape[0],3,action.shape[-2],action.shape[-1]),action[:,2:3,...]])
# action_pr=action_p.detach().view(action_p.shape[0],3,1,1).expand(action_p.shape[0],3,action.shape[-1],action.shape[-2])
# action_pr=torch.cat([action_pr,action[:,2:3,:,:]],dim=1)
# recon_pr, mu_pr, var_pr = model_p(torch.cat([data,action_pr],dim=1))
# loss_pr = loss_function(recon_pr, pre, mu_pr, var_pr)
loss=loss_c+loss_f+loss_a
if torch.isnan(loss) or torch.isinf(loss):
print('nan or inf error:',loss.item() )
continue
loss.backward()
#print(loss.item())
train_loss.append(loss.item())
optimizer.step()
optimizer_a.step()
ground = data[0,...].data.cpu().numpy().astype('float32')
ground = np.reshape(ground, [3,64, 64])
vis.image(
ground,
opts=dict(title='ground!', caption='ground.'),
win=current_window,
)
image = recon_c[0,...].data.cpu().numpy().astype('float32')
image = np.reshape(image, [3, 64, 64])
vis.image(
image,
opts=dict(title='Reconstruction!', caption='Reconstruction.'),
win=recon_window,
)
image=np.sum(ground,axis=0)
image=(image<np.mean(image)).astype('float32')
vis.image(
image,
opts=dict(title='Reconstruction!', caption='Reconstruction.'),
win=mask_window,
)
# ground = pre[0,...].data.cpu().numpy().astype('float32')
# ground = np.reshape(ground, [3,64, 64])
# vis.image(
# ground,
# opts=dict(title='future!', caption='ground.'),
# win=future_window,
# )
# image = recon_p[0,...].data.cpu().numpy().astype('float32')
# image = np.reshape(image, [3, 64, 64])
# vis.image(
# image,
# opts=dict(title='prediction!', caption='prediction.'),
# win=pre_window,
# )
# if loss.item()>5:
# loss=loss/loss
# if loss_c>5:
# loss_c=loss_c/loss_c
# if loss_a>5:
# loss_a=loss_c/loss_a
# if loss_c>5:
# loss_p=loss_c/loss_p
vis.line(
X=torch.ones(1).cpu() *training_sample,
Y=loss.item() * torch.ones(1).cpu(),
win=loss_window,
update='append')
vis.line(
X=torch.ones(1).cpu() * training_sample,
Y=loss_c.item() * torch.ones(1).cpu(),
win=lossc_window,
update='append')
vis.line(
X=torch.ones(1).cpu() * training_sample,
Y=loss_a.item() * torch.ones(1).cpu(),
win=lossa_window,
update='append')
training_sample+=1
if batch_idx % 1 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}% training_sample:{:.0f}] Loss_c: {:.4f} Loss_f: {:.4f} Loss_a: {:.4f} '.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
len(data) * batch_idx / len(train_loader)/10,training_sample,
loss_c.item(),loss_f.item(),loss_a.item()))
# if (batch_idx+1)%1000==0:
# best_filename = join(vae_dir, 'best.pkl')
# filename_vae = join(vae_dir, 'vae_checkpoint_'+str(epoch)+'.pkl')
# filename_pre = join(vae_dir, 'pre_checkpoint_'+str(epoch)+'.pkl')
# filename_control = join(vae_dir, 'contorl_checkpoint_'+str(epoch)+'.pkl')
# # is_best = not cur_best or test_loss < cur_best
# # if is_best:
# # cur_best = test_loss
# is_best=False
# save_checkpoint({
# 'epoch': epoch,
# 'state_dict': model.state_dict(),
# 'optimizer': optimizer.state_dict(),
# }, is_best, filename_vae, best_filename)
# save_checkpoint({
# 'epoch': epoch,
# 'state_dict': model_p.state_dict(),
# 'optimizer': optimizer_p.state_dict(),
# }, is_best, filename_pre, best_filename)
# save_checkpoint({
# 'epoch': epoch,
# 'state_dict': controller.state_dict(),
# 'optimizer': optimizer_a.state_dict(),
# }, is_best, filename_control, best_filename)
return training_sample
def test():
""" One test epoch """
model.eval()
test_loss = []
with torch.no_grad():
for batch_idx, [data,action,pre] in enumerate(train_loader):
data = data.cuda()
recon_batch, mu, var = model(data)
test_loss.append(loss_function(recon_batch, data, mu, var).item())
ground = data[0, ...].data.cpu().numpy().astype('float32')
ground = np.reshape(ground, [3, 64, 64])
vis.image(
ground,
opts=dict(title='ground!', caption='ground.'),
win=current_window,
)
image = recon_batch[0,...].data.cpu().numpy().astype('float32')
image = np.reshape(image, [3, 64, 64])
vis.image(
image,
opts=dict(title='image!', caption='image.'),
win=recon_window,
)
test_loss =np.mean(test_loss)
print('====> Test set loss: {:.4f}'.format(test_loss))
return test_loss
# check vae dir exists, if not, create it
vae_dir = join(args.logdir, 'vae_fuse')
if not exists(vae_dir):
mkdir(vae_dir)
mkdir(join(vae_dir, 'samples'))
# reload_file = join(vae_dir, 'best.pkl')
# if not args.noreload and exists(reload_file):
# state = torch.load(reload_file)
# print("Reloading model at epoch {}"
# ", with test error {}".format(
# state['epoch'],
# state['precision']))
# model.load_state_dict(state['state_dict'])
# optimizer.load_state_dict(state['optimizer'])
# trained=state['epoch']
#trained=0
# scheduler.load_state_dict(state['scheduler'])
# earlystopping.load_state_dict(state['earlystopping'])
state = torch.load('/home/ld/gym-car/log/vae/contorl_checkpoint_52.pkl')
controller.load_state_dict(state['state_dict'])
optimizer_a.load_state_dict(state['optimizer'])
print('controller load success')
# state = torch.load('/home/ld/gym-car/log/vae/pre_checkpoint_52.pkl')
# model_p.load_state_dict(state['state_dict'])
# optimizer_p.load_state_dict(state['optimizer'])
# print('prediction load success')
state = torch.load('/home/ld/gym-car/log/vae/vae_checkpoint_52.pkl')
model.load_state_dict(state['state_dict'])
optimizer.load_state_dict(state['optimizer'])
trained=state['epoch']
print('vae load success')
trained=0
cur_best = None
all_data=6000
sample_data=1000
sample_buff=all_data/sample_data
sample_count=0
training_sample=0
for epoch in range(trained+1, args.epochs + 1):
dataset_train = RolloutObservationDataset(root='/data/corner/',root2='/data/result/',transform=transform_train, train=True,sample_data=sample_data,sample_count=sample_count)
#dataset_test = RolloutObservationDataset('/data/result/',transform_test, train=False,sample_data=sample_data,sample_count=sample_count)
sample_count+=1
if sample_count==sample_buff:
sample_count=0
train_loader = torch.utils.data.DataLoader(
dataset_train, batch_size=args.batch_size, shuffle=True, num_workers=32,drop_last=True)
# test_loader = torch.utils.data.DataLoader(
# dataset_test, batch_size=args.batch_size, shuffle=False, num_workers=32,drop_last=True)
training_sample=train(epoch,vae_dir,training_sample)
#exit()
#test_loss = test()
# scheduler.step(test_loss)
# earlystopping.step(test_loss)
# checkpointing
best_filename = join(vae_dir, 'best.pkl')
filename_vae = join(vae_dir, 'vae_checkpoint_'+str(epoch)+'.pkl')
filename_pre = join(vae_dir, 'pre_checkpoint_'+str(epoch)+'.pkl')
filename_control = join(vae_dir, 'contorl_checkpoint_'+str(epoch)+'.pkl')
# is_best = not cur_best or test_loss < cur_best
# if is_best:
# cur_best = test_loss
is_best=False
save_checkpoint({
'epoch': epoch,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, is_best, filename_vae, best_filename)
# save_checkpoint({
# 'epoch': epoch,
# 'state_dict': model_p.state_dict(),
# 'optimizer': optimizer_p.state_dict(),
# }, is_best, filename_pre, best_filename)
save_checkpoint({
'epoch': epoch,
'state_dict': controller.state_dict(),
'optimizer': optimizer_a.state_dict(),
}, is_best, filename_control, best_filename)
if not args.nosamples:
print('saving image')
with torch.no_grad():
sample = torch.randn(RED_SIZE, LSIZE).cuda()
sample = model.module.decoder(sample).cpu()
save_image(np.reshape(sample,[RED_SIZE, 3, RED_SIZE, RED_SIZE]),
join(vae_dir, 'samples/sample_' + str(epoch) + '.png'))
# if earlystopping.stop:
# print("End of Training because of early stopping at epoch {}".format(epoch))
# break
|
import time
import numpy as np
import tensorflow as tf
from actor_critic import ActorCritic
def pd_test(env_fn, policy, load_path):
env = env_fn()
actions = env.unwrapped.action_list
env._seed(int(time.time()))
obs = env.reset()
obs = np.expand_dims(obs, axis=0)
action_list = []
with tf.Session() as sess:
actor_critic = ActorCritic(sess, policy, env.observation_space.shape, env.action_space, 1, 5)
if load_path:
actor_critic.load(load_path)
else:
sess.run(tf.global_variables_initializer())
print('WARNING: No Model Loaded!')
print(env.unwrapped.scramble_current)
d = False
while not d:
print('-------------------------------------------------')
print('Current Observation')
env.render()
a, v, neg = actor_critic.act(obs, stochastic=True)
print('')
print('action: ', actions[a[0]])
print('value: ', v)
print('neglogp: ', neg)
print('pd: ')
for ac, pd in zip(actions, actor_critic.step_model.logits(obs)[0][0]):
print('\t', ac, pd)
obs, r, d, _ = env.step(a[0])
print('r: ', r)
obs = np.expand_dims(obs, axis=0)
env.render()
env.close()
|
from typing import List, Tuple, Union
import pweave
def _import_block(sparclur_path):
if sparclur_path is None:
sparclur_path_import = ''
else:
sparclur_path_import = """
module_path = os.path.abspath('%s')
if module_path not in sys.path:
sys.path.append(module_path)
""" % sparclur_path
imports = """
```python; echo=False, name='Imports'
%%capture
import plotly.graph_objects as go
import plotly
import itertools
import sys
import os
import numpy as np
import tempfile
import fitz
from IPython.display import Image
{sparclur_import}
from sparclur.utils.tools import gen_flatten
from sparclur.parsers.present_parsers import get_sparclur_texters, \
get_sparclur_renderers, \
get_sparclur_tracers, \
get_sparclur_parsers
from sparclur.prc.viz import PRCViz
fitz.TOOLS.mupdf_display_errors(False);
```\n\n
""".format(sparclur_import=sparclur_path_import)
return imports
def _ptc_block(file_path, idx, num_files):
block = """
```python; echo=False, name='PTC %i/%i'
tracers = {parser.get_name(): parser for parser in get_sparclur_tracers()}
col_names = list(tracers.keys())
col_names.sort()
cleaned_messages = []
kwargs ={'doc': '%s'}
for parser_name in col_names:
if parser_name == 'MuPDF':
kwargs['parse_streams'] = True
elif 'parse_streams' in kwargs:
del kwargs['parse_streams']
parser = tracers[parser_name](**kwargs)
cleaned_messages.append(parser.cleaned)
num_lines = min(max([len(messages) for messages in cleaned_messages]), 20)
cell_entries = np.ndarray(shape=(len(col_names), num_lines), dtype=object)
cell_entries.fill('')
for (col, parser_messages) in enumerate(cleaned_messages):
overflow = len(parser_messages) - 19
for (row, entry) in enumerate(parser_messages.items()):
if overflow > 1 and row == 19:
cell_entries[col, row] = str(overflow) + ' more messages'
elif row < 20:
cell_entries[col, row] = str(entry[0])+': '+str(entry[1])
fig = go.Figure(data=[go.Table(
header=dict(values=col_names, fill_color='lightsteelblue'),
cells=dict(values=cell_entries, fill_color='lavender'))])
fig.update_layout(height=(num_lines + 2) * 50 + 100, margin=dict(r=5, l=5, t=5, b=5))
im_bytes = fig.to_image(format="png")
display(Image(im_bytes))
```
""" % (idx, num_files, file_path)
return block
def _pxc_block(file_path, idx, num_files):
block = """
```python; echo=False, name='PXC %i/%i'
RENDERERS = [parser.get_name() for parser in get_sparclur_renderers()]
kwargs = {'doc': '%s'}
texters = dict()
for parser in get_sparclur_texters():
if parser.get_name() in RENDERERS:
texters[parser.get_name()] = parser(dpi=72, **kwargs)
else:
texters[parser.get_name()] = parser(**kwargs)
present_texters = list(texters.keys())
present_texters.sort()
txt_idx = {txtr:idx for (idx,txtr) in enumerate(present_texters)}
metrics = dict()
comparisons = list(itertools.combinations(texters, 2))
for combo in comparisons:
try:
metrics[frozenset(combo)] = texters[combo[0]].compare_text(texters[combo[1]]);
except:
metrics[frozenset(combo)] = None
data = np.zeros((len(texters)+1, len(texters)), dtype=object)
data.fill('')
for row in texters.keys():
for col in texters.keys():
if row == col:
data[txt_idx[row]+1, txt_idx[col]] = 1.0
else:
try:
data[txt_idx[row]+1, txt_idx[col]] = 1 - metrics[frozenset((row, col))];
data[txt_idx[col]+1, txt_idx[row]] = 1 - metrics[frozenset((row, col))];
except:
data[txt_idx[row]+1, txt_idx[col]] = -1
data[txt_idx[col]+1, txt_idx[row]] = -1
for name in present_texters:
data[0, txt_idx[name]] = name
header = present_texters
header.insert(0, '')
format = [[None]]
format = format + [['.3f']] * len(present_texters)
fig = go.Figure(data=[go.Table(
header=dict(values=header,
fill_color='lightsteelblue'),
cells=dict(values=data,
fill_color=['lightsteelblue'] + ['lavender' for _ in range(5)],
format=format))])
fig.update_layout(height=150, margin=dict(r=5, l=5, t=5, b=5))
im_bytes = fig.to_image(format="png")
display(Image(im_bytes))
```
""" % (idx, num_files, file_path)
return block
def _prc_block(file_path, idx, num_files):
block = """
```python; echo=False, name='PRC %i/%i'
try:
viz = PRCViz('%s', dpi = 72)
ssims = [(page, ssim.ssim) for (_, doc) in viz._ssims.items() for (page, ssim) in doc.items()]
ssims.sort(key=lambda x: x[1])
page = ssims[0][0]
display(viz.plot_ssims())
display(viz.display(page))
except:
print('PRC Failed')
```
""" % (idx, num_files, file_path)
return block
def _parse_document_input(doc: Union[str, Tuple[str, str]]):
    if isinstance(doc, str):
        parsed_doc = (doc, None)
    elif isinstance(doc, tuple) and len(doc) < 2:
        parsed_doc = (doc[0], None)
    else:
        parsed_doc = (doc[0], doc[1])
    return parsed_doc
class SparclurReport:
"""
Generate a Pweave document of the SPARCLUR results over a collection of documents. Pweave can be used to generate
a report of the SPARCLUR findings.
"""
    def __init__(self, docs: Union[str, List[str], Tuple[str, str], List[Tuple[str, str]]],
save_path: str,
kernel: str = "python3",
title: str = "SPARCLUR Report",
sparclur_path=None
):
"""
Parameters
----------
docs: str or List[str] or Tuple[str] or List[Tuple[str]]
Single path or list of paths to PDF's to be analyzed. Optionally can include secondary information
to be printed in the report as tuple of (path, comment).
        save_path: str
            The save path of the report
        kernel: str
            The IPython kernel to use for Pweave
        title: str
            The title of the report
        sparclur_path: str
            Optional path to a local SPARCLUR checkout to append to sys.path
        """
if not isinstance(docs, list):
docs = [docs]
self._docs = [_parse_document_input(doc) for doc in docs]
self._kernel = kernel
self._save_path = save_path
self._title = title
self._sparclur_path = sparclur_path
@property
def title(self):
return self._title
@title.setter
def title(self, t: str):
self._title = t
@property
def kernel(self):
return self._kernel
@kernel.setter
def kernel(self, k):
self._kernel = k
@property
def save_path(self):
return self._save_path
@save_path.setter
def save_path(self, sp: str):
self._save_path = sp
def generate_report(self):
"""
Runs report generation.
"""
num_files = len(self._docs)
pmd = "# %s\n\n" % self._title
pmd = pmd + _import_block(self._sparclur_path)
for idx, (file, info) in enumerate(self._docs):
if info is not None:
info = info + '\n\n'
else:
info = ''
file_name = file.split('/')[-1]
pmd = pmd + '___\n\n' + file_name + '\n\n' + info + 'Parser Traces\n\n' + _ptc_block(
file, idx, num_files) + '\n\nParser Text Comparator\n\n' + _pxc_block(
file, idx, num_files) + '\n\nPDF Renderer Comparator\n\n' + _prc_block(file, idx, num_files) + '\n\n'
with open(self._save_path, 'w') as out_file:
out_file.write(pmd)
pweave.weave(self._save_path, kernel=self._kernel)
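# A usage sketch (hypothetical paths; requires a running IPython kernel and the
# SPARCLUR parser dependencies to be installed):
#
#   report = SparclurReport(docs=[('/data/sample.pdf', 'Known-bad sample')],
#                           save_path='/tmp/report.pmd')
#   report.generate_report()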
|
#
# 682. Baseball Game
#
# Q: https://leetcode.com/problems/baseball-game/
# A: https://leetcode.com/problems/baseball-game/discuss/107929/C%2B%2B-and-Javascript-solutions
#
from typing import List
class Solution:
def calPoints(self, ops: List[str]) -> int:
s = []
for op in ops:
if op == "+":
s.append(s[-2] + s[-1])
elif op == "D":
s.append(2 * s[-1])
elif op == "C":
s.pop()
else:
s.append(int(op))
return sum(s)
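# Quick sanity check with the example from the problem statement:
# "5","2","C","D","+" leaves the stack [5, 10, 15], which sums to 30.
if __name__ == "__main__":
    assert Solution().calPoints(["5", "2", "C", "D", "+"]) == 30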
|
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# 2016 Georgia Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
# Tushar Krishna
from __future__ import print_function
from __future__ import absolute_import
from m5.params import *
from m5.objects import *
from common import FileSystemConfig
from .BaseTopology import SimpleTopology
# Creates a generic 3D Mesh assuming an equal number of cache
# and directory controllers.
# XYZ routing is enforced (using link weights)
# to guarantee deadlock freedom.
class VBL_MeshXYZ(SimpleTopology):
description='VBL_MeshXYZ'
def __init__(self, controllers):
self.nodes = controllers
# Makes a generic 3D mesh
# assuming an equal number of cache and directory cntrls
def makeTopology(self, options, network, IntLink, ExtLink, Router):
print("File: VBL_MeshXYZ.py")
nodes = self.nodes
num_routers = options.num_cpus
x_depth = options.mesh_cols
# default values for link latency and router latency.
# Can be over-ridden on a per link/router basis
link_latency = options.link_latency # used by simple and garnet
router_latency = options.router_latency # only used by garnet
# There must be an evenly divisible number of cntrls to routers
        # Also, obviously the number of rows must be <= the number of routers
cntrls_per_router, remainder = divmod(len(nodes), num_routers)
assert(x_depth > 0 and x_depth <= num_routers)
        if options.z_depth > 0:
            z_depth = options.z_depth
        else:
            z_depth = int(num_routers / x_depth / x_depth)
        if options.mesh_rows > 0:
            y_depth = options.mesh_rows
        else:
            y_depth = int(num_routers / x_depth / z_depth)
assert(z_depth * y_depth * x_depth == num_routers)
print("Total Number Routers: ", num_routers)
print("x_depth: ", x_depth)
print("y_depth: ", y_depth)
print("z_depth: ", z_depth)
# Create the routers in the mesh
routers = [Router(router_id=i, latency = router_latency) \
for i in range(num_routers)]
network.routers = routers
# link counter to set unique link ids
link_count = 0
# Add all but the remainder nodes to the list of nodes to be uniformly
# distributed across the network.
network_nodes = []
remainder_nodes = []
for node_index in range(len(nodes)):
if node_index < (len(nodes) - remainder):
network_nodes.append(nodes[node_index])
else:
remainder_nodes.append(nodes[node_index])
# Connect each node to the appropriate router
ext_links = []
for (i, n) in enumerate(network_nodes):
cntrl_level, router_id = divmod(i, num_routers)
assert(cntrl_level < cntrls_per_router)
ext_links.append(ExtLink(link_id=link_count, ext_node=n,
int_node=routers[router_id],
latency = link_latency))
link_count += 1
# Connect the remaining nodes to router 0. These should only be
# DMA nodes.
for (i, node) in enumerate(remainder_nodes):
assert(node.type == 'DMA_Controller')
assert(i < remainder)
ext_links.append(ExtLink(link_id=link_count, ext_node=node,
int_node=routers[0],
latency = link_latency))
link_count += 1
network.ext_links = ext_links
# Create the mesh links.
int_links = []
total=link_count
# East output to West input links (weight = 1)
for z in range(z_depth):
for x in range(x_depth):
for y in range(y_depth):
if (y + 1 < y_depth):
east_out = y + (x * y_depth) + (z * y_depth * x_depth)
                        west_in = (y + 1) + (x * y_depth) + (z * y_depth * x_depth)
int_links.append(IntLink(link_id=link_count,
src_node=routers[east_out],
dst_node=routers[west_in],
src_outport="East",
dst_inport="West",
latency = link_latency,
weight=1))
link_count += 1
print("\nNUM EAST-WEST LINKS = ", link_count-total)
total=link_count
# West output to East input links (weight = 1)
for z in range(z_depth):
for x in range(x_depth):
for y in range(y_depth):
if (y + 1 < y_depth):
east_in = y + (x * y_depth) + (z * y_depth * x_depth)
west_out = (y + 1) + (x * y_depth) + (z * y_depth * x_depth)
int_links.append(IntLink(link_id=link_count,
src_node=routers[west_out],
dst_node=routers[east_in],
src_outport="West",
dst_inport="East",
latency = link_latency,
weight=1))
link_count += 1
print("NUM WEST-EAST LINKS = ", link_count-total)
total=link_count
# North output to South input links (weight = 2)
for z in range(z_depth):
for x in range(x_depth):
for y in range(y_depth):
if (x + 1 < x_depth):
north_out = y + (x * y_depth) + (z * y_depth * x_depth)
south_in = y + ((x + 1) * y_depth) + (z * y_depth * x_depth)
int_links.append(IntLink(link_id=link_count,
src_node=routers[north_out],
dst_node=routers[south_in],
src_outport="North",
dst_inport="South",
latency = link_latency,
weight=2))
link_count += 1
print("NUM NORTH-SOUTH LINKS = ", link_count-total)
total=link_count
# South output to North input links (weight = 2)
for z in range(z_depth):
for x in range(x_depth):
for y in range(y_depth):
if (x + 1 < x_depth):
north_in = y + (x * y_depth) + (z * y_depth * x_depth)
south_out = y + ((x + 1) * y_depth) + (z * y_depth * x_depth)
int_links.append(IntLink(link_id=link_count,
src_node=routers[south_out],
dst_node=routers[north_in],
src_outport="South",
dst_inport="North",
latency = link_latency,
weight=2))
link_count += 1
print("NUM SOUTH-NORTH LINKS = ", link_count-total)
total=link_count
# Up output to Down input links (weight = 3)
for z in range(z_depth):
for y in range(y_depth):
for x in range(x_depth):
if (z + 1 < z_depth):
up_out = x + (y * x_depth) + (z * y_depth * x_depth)
down_in = x + (y * x_depth) + ((z + 1) * y_depth * x_depth)
int_links.append(IntLink(link_id=link_count,
src_node=routers[up_out],
dst_node=routers[down_in],
src_outport="Up",
dst_inport="Down",
latency = link_latency,
weight=3))
link_count += 1
print("NUM UP-DOWN LINKS = ", link_count-total)
total=link_count
# Down output to Up input links (weight = 3)
for z in range(z_depth):
for y in range(y_depth):
for x in range(x_depth):
if (z + 1 < z_depth):
up_in = x + (y * x_depth) + (z * y_depth * x_depth)
down_out = x + (y * x_depth) + ((z + 1) * y_depth * x_depth)
int_links.append(IntLink(link_id=link_count,
src_node=routers[down_out],
dst_node=routers[up_in],
src_outport="Down",
dst_inport="Up",
latency = link_latency,
weight=3))
link_count += 1
print("NUM DOWN-UP LINKS = ", link_count-total)
total=link_count
print("TOTAL NUM LINKS = ", len(int_links), "\n")
network.int_links = int_links
# Register nodes with filesystem
def registerTopology(self, options):
for i in range(options.num_cpus):
FileSystemConfig.register_node([i],
MemorySize(options.mem_size) // options.num_cpus, i)
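# Example invocation (hypothetical paths and flags; --mesh-cols and --z-depth
# are assumed to be registered in the local Options.py alongside the stock
# garnet options):
#
#   ./build/NULL/gem5.opt configs/example/garnet_synth_traffic.py \
#       --network=garnet2.0 --topology=VBL_MeshXYZ \
#       --num-cpus=64 --num-dirs=64 --mesh-cols=4 --z-depth=4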
|
import torch
from sklearn.metrics import r2_score
def my_metric(output, target):
with torch.no_grad():
#pred = torch.argmax(output, dim=1)
#assert pred.shape[0] == len(target)
#correct = 0
#correct += torch.sum(output == target).item()
output = output.cpu()
target = target.cpu()
output = output.detach().numpy()
target = target.detach().numpy()
        R2 = r2_score(target, output)  # sklearn expects (y_true, y_pred)
    return R2
'''
def my_metric2(output, target, k=3):
with torch.no_grad():
#pred = torch.topk(output, k, dim=1)[1]
#assert pred.shape[0] == len(target)
correct = 0
#for i in range(k):
correct += torch.sum(output == target).item()
return correct / len(target)
'''
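# A small self-check (random data, so the score itself is meaningless; it just
# exercises the tensor -> numpy conversion path):
if __name__ == "__main__":
    y_pred = torch.randn(16, 1)
    y_true = y_pred + 0.1 * torch.randn(16, 1)
    print("R2:", my_metric(y_pred, y_true))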
|
""" Activations
A collection of activations fn and modules with a common interface so that they can
easily be swapped. All have an `inplace` arg even if not used.
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch import nn as nn
from torch.nn import functional as F
def swish(x, inplace: bool = False):
"""Swish - Described in: https://arxiv.org/abs/1710.05941
"""
return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid())
_has_silu = 'silu' in dir(torch.nn.functional)
if _has_silu:
def nswish(x, inplace: bool = False):
return F.silu(x).mul_(1.676531339) if inplace else F.silu(x).mul(1.676531339)
else:
def nswish(x, inplace: bool = False):
"""Normalized Swish
"""
return x.mul_(x.sigmoid()).mul_(1.676531339) if inplace else x.mul(x.sigmoid()).mul(1.676531339)
class Swish(nn.Module):
def __init__(self, inplace: bool = False):
super(Swish, self).__init__()
self.inplace = inplace
def forward(self, x):
return swish(x, self.inplace)
class NSwish(nn.Module):
def __init__(self, inplace: bool = False):
super(NSwish, self).__init__()
self.inplace = inplace
def forward(self, x):
return nswish(x, self.inplace)
def mish(x, inplace: bool = False):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
NOTE: I don't have a working inplace variant
"""
return x.mul(F.softplus(x).tanh())
class Mish(nn.Module):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
"""
def __init__(self, inplace: bool = False):
super(Mish, self).__init__()
def forward(self, x):
return mish(x)
def sigmoid(x, inplace: bool = False):
return x.sigmoid_() if inplace else x.sigmoid()
# PyTorch has this, but not with a consistent inplace argument interface
class Sigmoid(nn.Module):
def __init__(self, inplace: bool = False):
super(Sigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return x.sigmoid_() if self.inplace else x.sigmoid()
def tanh(x, inplace: bool = False):
return x.tanh_() if inplace else x.tanh()
# PyTorch has this, but not with a consistent inplace argument interface
class Tanh(nn.Module):
def __init__(self, inplace: bool = False):
super(Tanh, self).__init__()
self.inplace = inplace
def forward(self, x):
return x.tanh_() if self.inplace else x.tanh()
def hard_swish(x, inplace: bool = False):
inner = F.relu6(x + 3.).div_(6.)
return x.mul_(inner) if inplace else x.mul(inner)
class HardSwish(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSwish, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_swish(x, self.inplace)
def hard_sigmoid(x, inplace: bool = False):
if inplace:
return x.add_(3.).clamp_(0., 6.).div_(6.)
else:
return F.relu6(x + 3.) / 6.
class HardSigmoid(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_sigmoid(x, self.inplace)
def hard_mish(x, inplace: bool = False):
""" Hard Mish
Experimental, based on notes by Mish author Diganta Misra at
https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
"""
if inplace:
return x.mul_(0.5 * (x + 2).clamp(min=0, max=2))
else:
return 0.5 * x * (x + 2).clamp(min=0, max=2)
class HardMish(nn.Module):
def __init__(self, inplace: bool = False):
super(HardMish, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_mish(x, self.inplace)
class PReLU(nn.PReLU):
"""Applies PReLU (w/ dummy inplace arg)
"""
def __init__(self, num_parameters: int = 1, init: float = 0.25, inplace: bool = False) -> None:
super(PReLU, self).__init__(num_parameters=num_parameters, init=init)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return F.prelu(input, self.weight)
def gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor:
return F.gelu(x)
class GELU(nn.Module):
"""Applies the Gaussian Error Linear Units function (w/ dummy inplace arg)
"""
def __init__(self, inplace: bool = False):
super(GELU, self).__init__()
def forward(self, input: torch.Tensor) -> torch.Tensor:
return F.gelu(input)
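# A short sketch of the common interface: because every module accepts an
# `inplace` flag (even where it is a no-op), activations can be swapped by name
# without touching the call sites.
if __name__ == "__main__":
    x = torch.randn(2, 3)
    for _name, _cls in [('swish', Swish), ('mish', Mish), ('hard_swish', HardSwish), ('gelu', GELU)]:
        print(_name, _cls(inplace=False)(x).shape)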
|
# pylint: disable=no-self-use, unused-argument, redefined-outer-name
import logging
from pathlib import Path
import pytest
import drvn.installer._utils as utils
@pytest.fixture(scope="class")
def workspace():
workspace_path = _set_up_workspace()
yield workspace_path
_tear_down_workspace()
class TestDrvnInstallerScript:
def test_help_exits_with_returncode_zero(self):
_assert_returncode_zero("drvn_installer --help")
def test_normal_run_exits_with_returncode_zero(self, workspace):
_assert_returncode_zero("drvn_installer", cwd=workspace)
# TODO: do some install testing in a docker container
_assert_returncode_zero = utils.try_cmd
def _set_up_workspace():
workspace_path = _get_workspace_path()
logging.debug("Setting up integration test workspace ...")
utils.try_cmd(f"mkdir -p {workspace_path}")
return workspace_path
def _tear_down_workspace():
workspace_path = _get_workspace_path()
logging.debug("Tearing down integration test workspace ...")
utils.try_cmd(f"rm -rf {workspace_path}")
def _get_workspace_path():
workspace_path = Path("/tmp/python_installer/integration_workspace")
return workspace_path
|
import os
from django.contrib.auth.models import AbstractUser
from django.db import models
class Icon(models.Model):
image = models.ImageField(upload_to="user-icons")
def __str__(self) -> str:
return os.path.basename(self.image.name)
class User(AbstractUser):
username = models.CharField(
"username",
max_length=150,
unique=True,
help_text="Required. 150 characters or less",
)
icon = models.ForeignKey("Icon", null=True, blank=True, on_delete=models.PROTECT)
class Meta:
ordering = ["-id"]
|
import sys
import onnx
import numpy as np
import tvm
from tvm import te
import tvm.relay as relay
import logging
if len(sys.argv) != 2:
print("Usage: %s <onnx-file>" % sys.argv[0])
exit(1)
onnx_model = onnx.load(sys.argv[1])
input_name = "input.1"
x = np.random.randn(10, 3, 224, 224)
# # mybert
# input_name = "data"
# x = np.random.randn(64, 1024)
######################################################################
# Compile the model with relay
# ---------------------------------------------
target = "cuda -libs=cudnn,cublas"
shape_dict = {input_name: x.shape}
mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)
# logging.getLogger("compile_engine").setLevel(logging.INFO)
# logging.getLogger("compile_engine").addHandler(logging.StreamHandler(sys.stdout))
with tvm.transform.PassContext(opt_level=3):
# Don't use GraphExecutor, which is for debugging only
lib = relay.build(mod, target=target, params=params)
ctx = tvm.gpu(0)
dtype = "float32"
data_tvm = tvm.nd.array(x.astype(dtype), ctx=ctx)
module = tvm.contrib.graph_runtime.GraphModule(lib["default"](ctx))
module.set_input(input_name, data_tvm)
print("Evaluate inference time cost...")
ftimer = module.module.time_evaluator("run", ctx, number=1, repeat=50)
prof_res = np.array(ftimer().results) * 1e3
print("Time cost is: ", np.mean(prof_res), "ms", " stddev = ", np.std(prof_res), "ms")
|
s = 'Python is Awesome'
# without start and end
print(s.startswith('Python'))            # True
# with start index
print(s.startswith('Python', 3))         # False
print(s.startswith('hon', 3))            # True
# with start and end index
print(s.startswith('is', 7, 10))         # True
print(s.startswith('Python is', 0, 10))  # True
print(s.startswith('Python is', 0, 5))   # False
# prefix as tuple
print(s.startswith(('is', 'Python')))    # True
print(s.startswith(('is', 'hon'), 7))    # True
|
import pickle
import tensorflow as tf
import numpy as np
import pandas as pd
import yaml
import json
import os
from importlib.machinery import SourceFileLoader
def save_pickle_file(outlist, filepath):
"""Save to pickle file."""
with open(filepath, 'wb') as f:
pickle.dump(outlist, f)
def load_pickle_file(filepath):
"""Load pickle file."""
with open(filepath, 'rb') as f:
outlist = pickle.load(f)
return outlist
def save_json_file(outlist, filepath):
"""Save to json file."""
with open(filepath, 'w') as json_file:
json.dump(outlist, json_file)
def load_json_file(filepath):
"""Load json file."""
with open(filepath, 'r') as json_file:
file_read = json.load(json_file)
return file_read
def load_yaml_file(fname):
"""Load yaml file."""
with open(fname, 'r') as stream:
outlist = yaml.safe_load(stream)
return outlist
def save_yaml_file(outlist, fname):
"""Save to yaml file."""
with open(fname, 'w') as yaml_file:
yaml.dump(outlist, yaml_file, default_flow_style=False)
def load_hyper_file(file_name):
"""Load hyper-parameters from file. File type can be '.yaml', '.json', '.pickle' or '.py'
Args:
file_name (str): Path or name of the file containing hyper-parameter.
Returns:
hyper (dict): Dictionary of hyper-parameters.
"""
if "." not in file_name:
print("ERROR:kgcnn: Can not determine file-type.")
return {}
type_ending = file_name.split(".")[-1]
if type_ending == "json":
return load_json_file(file_name)
elif type_ending == "yaml":
return load_yaml_file(file_name)
elif type_ending == "pickle":
return load_pickle_file(file_name)
elif type_ending == "py":
path = os.path.realpath(file_name)
hyper = getattr(SourceFileLoader(os.path.basename(path).replace(".py", ""), path).load_module(), "hyper")
return hyper
else:
print("ERROR:kgcnn: Unsupported file type %s" % type_ending)
return {}
def ragged_tensor_from_nested_numpy(numpy_list: list):
"""Make ragged tensor from a list of numpy arrays. Each array can have different length but must match in shape
with exception of the first dimension.
This will result in a ragged tensor with ragged dimension only at first axis (ragged_rank=1), like
shape `(batch, None, ...)`. This way a tensor can be generated faster than tf.ragged.constant().
Warning: The data will be copied for this operation.
.. code-block:: python
import tensorflow as tf
import numpy as np
ragged_tensor = ragged_tensor_from_nested_numpy([np.array([[0]]), np.array([[1], [2], [3]])])
print(ragged_tensor)
# <tf.RaggedTensor [[[0]], [[1], [2], [3]]]>
print(ragged_tensor.shape)
# (2, None, 1)
Args:
numpy_list (list): List of numpy arrays of different length but else identical shape.
Returns:
tf.RaggedTensor: Ragged tensor of former nested list of numpy arrays.
"""
return tf.RaggedTensor.from_row_lengths(np.concatenate(numpy_list, axis=0),
np.array([len(x) for x in numpy_list], dtype="int"))
def pandas_data_frame_columns_to_numpy(data_frame, label_column_name, print_context: str = ""):
"""Convert a selection of columns from a pandas data frame to a single numpy array.
Args:
data_frame (pd.DataFrame): Pandas Data Frame.
label_column_name (list, str): Name or list of columns to convert to a numpy array.
print_context (str): Context for error message. Default is "".
Returns:
np.ndarray: Numpy array of the data in data_frame selected by label_column_name.
"""
if isinstance(label_column_name, str):
out_array = np.expand_dims(np.array(data_frame[label_column_name]), axis=-1)
elif isinstance(label_column_name, list):
out_array = []
for x in label_column_name:
if isinstance(x, int):
x_col = np.array(data_frame.iloc[:, x])
elif isinstance(x, str):
x_col = np.array(data_frame[x])
else:
raise ValueError(print_context + "Column list must contain name or position but got %s" % x)
if len(x_col.shape) <= 1:
x_col = np.expand_dims(x_col, axis=-1)
out_array.append(x_col)
out_array = np.concatenate(out_array, axis=-1)
elif isinstance(label_column_name, slice):
out_array = np.array(data_frame.iloc[:, label_column_name])
else:
raise ValueError(print_context + "Column definition must be list or string, got %s" % label_column_name)
return out_array
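# A quick illustration (toy frame; columns can be selected by name, by
# position, or by a mixed list of both):
if __name__ == "__main__":
    df = pd.DataFrame({"graph_labels": [0.1, 0.2], "extra": [1.0, 2.0]})
    print(pandas_data_frame_columns_to_numpy(df, "graph_labels").shape)       # (2, 1)
    print(pandas_data_frame_columns_to_numpy(df, ["graph_labels", 1]).shape)  # (2, 2)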
|
from pprint import pprint
from collections import OrderedDict
import json
import networkx as nx
import math
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def calc_score(x, y):
return x + (1 - x) * math.pow(y, (1/x))
LABELED_FILE = 'data/ORIG/label5000.txt'
labeled_pairs = {}
# build a simple KB
for line in open(LABELED_FILE, encoding='utf-8'):
ls = line.strip().split('\t')
termab = (ls[2], ls[3])
if int(ls[4]) == 1:
if termab not in labeled_pairs:
labeled_pairs[termab] = 1
else:
labeled_pairs[termab] += 1
G = nx.Graph()
for pair in labeled_pairs:
G.add_edge(pair[0], pair[1], weight=labeled_pairs[pair])
print(G.number_of_nodes())
print(G.number_of_edges())
def calculate_semantic_score(G, terma, termb):
if G.has_node(terma) and G.has_node(termb):
terma_neighbors = set(nx.neighbors(G, terma))
termb_neighbors = set(nx.neighbors(G, termb))
intersect = terma_neighbors.intersection(termb_neighbors)
intersect_score = sum((G[node][terma]['weight'] + G[node][termb]['weight']) for node in intersect)
union_score = sum(G[node][terma]['weight'] for node in terma_neighbors) + \
sum(G[node][termb]['weight'] for node in termb_neighbors)
if G.has_edge(terma, termb):
intersect_score += G[terma][termb]['weight']
union_score += G[terma][termb]['weight']
return float(intersect_score)/union_score
else:
return 0.0
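# A toy worked example (hypothetical terms): if edges 'a'-'x' and 'b'-'x' both
# have weight 1 and there is no direct 'a'-'b' edge, the shared neighbour 'x'
# contributes G['x']['a'] + G['x']['b'] = 2 to the intersection score, and each
# term contributes 1 to the union score, so the semantic score is 2 / 2 = 1.0.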
proba_dict = OrderedDict()
with open('lstm_proba.txt', encoding='utf-8') as proba_file:
for idx, line in enumerate(proba_file):
ls = line.strip().split('\t')
terma = ls[0]
termb = ls[1]
proba = float(json.loads(ls[2])[0])
proba_dict[idx] = {'terma': terma, 'termb': termb, 'proba': proba}
to_add = [1]  # non-empty sentinel so the loop body runs at least once
while to_add:
print('Starting Iter')
for key in proba_dict:
proba = proba_dict[key]['proba']
terma = proba_dict[key]['terma']
termb = proba_dict[key]['termb']
semantic_score = calculate_semantic_score(G, terma, termb)
proba_dict[key]['semantic'] = semantic_score
proba_dict[key]['score'] = calc_score(proba_dict[key]['proba'], proba_dict[key]['semantic'])
toplist = sorted(proba_dict.items(), key=lambda x: x[1]['score'], reverse=True)
print(toplist[:5])
to_add = [item for item in toplist if item[1]['score'] >= 0.9]
print(to_add)
for item in to_add:
del proba_dict[item[0]]
terma = item[1]['terma']
termb = item[1]['termb']
if G.has_edge(terma, termb):
G[terma][termb]['weight'] += 1
else:
G.add_edge(terma, termb, weight=1)
print(G.number_of_nodes())
print(G.number_of_edges())
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.19
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class CertificatesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_api_group(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_group_with_http_info(**kwargs) # noqa: E501
def get_api_group_with_http_info(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_group" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/certificates.k8s.io/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIGroup', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
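# Usage sketch (requires a reachable cluster and a loaded kubeconfig; the
# config-loading step lives in the separate `kubernetes.config` package):
#
#   from kubernetes import config
#   config.load_kube_config()
#   group = CertificatesApi().get_api_group()
#   print(group.api_version)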
|
"""Component to embed Aqualink devices."""
from __future__ import annotations
import asyncio
from functools import wraps
import logging
import aiohttp.client_exceptions
from iaqualink.client import AqualinkClient
from iaqualink.device import (
AqualinkBinarySensor,
AqualinkDevice,
AqualinkLight,
AqualinkSensor,
AqualinkThermostat,
AqualinkToggle,
)
from iaqualink.exception import AqualinkServiceException
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.climate import DOMAIN as CLIMATE_DOMAIN
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME, Platform
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import DeviceInfo, Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import ConfigType
from .const import DOMAIN, UPDATE_INTERVAL
_LOGGER = logging.getLogger(__name__)
ATTR_CONFIG = "config"
PARALLEL_UPDATES = 0
PLATFORMS = [
BINARY_SENSOR_DOMAIN,
CLIMATE_DOMAIN,
LIGHT_DOMAIN,
SENSOR_DOMAIN,
SWITCH_DOMAIN,
]
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
},
),
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the Aqualink component."""
if (conf := config.get(DOMAIN)) is not None:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=conf,
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Aqualink from a config entry."""
username = entry.data[CONF_USERNAME]
password = entry.data[CONF_PASSWORD]
hass.data.setdefault(DOMAIN, {})
# These will contain the initialized devices
binary_sensors = hass.data[DOMAIN][BINARY_SENSOR_DOMAIN] = []
climates = hass.data[DOMAIN][CLIMATE_DOMAIN] = []
lights = hass.data[DOMAIN][LIGHT_DOMAIN] = []
sensors = hass.data[DOMAIN][SENSOR_DOMAIN] = []
switches = hass.data[DOMAIN][SWITCH_DOMAIN] = []
session = async_get_clientsession(hass)
aqualink = AqualinkClient(username, password, session)
try:
await aqualink.login()
except AqualinkServiceException as login_exception:
_LOGGER.error("Failed to login: %s", login_exception)
return False
except (
asyncio.TimeoutError,
aiohttp.client_exceptions.ClientConnectorError,
) as aio_exception:
raise ConfigEntryNotReady(
f"Error while attempting login: {aio_exception}"
) from aio_exception
try:
systems = await aqualink.get_systems()
except AqualinkServiceException as svc_exception:
raise ConfigEntryNotReady(
f"Error while attempting to retrieve systems list: {svc_exception}"
) from svc_exception
systems = list(systems.values())
if not systems:
_LOGGER.error("No systems detected or supported")
return False
# Only supporting the first system for now.
try:
devices = await systems[0].get_devices()
except AqualinkServiceException as svc_exception:
raise ConfigEntryNotReady(
f"Error while attempting to retrieve devices list: {svc_exception}"
) from svc_exception
for dev in devices.values():
if isinstance(dev, AqualinkThermostat):
climates += [dev]
elif isinstance(dev, AqualinkLight):
lights += [dev]
elif isinstance(dev, AqualinkBinarySensor):
binary_sensors += [dev]
elif isinstance(dev, AqualinkSensor):
sensors += [dev]
elif isinstance(dev, AqualinkToggle):
switches += [dev]
forward_setup = hass.config_entries.async_forward_entry_setup
if binary_sensors:
_LOGGER.debug("Got %s binary sensors: %s", len(binary_sensors), binary_sensors)
hass.async_create_task(forward_setup(entry, Platform.BINARY_SENSOR))
if climates:
_LOGGER.debug("Got %s climates: %s", len(climates), climates)
hass.async_create_task(forward_setup(entry, Platform.CLIMATE))
if lights:
_LOGGER.debug("Got %s lights: %s", len(lights), lights)
hass.async_create_task(forward_setup(entry, Platform.LIGHT))
if sensors:
_LOGGER.debug("Got %s sensors: %s", len(sensors), sensors)
hass.async_create_task(forward_setup(entry, Platform.SENSOR))
if switches:
_LOGGER.debug("Got %s switches: %s", len(switches), switches)
hass.async_create_task(forward_setup(entry, Platform.SWITCH))
async def _async_systems_update(now):
"""Refresh internal state for all systems."""
prev = systems[0].online
try:
await systems[0].update()
except AqualinkServiceException as svc_exception:
if prev is not None:
_LOGGER.warning("Failed to refresh iAqualink state: %s", svc_exception)
else:
cur = systems[0].online
if cur is True and prev is not True:
_LOGGER.warning("Reconnected to iAqualink")
async_dispatcher_send(hass, DOMAIN)
async_track_time_interval(hass, _async_systems_update, UPDATE_INTERVAL)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
platforms_to_unload = [
platform for platform in PLATFORMS if platform in hass.data[DOMAIN]
]
del hass.data[DOMAIN]
return await hass.config_entries.async_unload_platforms(entry, platforms_to_unload)
def refresh_system(func):
"""Force update all entities after state change."""
@wraps(func)
async def wrapper(self, *args, **kwargs):
"""Call decorated function and send update signal to all entities."""
await func(self, *args, **kwargs)
async_dispatcher_send(self.hass, DOMAIN)
return wrapper
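# Usage sketch (hypothetical platform entity, not part of this file): the
# decorator wraps any state-changing coroutine so every Aqualink entity
# refreshes once the call completes.
#
#   class HaAqualinkSwitch(AqualinkEntity, SwitchEntity):
#       @refresh_system
#       async def async_turn_on(self, **kwargs) -> None:
#           await self.dev.turn_on()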
class AqualinkEntity(Entity):
"""Abstract class for all Aqualink platforms.
Entity state is updated via the interval timer within the integration.
Any entity state change via the iaqualink library triggers an internal
state refresh which is then propagated to all the entities in the system
via the refresh_system decorator above to the _update_callback in this
class.
"""
def __init__(self, dev: AqualinkDevice) -> None:
"""Initialize the entity."""
self.dev = dev
async def async_added_to_hass(self) -> None:
"""Set up a listener when this entity is added to HA."""
self.async_on_remove(
async_dispatcher_connect(self.hass, DOMAIN, self.async_write_ha_state)
)
@property
def should_poll(self) -> bool:
"""Return False as entities shouldn't be polled.
Entities are checked periodically as the integration runs periodic
updates on a timer.
"""
return False
@property
def unique_id(self) -> str:
"""Return a unique identifier for this entity."""
return f"{self.dev.system.serial}_{self.dev.name}"
@property
def assumed_state(self) -> bool:
"""Return whether the state is based on actual reading from the device."""
return self.dev.system.online in [False, None]
@property
def available(self) -> bool:
"""Return whether the device is available or not."""
return self.dev.system.online is True
@property
def device_info(self) -> DeviceInfo:
"""Return the device info."""
return DeviceInfo(
identifiers={(DOMAIN, self.unique_id)},
manufacturer="Jandy",
model=self.dev.__class__.__name__.replace("Aqualink", ""),
name=self.name,
via_device=(DOMAIN, self.dev.system.serial),
)
|
# Copyright (c) Microsoft Corporation.
# Licensed under the Apache License 2.0.
import random
import os
import azext_aro.vendored_sdks.azure.mgmt.redhatopenshift.v2020_04_30.models as v2020_04_30
from azext_aro._aad import AADManager
from azext_aro._rbac import assign_contributor_to_vnet, assign_contributor_to_routetable
from azext_aro._validators import validate_subnets
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.profiles import ResourceType
from azure.cli.core.util import sdk_no_wait
from msrest.exceptions import HttpOperationError
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import resource_id, parse_resource_id
from knack.log import get_logger
from knack.util import CLIError
logger = get_logger(__name__)
FP_CLIENT_ID = 'f1dd0a37-89c6-4e07-bcd1-ffd3d43d8875'
def aro_create(cmd, # pylint: disable=too-many-locals
client,
resource_group_name,
resource_name,
master_subnet,
worker_subnet,
vnet=None,
vnet_resource_group_name=None, # pylint: disable=unused-argument
location=None,
pull_secret=None,
domain=None,
cluster_resource_group=None,
client_id=None,
client_secret=None,
pod_cidr=None,
service_cidr=None,
master_vm_size=None,
worker_vm_size=None,
worker_vm_disk_size_gb=None,
worker_count=None,
apiserver_visibility=None,
ingress_visibility=None,
tags=None,
no_wait=False):
if not rp_mode_development():
resource_client = get_mgmt_service_client(
cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
provider = resource_client.providers.get('Microsoft.RedHatOpenShift')
if provider.registration_state != 'Registered':
raise CLIError('Microsoft.RedHatOpenShift provider is not registered. Run `az provider ' +
'register -n Microsoft.RedHatOpenShift --wait`.')
vnet = validate_subnets(master_subnet, worker_subnet)
subscription_id = get_subscription_id(cmd.cli_ctx)
random_id = generate_random_id()
aad = AADManager(cmd.cli_ctx)
if client_id is None:
app, client_secret = aad.create_application(cluster_resource_group or 'aro-' + random_id)
client_id = app.app_id
client_sp = aad.get_service_principal(client_id)
if not client_sp:
client_sp = aad.create_service_principal(client_id)
rp_client_id = os.environ.get('AZURE_FP_CLIENT_ID', FP_CLIENT_ID)
rp_client_sp = aad.get_service_principal(rp_client_id)
for sp_id in [client_sp.object_id, rp_client_sp.object_id]:
assign_contributor_to_vnet(cmd.cli_ctx, vnet, sp_id)
assign_contributor_to_routetable(cmd.cli_ctx, [master_subnet, worker_subnet], sp_id)
if rp_mode_development():
worker_vm_size = worker_vm_size or 'Standard_D2s_v3'
else:
worker_vm_size = worker_vm_size or 'Standard_D4s_v3'
if apiserver_visibility is not None:
apiserver_visibility = apiserver_visibility.capitalize()
if ingress_visibility is not None:
ingress_visibility = ingress_visibility.capitalize()
oc = v2020_04_30.OpenShiftCluster(
location=location,
tags=tags,
cluster_profile=v2020_04_30.ClusterProfile(
pull_secret=pull_secret or "",
domain=domain or random_id,
resource_group_id='/subscriptions/%s/resourceGroups/%s' %
(subscription_id, cluster_resource_group or "aro-" + random_id),
),
service_principal_profile=v2020_04_30.ServicePrincipalProfile(
client_id=client_id,
client_secret=client_secret,
),
network_profile=v2020_04_30.NetworkProfile(
pod_cidr=pod_cidr or '10.128.0.0/14',
service_cidr=service_cidr or '172.30.0.0/16',
),
master_profile=v2020_04_30.MasterProfile(
vm_size=master_vm_size or 'Standard_D8s_v3',
subnet_id=master_subnet,
),
worker_profiles=[
v2020_04_30.WorkerProfile(
name='worker', # TODO: 'worker' should not be hard-coded
vm_size=worker_vm_size,
disk_size_gb=worker_vm_disk_size_gb or 128,
subnet_id=worker_subnet,
count=worker_count or 3,
)
],
apiserver_profile=v2020_04_30.APIServerProfile(
visibility=apiserver_visibility or 'Public',
),
ingress_profiles=[
v2020_04_30.IngressProfile(
name='default', # TODO: 'default' should not be hard-coded
visibility=ingress_visibility or 'Public',
)
],
)
return sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=oc)
def aro_delete(cmd, client, resource_group_name, resource_name, no_wait=False):
# TODO: clean up rbac
try:
oc = client.get(resource_group_name, resource_name)
master_subnet = oc.master_profile.subnet_id
worker_subnets = {w.subnet_id for w in oc.worker_profiles}
master_parts = parse_resource_id(master_subnet)
vnet = resource_id(
subscription=master_parts['subscription'],
resource_group=master_parts['resource_group'],
namespace='Microsoft.Network',
type='virtualNetworks',
name=master_parts['name'],
)
aad = AADManager(cmd.cli_ctx)
rp_client_id = os.environ.get('AZURE_FP_CLIENT_ID', FP_CLIENT_ID)
rp_client_sp = aad.get_service_principal(rp_client_id)
# Customers frequently remove the RP's permissions, then cannot
# delete the cluster. Where possible, fix this before attempting
# deletion.
if rp_client_sp:
sp_id = rp_client_sp.object_id
assign_contributor_to_vnet(cmd.cli_ctx, vnet, sp_id)
assign_contributor_to_routetable(cmd.cli_ctx,
worker_subnets | {master_subnet},
sp_id)
except (CloudError, HttpOperationError) as e:
# Default to old deletion behaviour in case operations throw an
# exception above. Log the error.
logger.info(e.message)
return sdk_no_wait(no_wait, client.delete,
resource_group_name=resource_group_name,
resource_name=resource_name)
def aro_list(client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def aro_show(client, resource_group_name, resource_name):
return client.get(resource_group_name, resource_name)
def aro_list_credentials(client, resource_group_name, resource_name):
return client.list_credentials(resource_group_name, resource_name)
def aro_update(client, resource_group_name, resource_name, no_wait=False):
oc = v2020_04_30.OpenShiftClusterUpdate()
return sdk_no_wait(no_wait, client.update,
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=oc)
def rp_mode_development():
return os.environ.get('RP_MODE', '').lower() == 'development'
def generate_random_id():
random_id = (random.choice('abcdefghijklmnopqrstuvwxyz') +
''.join(random.choice('abcdefghijklmnopqrstuvwxyz1234567890')
for _ in range(7)))
return random_id
|
'''
Classes for providing external input to a network.
'''
from .binomial import *
from .poissongroup import *
from .poissoninput import *
from .spikegeneratorgroup import *
from .timedarray import *
|
from __future__ import division
import numpy as np
from scipy.constants import R
from scipy.integrate import quad
__all__ = [
'temp_integral',
'time_integral',
'senumyang',
'timeint'
]
def temp_integral(E, T):
    """Evaluates the temperature integral with numerical quadrature.
    Parameters
    ----------
    E: Activation Energy (Joule/mol)
    T: Absolute Temperature (Kelvin)
    """
    x = E / (R*T)
    # quad returns (value, abserr); integrate from x to infinity so the
    # integral converges and carries the correct sign.
    return E/R * quad(_inner_integral, x, np.inf)[0]
def _inner_integral(x):
    """temp_integral's inner integrand: exp(-x) / x**2"""
    return np.exp(-x) / x**2
def time_integral():
"""A time version of the temperature integral for use in
Vyazovkin's Method"""
pass
def timeint(t, Ea, T):
"""Integral for vkin_iso"""
return np.exp(-Ea / (R*T))
def senumyang(x):
"""Senum-Yang temperature integral approximation. x = Ea / (R*T)"""
t1 = np.exp(-x) / x
return t1 * (x**3 + 18*x**2 + 86*x + 96) / (x**4 + 20*x**3 + 120*x**2 + 240*x + 120)
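# Cross-check of the two evaluations: for moderate x = E/(R*T), the quadrature
# result should closely agree with E/R times the Senum-Yang approximation.
# The test point below is arbitrary (J/mol and K).
if __name__ == "__main__":
    E, T = 120e3, 800.0
    x = E / (R * T)
    print(temp_integral(E, T), E / R * senumyang(x))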
|
from setuptools import setup, find_packages
setup(
name="repeating_timer",
    version="0.2",
author="Quin Marilyn",
author_email="quin.marilyn05@gmail.com",
description="Run a function over and over in a thread",
long_description=open("readme.md", "r").read(),
long_description_content_type="text/markdown",
url="http://github.com/TheQuinbox/repeating_timer",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
"Topic :: Utilities",
],
python_requires=">=3",
)
|
import sys
from doctest import testmod
import numpy
import einops
import einops.layers
import einops.parsing
from einops._backends import AbstractBackend
from einops.einops import rearrange, parse_shape, _optimize_transformation
from . import collect_test_backends
__author__ = 'Alex Rogozhnikov'
def test_doctests_examples():
if sys.version_info >= (3, 6):
# python 3.5 and lower do not keep ordered dictionaries
testmod(einops.layers, raise_on_error=True, extraglobs=dict(np=numpy))
testmod(einops.einops, raise_on_error=True, extraglobs=dict(np=numpy))
def test_backends_installed():
"""
This test will fail if some of backends are not installed or can't be imported
Other tests will just work and only test installed backends.
"""
from . import skip_cupy
errors = []
for backend_type in AbstractBackend.__subclasses__():
if skip_cupy and backend_type.framework_name == 'cupy':
continue
try:
# instantiate
backend_type()
except Exception as e:
errors.append(e)
assert len(errors) == 0, errors
def test_optimize_transformations_numpy():
print('Testing optimizations')
shapes = [[2] * n_dimensions for n_dimensions in range(14)]
shapes += [[3] * n_dimensions for n_dimensions in range(6)]
shapes += [[2, 3, 5, 7]]
shapes += [[2, 3, 5, 7, 11, 17]]
for shape in shapes:
for attempt in range(5):
n_dimensions = len(shape)
x = numpy.random.randint(0, 2 ** 12, size=shape).reshape([-1])
init_shape = shape[:]
n_reduced = numpy.random.randint(0, n_dimensions + 1)
reduced_axes = tuple(numpy.random.permutation(n_dimensions)[:n_reduced])
axes_reordering = numpy.random.permutation(n_dimensions - n_reduced)
final_shape = numpy.random.randint(0, 1024, size=333) # just random
init_shape2, reduced_axes2, axes_reordering2, final_shape2 = combination2 = \
_optimize_transformation(init_shape, reduced_axes, axes_reordering, final_shape)
assert numpy.array_equal(final_shape, final_shape2)
result1 = x.reshape(init_shape).sum(axis=reduced_axes).transpose(axes_reordering).reshape([-1])
result2 = x.reshape(init_shape2).sum(axis=reduced_axes2).transpose(axes_reordering2).reshape([-1])
assert numpy.array_equal(result1, result2)
# testing we can't optimize this formula again
combination3 = _optimize_transformation(*combination2)
for a, b in zip(combination2, combination3):
assert numpy.array_equal(a, b)
def test_parse_shape_imperative():
backends = collect_test_backends(symbolic=False, layers=False)
backends += collect_test_backends(symbolic=False, layers=True)
for backend in backends:
print('Shape parsing for ', backend.framework_name)
x = numpy.zeros([10, 20, 30, 40])
parsed1 = parse_shape(x, 'a b c d')
parsed2 = parse_shape(backend.from_numpy(x), 'a b c d')
assert parsed1 == parsed2 == dict(a=10, b=20, c=30, d=40)
assert parsed1 != dict(a=1, b=20, c=30, d=40) != parsed2
parsed1 = parse_shape(x, '_ _ _ _')
parsed2 = parse_shape(backend.from_numpy(x), '_ _ _ _')
assert parsed1 == parsed2 == dict()
parsed1 = parse_shape(x, '_ _ _ hello')
parsed2 = parse_shape(backend.from_numpy(x), '_ _ _ hello')
assert parsed1 == parsed2 == dict(hello=40)
parsed1 = parse_shape(x, '_ _ a1 a1a111a')
parsed2 = parse_shape(backend.from_numpy(x), '_ _ a1 a1a111a')
assert parsed1 == parsed2 == dict(a1=30, a1a111a=40)
def test_parse_shape_symbolic():
backends = collect_test_backends(symbolic=True, layers=False)
backends += collect_test_backends(symbolic=True, layers=True)
for backend in backends:
if backend.framework_name == 'keras':
# need special way to compile, shape vars can be used only inside layers
continue
print('special shape parsing for', backend.framework_name)
input_symbols = [
backend.create_symbol([10, 20, 30, 40]),
backend.create_symbol([10, 20, None, None]),
backend.create_symbol([None, None, None, None]),
]
if backend.framework_name in ['mxnet.symbol']:
# mxnet can't normally run inference
input_symbols = [backend.create_symbol([10, 20, 30, 40])]
for input_symbol in input_symbols:
shape_placeholder = parse_shape(input_symbol, 'a b c d')
shape = {}
for name, symbol in shape_placeholder.items():
shape[name] = symbol if isinstance(symbol, int) \
else backend.eval_symbol(symbol, [(input_symbol, numpy.zeros([10, 20, 30, 40]))])
print(shape)
result_placeholder = rearrange(input_symbol, 'a b (c1 c2) (d1 d2) -> (a b d1) c1 (c2 d2)',
**parse_shape(input_symbol, 'a b c1 _'), d2=2)
result = backend.eval_symbol(result_placeholder, [(input_symbol, numpy.zeros([10, 20, 30, 40]))])
print(result.shape)
assert result.shape == (10 * 20 * 20, 30, 1 * 2)
assert numpy.allclose(result, 0)
def test_is_float_type():
backends = collect_test_backends(symbolic=False, layers=False)
backends += collect_test_backends(symbolic=False, layers=True)
for backend in backends:
for dtype in ['int32', 'int64', 'float32', 'float64']:
is_float = 'float' in dtype
input = numpy.zeros([3, 4, 5], dtype=dtype)
input = backend.from_numpy(input)
if 'chainer' in backend.framework_name and not is_float:
continue # chainer doesn't allow non-floating tensors
assert backend.is_float_type(input) == is_float, (dtype, backend, input.dtype)
|
import csv
import re
import sys
filename = sys.argv[1]
print('Parsing ' + filename + '...')
csv_contents = open(filename + '.csv', "w", newline='')
writer = csv.writer(csv_contents)
file = open(filename, "r")
file.readline()
file.readline()
file.readline()
#Write data headers
writer.writerow(['temperature', 'iterations', 'current_distance', 'shorter_routes_accepted', 'longer_routes_accepted'])
# Read file
currentLineToWrite = []
rowDone = False
started = False
finished = False
with file:
for line in file:
line = line.strip()
if(line.startswith('Temperature')):
if(started != True):
started = True
data = line.split(':', 1)[1]
currentLineToWrite.extend([data])
elif(line.startswith('Iterations')):
data = line.split(':', 1)[1]
currentLineToWrite.extend([data])
elif(line.startswith('Current distance')):
data = line.split(':', 1)[1]
currentLineToWrite.extend([data])
elif(line.startswith('Shorter routes accepted')):
data = line.split(':', 1)[1]
currentLineToWrite.extend([data])
elif(line.startswith('Longer routes accepted')):
data = line.split(':', 1)[1]
currentLineToWrite.extend([data])
finished = True
if(started and finished):
rowDone = True
elif(line.startswith('Total')):
started = True
label,data = line.split(':')
currentLineToWrite.extend([label])
currentLineToWrite.extend([data])
finished = True
rowDone = True
elif(line.startswith('Initial')):
label,data = line.split(':')
currentLineToWrite.extend([label])
currentLineToWrite.extend([data])
finished = True
rowDone = True
elif(line.startswith('numNodes')):
args = re.split('=|;', line)
args = [x.strip(' ') for x in args]
currentLineToWrite.extend(args)
finished = True
rowDone = True
elif(line.startswith('Shortest distance')):
label,data = line.split(':')
currentLineToWrite.extend([label])
currentLineToWrite.extend([data])
finished = True
rowDone = True
elif(line.startswith('Execution time')):
label,data = line.split(':')
currentLineToWrite.extend([label])
data = data.split()[0]
currentLineToWrite.extend([data])
finished = True
rowDone = True
if(rowDone):
writer.writerows([currentLineToWrite])
currentLineToWrite = []
rowDone = False
started = False
finished = False
csv_contents.close()  # 'file' is already closed by the with-block above
print('Done')
|
# Copyright 2016 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from ducktape.cluster.cluster import Cluster
from ducktape.cluster.remoteaccount import RemoteAccount
FakeTestContext = collections.namedtuple('FakeTestContext', ['expected_node_spec'])
FakeRemoteAccount = collections.namedtuple('FakeRemoteAccount', ['operating_system'])
class CheckCluster(object):
def setup_method(self, _):
self.cluster = Cluster()
self.cluster._in_use_nodes = []
self.cluster._available_nodes = [
FakeRemoteAccount(operating_system=RemoteAccount.LINUX),
FakeRemoteAccount(operating_system=RemoteAccount.LINUX),
FakeRemoteAccount(operating_system=RemoteAccount.LINUX),
FakeRemoteAccount(operating_system=RemoteAccount.WINDOWS),
FakeRemoteAccount(operating_system=RemoteAccount.WINDOWS),
FakeRemoteAccount(operating_system=RemoteAccount.WINDOWS)
]
self.test_list = [
FakeTestContext(expected_node_spec={RemoteAccount.LINUX: 2, RemoteAccount.WINDOWS: 2}),
FakeTestContext(expected_node_spec={RemoteAccount.LINUX: 5, RemoteAccount.WINDOWS: 2}),
FakeTestContext(expected_node_spec={RemoteAccount.LINUX: 5, RemoteAccount.WINDOWS: 5}),
FakeTestContext(expected_node_spec={RemoteAccount.LINUX: 3, RemoteAccount.WINDOWS: 3}),
]
def check_enough_capacity(self):
assert self.cluster.test_capacity_comparison(self.test_list[0]) > 0
def check_not_enough_capacity(self):
assert self.cluster.test_capacity_comparison(self.test_list[1]) < 0
assert self.cluster.test_capacity_comparison(self.test_list[2]) < 0
def check_exact_capacity(self):
assert self.cluster.test_capacity_comparison(self.test_list[3]) == 0
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def FileBackedVirtualDiskSpec(vim, *args, **kwargs):
    '''Specification used to create a file-based virtual disk'''
obj = vim.client.factory.create('{urn:vim25}FileBackedVirtualDiskSpec')
# do some validation checking...
    if (len(args) + len(kwargs)) < 3:
        raise IndexError('Expected at least 3 arguments got: %d' % (len(args) + len(kwargs)))
required = [ 'capacityKb', 'adapterType', 'diskType' ]
optional = [ 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
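# A minimal usage sketch (not part of the generated source): `vim` is assumed to
# be a connected pyvisdk service instance, and the argument values below are
# purely illustrative.
#
#   spec = FileBackedVirtualDiskSpec(vim, capacityKb=1024,
#                                    adapterType='lsiLogic', diskType='thin')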
|
from OpenGL.GLUT import glutPostRedisplay
class MouseDelegate(object):
def onMouse(self, *args):
raise NotImplementedError
class KeyDelegate(object):
def onKey(self, *args):
raise NotImplementedError
g_events = []
def null_action():
return False
class Event(object):
def __init__(self, _type, msg='', action=null_action):
self.action = action # callable, will return True if redraw needed.
self._type = _type
self.msg = msg
def post_event(event):
g_events.append(event)
def consume_events(app):
for e in g_events[:]:
app.handle_event(e)
g_events.pop(0)
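# A minimal usage sketch of the queue above (hypothetical `app` object exposing
# handle_event(event); Event/post_event/consume_events are from this module):
#
#   def on_click():
#       return True  # redraw needed
#
#   post_event(Event('mouse', msg='clicked', action=on_click))
#   consume_events(app)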
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Test code in monitoring.py."""
from unittest import mock
from gcpdiag.queries import apis_stub, monitoring
DUMMY_PROJECT_NAME = 'gcpdiag-gce1-aaaa'
DUMMY_INSTANCE_NAME = 'gce1'
DUMMY_ZONE = 'europe-west4-a'
@mock.patch('gcpdiag.queries.apis.get_api', new=apis_stub.get_api_stub)
class Test:
def test_timeserie(self):
ts_col = monitoring.query(DUMMY_PROJECT_NAME,
'mocked query (this is ignored)')
fs = frozenset({
f'resource.zone:{DUMMY_ZONE}',
f'metric.instance_name:{DUMMY_INSTANCE_NAME}'
})
assert fs in ts_col.keys()
value = ts_col[fs]
# expected data:
# {
# 'labels': {
# 'resource.zone': 'europe-west4-a',
# 'metric.instance_name': 'gce1'
# },
# 'start_time': '2021-05-19T15:40:31.414435Z',
# 'end_time': '2021-05-19T15:45:31.414435Z',
# 'values': [[10917.0, 5], [11187.0, 4]]
# }
assert value['labels']['metric.instance_name'] == 'gce1'
assert 'start_time' in value
assert 'end_time' in value
assert isinstance(value['values'][0][0], float)
assert isinstance(value['values'][1][0], float)
assert isinstance(value['values'][0][1], int)
assert isinstance(value['values'][1][1], int)
|
#!/usr/bin/env python3
import requests
def batch_request(node_ip, user_pass, requests_list):
r = requests.post(node_ip, json=requests_list)
return r
def get_orderbook(node_ip, user_pass, base, rel):
params = {'userpass': user_pass,
'method': 'orderbook',
'base': base, 'rel': rel,}
r = requests.post(node_ip, json=params)
return r
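# Example call (a sketch): `node_ip` is the full RPC URL of a local node and the
# userpass value is a placeholder.
#
#   r = get_orderbook('http://127.0.0.1:7783', 'changeme', 'KMD', 'BTC')
#   print(r.json())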
|
""" add language server support to the running jupyter notebook application
"""
import json
import traitlets
from .handlers import add_handlers
from .manager import LanguageServerManager
from .paths import normalized_uri
def load_jupyter_server_extension(nbapp):
""" create a LanguageServerManager and add handlers
"""
nbapp.add_traits(language_server_manager=traitlets.Instance(LanguageServerManager))
manager = nbapp.language_server_manager = LanguageServerManager(parent=nbapp)
manager.initialize()
contents = nbapp.contents_manager
page_config = nbapp.web_app.settings.setdefault("page_config_data", {})
# try to set the rootUri from the contents manager path
if hasattr(contents, "root_dir"):
page_config["rootUri"] = normalized_uri(contents.root_dir)
nbapp.log.debug("[lsp] rootUri will be %s", page_config["rootUri"])
else: # pragma: no cover
        nbapp.log.warning(
"[lsp] %s did not appear to have a root_dir, could not set rootUri",
contents,
)
add_handlers(nbapp)
nbapp.log.debug(
"[lsp] The following Language Servers will be available: {}".format(
json.dumps(manager.language_servers, indent=2, sort_keys=True)
)
)
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T
#
# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# adserver.py
#
# Server that creates/hosts online ads, using PageBot as engine.
# http://localhost:5555/index.html
#
import sys
import json
import tornado.httpserver
import tornado.ioloop
import tornado.web
#from pagebot.server.pagebothandler import PageBotHandler
from pagebot.contexts import HtmlContext
from pagebot.web.nanosite.nanosite import NanoSite
PORT = 5555
SSLPORT = 443
regex = r"/([^/]+)"
context = HtmlContext()
class AdHandler(tornado.web.RequestHandler):
def get(self, slug):
print('get %s' % slug)
html = self.get_html()
self.write(html)
def post(self, slug):
print('post %s' % slug)
data = json.loads(self.request.body.decode('utf-8'))
print('Got JSON data:', data)
self.write({ 'got' : 'your data' })
def get_html(self):
self.site = NanoSite(context=context)
html = ''
html += '<html><head><title>PageBot Server</title></head><body>'
html += "<h1>Hello world</h1>"
html += '</body></html>'
return html
    def data_received(self, chunk):
        pass
class AdServer:
def __init__(self, args=None, cert=None, key=None):
#self.handlers = [('/', PageBotHandler),]
self.handlers = [(regex, AdHandler),]
        if args and '--port' in args:
            try:
                self.port = int(args[-1])
            except ValueError:
                self.port = PORT
        else:
            self.port = PORT
self.app = tornado.web.Application(self.handlers)
        if cert and key:
            ssl_options = {
                "certfile": cert,
                "keyfile": key,
            }
            http_server = tornado.httpserver.HTTPServer(self.app, ssl_options=ssl_options)
            http_server.listen(SSLPORT)
            self.port = SSLPORT  # so run() reports the port actually in use
        else:
            http_server = tornado.httpserver.HTTPServer(self.app)
            http_server.listen(self.port)
def run(self):
print('Starting PageBot/Tornado web application on port %s' % self.port)
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
args = sys.argv[1:]
server = AdServer(args=args)
server.run()
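# Client-side sketch (hypothetical; assumes the server above is running locally):
#
#   import requests
#   print(requests.get('http://localhost:5555/index.html').text)
#   print(requests.post('http://localhost:5555/index.html', json={'hello': 'world'}).json())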
|
#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import operator
import os
import re
import subprocess
#
# Find the root of the git tree
#
git_root = subprocess.check_output(
    ['git', 'rev-parse', '--show-toplevel']).decode('utf-8').strip()
#
# Parse command line arguments
#
default_out = os.path.join(git_root, '.github', 'CODEOWNERS')
argp = argparse.ArgumentParser('Generate .github/CODEOWNERS file')
argp.add_argument('--out',
'-o',
type=str,
default=default_out,
help='Output file (default %s)' % default_out)
args = argp.parse_args()
#
# Walk git tree to locate all OWNERS files
#
owners_files = [
os.path.join(root, 'OWNERS')
for root, dirs, files in os.walk(git_root)
if 'OWNERS' in files
]
#
# Parse owners files
#
Owners = collections.namedtuple('Owners', 'parent directives dir')
Directive = collections.namedtuple('Directive', 'who globs')
def parse_owners(filename):
with open(filename) as f:
src = f.read().splitlines()
parent = True
directives = []
for line in src:
line = line.strip()
# line := directive | comment
if not line:
continue
if line[0] == '#':
continue
# it's a directive
directive = None
if line == 'set noparent':
parent = False
elif line == '*':
directive = Directive(who='*', globs=[])
elif ' ' in line:
(who, globs) = line.split(' ', 1)
globs_list = [glob for glob in globs.split(' ') if glob]
directive = Directive(who=who, globs=globs_list)
else:
directive = Directive(who=line, globs=[])
if directive:
directives.append(directive)
return Owners(parent=parent,
directives=directives,
dir=os.path.relpath(os.path.dirname(filename), git_root))
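# Worked example (a sketch): an OWNERS file at src/core/OWNERS containing
#
#   set noparent
#   # core maintainers
#   alice
#   bob *.h *.cc
#
# parses to Owners(parent=False,
#                  directives=[Directive(who='alice', globs=[]),
#                              Directive(who='bob', globs=['*.h', '*.cc'])],
#                  dir='src/core')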
owners_data = sorted([parse_owners(filename) for filename in owners_files],
key=operator.attrgetter('dir'))
#
# Modify owners so that parented OWNERS files point to the actual
# Owners tuple with their parent field
#
new_owners_data = []
for owners in owners_data:
    if owners.parent:
best_parent = None
best_parent_score = None
for possible_parent in owners_data:
if possible_parent is owners:
continue
rel = os.path.relpath(owners.dir, possible_parent.dir)
# '..' ==> we had to walk up from possible_parent to get to owners
# ==> not a parent
if '..' in rel:
continue
depth = len(rel.split(os.sep))
if not best_parent or depth < best_parent_score:
best_parent = possible_parent
best_parent_score = depth
if best_parent:
owners = owners._replace(parent=best_parent.dir)
else:
owners = owners._replace(parent=None)
new_owners_data.append(owners)
owners_data = new_owners_data
#
# In bottom to top order, process owners data structures to build up
# a CODEOWNERS file for GitHub
#
def full_dir(rules_dir, sub_path):
return os.path.join(rules_dir, sub_path) if rules_dir != '.' else sub_path
# glob using git
gg_cache = {}
def git_glob(glob):
global gg_cache
if glob in gg_cache:
return gg_cache[glob]
r = set(
subprocess.check_output([
'git', 'ls-files', os.path.join(git_root, glob)
]).decode('utf-8').strip().splitlines())
gg_cache[glob] = r
return r
def expand_directives(root, directives):
globs = collections.OrderedDict()
# build a table of glob --> owners
for directive in directives:
for glob in directive.globs or ['**']:
if glob not in globs:
globs[glob] = []
if directive.who not in globs[glob]:
globs[glob].append(directive.who)
# expand owners for intersecting globs
sorted_globs = sorted(globs.keys(),
key=lambda g: len(git_glob(full_dir(root, g))),
reverse=True)
out_globs = collections.OrderedDict()
for glob_add in sorted_globs:
who_add = globs[glob_add]
pre_items = [i for i in out_globs.items()]
out_globs[glob_add] = who_add.copy()
for glob_have, who_have in pre_items:
files_add = git_glob(full_dir(root, glob_add))
files_have = git_glob(full_dir(root, glob_have))
intersect = files_have.intersection(files_add)
if intersect:
for f in sorted(files_add): # sorted to ensure merge stability
if f not in intersect:
out_globs[os.path.relpath(f, start=root)] = who_add
for who in who_have:
if who not in out_globs[glob_add]:
out_globs[glob_add].append(who)
return out_globs
def add_parent_to_globs(parent, globs, globs_dir):
if not parent:
return
for owners in owners_data:
if owners.dir == parent:
owners_globs = expand_directives(owners.dir, owners.directives)
for oglob, oglob_who in owners_globs.items():
for gglob, gglob_who in globs.items():
files_parent = git_glob(full_dir(owners.dir, oglob))
files_child = git_glob(full_dir(globs_dir, gglob))
intersect = files_parent.intersection(files_child)
gglob_who_orig = gglob_who.copy()
if intersect:
                        for f in sorted(files_child):  # sorted to ensure merge stability
if f not in intersect:
who = gglob_who_orig.copy()
globs[os.path.relpath(f, start=globs_dir)] = who
for who in oglob_who:
if who not in gglob_who:
gglob_who.append(who)
add_parent_to_globs(owners.parent, globs, globs_dir)
return
assert (False)
todo = owners_data.copy()
done = set()
with open(args.out, 'w') as out:
out.write('# Auto-generated by the tools/mkowners/mkowners.py tool\n')
out.write('# Uses OWNERS files in different modules throughout the\n')
out.write('# repository as the source of truth for module ownership.\n')
written_globs = []
while todo:
head, *todo = todo
        if head.parent and head.parent not in done:
todo.append(head)
continue
globs = expand_directives(head.dir, head.directives)
add_parent_to_globs(head.parent, globs, head.dir)
for glob, owners in globs.items():
skip = False
for glob1, owners1, dir1 in reversed(written_globs):
files = git_glob(full_dir(head.dir, glob))
files1 = git_glob(full_dir(dir1, glob1))
intersect = files.intersection(files1)
if files == intersect:
if sorted(owners) == sorted(owners1):
skip = True # nothing new in this rule
break
elif intersect:
# continuing would cause a semantic change since some files are
# affected differently by this rule and CODEOWNERS is order dependent
break
if not skip:
out.write('/%s %s\n' %
(full_dir(head.dir, glob), ' '.join(owners)))
written_globs.append((glob, owners, head.dir))
done.add(head.dir)
|
#!/usr/bin/python3
import enum
import pickle
from typing import Tuple
import torch
import torch.distributed.rpc as rpc
import torch.testing._internal.dist_utils as dist_utils
from torch import Tensor, nn
from torch._jit_internal import Future
from torch.distributed.nn import RemoteModule
from torch.distributed.nn.api.remote_module import _REMOTE_MODULE_PICKLED_ATTRIBUTES
from torch.distributed.nn.api.remote_module import _RemoteModule
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import TemporaryFileName
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
_PARAM_VAL = torch.nn.Parameter(torch.ones(1))
# RPC handler for querying the device on the destination worker.
def remote_device(module_rref):
for param in module_rref.local_value().parameters():
return param.device
# RPC handler for querying __dict__ on the destination worker.
def remote_module_attributes(remote_module):
return remote_module.__dict__
# RPC handler for running forward on the destination worker.
def remote_forward(remote_module, args):
return remote_module.forward(*args)
# RPC handler for running forward_async on the destination worker.
def remote_forward_async(remote_module, args):
# Since future cannot be pickled and sent over the RPC layer,
# have to wait and behave just like ``forward_sync``.
return remote_module.forward_async(*args).wait()
# RPC handler for creating a remote module by module rref on the destination worker.
def create_remote_module_by_module_rref(remote_device, module_rref):
return RemoteModule(remote_device=remote_device, module_rref=module_rref)
class ModuleCreationMode(enum.Enum):
MODULE_CTOR_WITH_INTERFACE = "module_ctor_with_interface"
MODULE_CTOR = "module_ctor"
@torch.jit.interface
class MyModuleInterface:
def forward(
self, tensor: Tensor, number: int, word: str = "default"
) -> Tuple[str, int, Tensor]:
# pyre-ignore[7]: Pyre and torch.jit.interface don't mix well
pass
@torch.jit.interface
class RemoteMyModuleInterface:
def forward(
self, tensor: Tensor, number: int, word: str = "default"
) -> Tuple[str, int, Tensor]:
# pyre-ignore[7]: Pyre and torch.jit.interface don't mix well
pass
def forward_async(
self, tensor: Tensor, number: int, word: str = "default"
) -> Future[Tuple[str, int, Tensor]]:
pass
class MyModule(nn.Module):
def __init__(self, first_arg, first_kwarg=-1):
super().__init__()
self.param1 = _PARAM_VAL
def forward(
self, tensor: Tensor, number: int, word: str = "default"
) -> Tuple[str, int, Tensor]:
return word, number, tensor
class BadModule:
def __init__(self, first_arg, first_kwarg=-1):
pass
def create_scripted_module(first_arg, first_kwarg=-1):
module = MyModule(first_arg, first_kwarg=first_kwarg)
scripted_module = torch.jit.script(module)
return scripted_module
# Common utils for both CPU and CUDA test suites
class CommonRemoteModuleTest(RpcAgentTestFixture):
@property
def world_size(self): # Override setting in RpcAgentTestFixture
return 2
@staticmethod
def _create_remote_module_iter(remote_device, modes=None):
if modes is None:
modes = ModuleCreationMode.__members__.values()
args = (1,)
kwargs = dict(first_kwarg=2)
if ModuleCreationMode.MODULE_CTOR in modes:
remote_module = RemoteModule(remote_device, MyModule, args, kwargs)
yield remote_module
if ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE in modes:
remote_module = _RemoteModule(
remote_device,
create_scripted_module,
args,
kwargs,
_module_interface_cls=MyModuleInterface,
)
scripted_remote_module = torch.jit.script(remote_module)
yield scripted_remote_module
class RemoteModuleTest(CommonRemoteModuleTest):
@dist_utils.dist_init
def test_bad_module(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
remote_device = "{}/cpu".format(dst_worker_name)
args = (1,)
kwargs = dict(first_kwarg=2)
with self.assertRaisesRegex(
ValueError,
r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of <class nn.Module>,",
):
RemoteModule(remote_device, BadModule, args, kwargs).forward()
with self.assertRaisesRegex(
ValueError,
r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of <class nn.Module>,",
):
RemoteModule(remote_device, BadModule, args, kwargs).forward()
@dist_utils.dist_init
def test_forward_async(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
args = (torch.ones(1), 2, "3")
for remote_module in self._create_remote_module_iter(dst_worker_name):
ret_fut = remote_module.forward_async(*args)
ret = ret_fut.wait()
self.assertEqual(ret, tuple(reversed(args)))
@dist_utils.dist_init
def test_forward_async_script(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
scripted_remote_module = next(
self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
)
)
@torch.jit.script
def run_forward_async(scripted_remote_module: RemoteMyModuleInterface):
ret_fut = scripted_remote_module.forward_async(torch.ones(1), 2, "3")
ret = ret_fut.wait()
return ret
ret = run_forward_async(scripted_remote_module)
self.assertEqual(ret, ("3", 2, torch.ones(1)))
@dist_utils.dist_init
def test_forward_sync(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
args = (torch.ones(1), 2, "3")
for remote_module in self._create_remote_module_iter(dst_worker_name):
ret = remote_module.forward(*args)
self.assertEqual(ret, tuple(reversed(args)))
@dist_utils.dist_init
def test_forward_sync_script(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
scripted_remote_module = next(
self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
)
)
@torch.jit.script
def run_forward(scripted_remote_module: MyModuleInterface):
ret = scripted_remote_module.forward(torch.ones(1), 2, "3")
return ret
ret = run_forward(scripted_remote_module)
self.assertEqual(ret, ("3", 2, torch.ones(1)))
@dist_utils.dist_init
def test_forward_with_kwargs(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
args = (torch.ones(1), 2)
kwargs = dict(word="3")
# Only test Python nn.Module, because script module methods don't support taking kwargs.
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
ret_fut = remote_module.forward_async(*args, **kwargs)
ret = ret_fut.wait()
self.assertEqual(ret, tuple(reversed(args + ("3",))))
ret = remote_module.forward(*args, **kwargs)
self.assertEqual(ret, tuple(reversed(args + ("3",))))
@dist_utils.dist_init
def test_remote_parameters(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
# Only test Python nn.Module, because script module methods don't support ``remote_parameters``.
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
param_rrefs = remote_module.remote_parameters()
self.assertEqual(len(param_rrefs), 1)
self.assertTrue(torch.equal(param_rrefs[0].to_here(), _PARAM_VAL))
@dist_utils.dist_init
def test_get_module_rref(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
# Only test Python nn.Module, because script module methods don't support ``get_module_rref``.
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
rref = remote_module.get_module_rref()
self.assertEqual(rref, remote_module.module_rref)
for param in rref.to_here().parameters():
self.assertTrue(torch.equal(param, _PARAM_VAL))
@dist_utils.dist_init
def test_unsupported_methods(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
with self.assertRaisesRegex(
ValueError, r"Method ``register_buffer`` not supported for RemoteModule"
):
remote_module.register_buffer("buffer", torch.ones(5))
with self.assertRaisesRegex(
ValueError,
r"Method ``register_parameter`` not supported for RemoteModule",
):
remote_module.register_parameter(
"param", torch.nn.Parameter(torch.ones(1))
)
with self.assertRaisesRegex(
ValueError, r"Method ``add_module`` not supported for RemoteModule"
):
remote_module.add_module("empty", None)
with self.assertRaisesRegex(
ValueError, r"Method ``apply`` not supported for RemoteModule"
):
fn = torch.rand((3, 3), requires_grad=False)
remote_module.apply(fn)
with self.assertRaisesRegex(
ValueError, r"Method ``cuda`` not supported for RemoteModule"
):
remote_module.cuda()
with self.assertRaisesRegex(
ValueError, r"Method ``cpu`` not supported for RemoteModule"
):
remote_module.cpu()
with self.assertRaisesRegex(
ValueError, r"Method ``type`` not supported for RemoteModule"
):
remote_module.type(torch.FloatTensor)
with self.assertRaisesRegex(
ValueError, r"Method ``float`` not supported for RemoteModule"
):
remote_module.float()
with self.assertRaisesRegex(
ValueError, r"Method ``double`` not supported for RemoteModule"
):
remote_module.double()
with self.assertRaisesRegex(
ValueError, r"Method ``bfloat16`` not supported for RemoteModule"
):
remote_module.bfloat16()
with self.assertRaisesRegex(
ValueError, r"Method ``to`` not supported for RemoteModule"
):
remote_module.to("cpu", dtype=torch.int32)
def hook(module, grad_input, grad_output):
pass
with self.assertRaisesRegex(
ValueError,
r"Method ``register_backward_hook`` not supported for RemoteModule",
):
remote_module.register_backward_hook(hook)
with self.assertRaisesRegex(
ValueError,
r"Method ``register_forward_pre_hook`` not supported for RemoteModule",
):
remote_module.register_forward_pre_hook(hook)
with self.assertRaisesRegex(
ValueError,
r"Method ``register_forward_hook`` not supported for RemoteModule",
):
remote_module.register_forward_hook(hook)
with self.assertRaisesRegex(
ValueError, r"Method ``state_dict`` not supported for RemoteModule"
):
remote_module.state_dict()
with self.assertRaisesRegex(
ValueError, r"Method ``load_state_dict`` not supported for RemoteModule"
):
remote_module.load_state_dict({})
with self.assertRaisesRegex(
ValueError,
r"Method ``parameters`` not supported for RemoteModule. Please use ``remote_parameters`` instead.",
):
remote_module.parameters()
with self.assertRaisesRegex(
ValueError,
r"Method ``named_parameters`` not supported for RemoteModule",
):
remote_module.named_parameters()
with self.assertRaisesRegex(
ValueError, r"Method ``buffers`` not supported for RemoteModule"
):
remote_module.buffers()
with self.assertRaisesRegex(
ValueError, r"Method ``named_buffers`` not supported for RemoteModule"
):
remote_module.named_buffers()
with self.assertRaisesRegex(
ValueError, r"Method ``children`` not supported for RemoteModule"
):
remote_module.children()
with self.assertRaisesRegex(
ValueError, r"Method ``named_children`` not supported for RemoteModule"
):
remote_module.named_children()
with self.assertRaisesRegex(
ValueError, r"Method ``modules`` not supported for RemoteModule"
):
remote_module.modules()
with self.assertRaisesRegex(
ValueError, r"Method ``named_modules`` not supported for RemoteModule"
):
remote_module.named_modules()
with self.assertRaisesRegex(
ValueError, r"Method ``train`` not supported for RemoteModule"
):
remote_module.train()
with self.assertRaisesRegex(
ValueError, r"Method ``eval`` not supported for RemoteModule"
):
remote_module.eval()
with self.assertRaisesRegex(
ValueError, r"Method ``requires_grad_`` not supported for RemoteModule"
):
remote_module.requires_grad_()
with self.assertRaisesRegex(
ValueError, r"Method ``zero_grad`` not supported for RemoteModule"
):
remote_module.zero_grad()
with self.assertRaisesRegex(
ValueError, r"Method ``share_memory`` not supported for RemoteModule"
):
remote_module.share_memory()
with self.assertRaisesRegex(
ValueError, r"Method ``extra_repr`` not supported for RemoteModule"
):
remote_module.extra_repr()
@dist_utils.dist_init
def test_send_remote_module_with_a_new_attribute_not_pickled_over_the_wire(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
# If a new attribute is added to this RemoteModule after the initialization,
# and it will be sent over the wire by RPC,
# this new field will not be pickled, because it's not specified in _REMOTE_MODULE_PICKLED_ATTRIBUTES.
        # Note that adding a new attribute outside the constructor should rarely happen.
# If a new attribute is added to RemoteModule constructor,
# there is a sanity check to enforce developers to add this attribute to either
# _REMOTE_MODULE_PICKLED_ATTRIBUTES or _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING.
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
new_attr_name = "new_attr"
setattr(remote_module, new_attr_name, 1)
attrs = rpc.rpc_sync(
dst_worker_name, remote_module_attributes, (remote_module,)
)
self.assertNotIn(new_attr_name, attrs)
@dist_utils.dist_init
def test_remote_module_py_pickle_not_supported(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
with TemporaryFileName() as fname:
with self.assertRaisesRegex(
RuntimeError,
"Cannot pickle RemoteModule in python pickler. RemoteModule can only be pickled when using RPC",
):
torch.save(remote_module, fname)
@dist_utils.dist_init
def test_remote_module_py_pickle_not_supported_script(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
):
with TemporaryFileName() as fname:
with self.assertRaises(pickle.PickleError):
torch.save(remote_module, fname)
class ThreeWorkersRemoteModuleTest(CommonRemoteModuleTest):
@property
def world_size(self): # Override setting in CommonRemoteModuleTest
return 3
@dist_utils.dist_init
def test_send_remote_module_over_the_wire(self):
if self.rank != 0:
return
dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size)
        # Unpickled attributes include both the inherent attributes of RemoteModule
        # (not inherited from the superclass) and two installed methods.
expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES)
expected_unpickled_attrs.append("forward_async")
expected_unpickled_attrs.append("forward")
# Create a remote module on worker1 and then pass it to worker2 over the RPC layer.
for remote_module in self._create_remote_module_iter(
dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
# Test querying some simple attributes from worker2.
attrs = rpc.rpc_sync(
dst_worker2_name, remote_module_attributes, (remote_module,)
)
self.assertListEqual(list(attrs.keys()), expected_unpickled_attrs)
self.assertEqual(attrs["on"], "worker1")
self.assertEqual(attrs["device"], "cpu")
self.assertFalse(attrs["is_device_map_set"])
self.assertFalse(attrs["is_scriptable"])
            # Test that the methods installed on worker1 can be invoked by worker2 over the RPC layer.
            # NOTE: In practice a remote module should be directly stored on the worker that runs ``forward`` or ``forward_async``,
            # not have another worker initiate forward over the RPC layer.
args = (torch.ones(1), 2, "3")
ret1 = rpc.rpc_sync(dst_worker2_name, remote_forward, (remote_module, args))
self.assertEqual(ret1, tuple(reversed(args)))
ret2 = rpc.rpc_sync(
dst_worker2_name, remote_forward_async, (remote_module, args)
)
self.assertEqual(ret2, tuple(reversed(args)))
@dist_utils.dist_init
def test_send_remote_module_over_the_wire_script_not_supported(self):
if self.rank != 0:
return
dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size)
        # Unpickled attributes include both the inherent attributes of RemoteModule
        # (not inherited from the superclass) and two installed methods.
expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES)
expected_unpickled_attrs.append("forward_async")
expected_unpickled_attrs.append("forward")
with self.assertRaisesRegex(
RuntimeError, "Passing a script RemoteModule over RPC is not supported."
):
# Create a remote module on worker1 and then pass it to worker2 over the RPC layer.
for remote_module in self._create_remote_module_iter(
dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
):
# Test querying some simple attributes from worker2.
attrs = rpc.rpc_sync(
dst_worker2_name, remote_module_attributes, (remote_module,)
)
@dist_utils.dist_init
def test_create_remote_module_by_module_rref(self):
if self.rank != 0:
return
dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size)
# Create a remote module on worker1 and then pass its `module_rref` to worker2 over the RPC layer.
for remote_module in self._create_remote_module_iter(
dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
remote_module2 = rpc.rpc_sync(
dst_worker2_name,
create_remote_module_by_module_rref,
(dst_worker2_name, remote_module.get_module_rref()),
)
args = (torch.ones(1), 2, "3")
ret1 = rpc.rpc_sync(
dst_worker1_name, remote_forward, (remote_module, args)
)
ret2 = rpc.rpc_sync(
dst_worker2_name, remote_forward, (remote_module2, args)
)
            self.assertEqual(ret1, ret2)
class CudaRemoteModuleTest(CommonRemoteModuleTest):
@skip_if_lt_x_gpu(1)
@dist_utils.dist_init
def test_valid_device(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = dist_utils.worker_name(dst_rank)
for remote_module in self._create_remote_module_iter(
"{}/cuda:0".format(dst_worker_name), modes=[ModuleCreationMode.MODULE_CTOR]
):
device = rpc.rpc_sync(
dst_worker_name, remote_device, (remote_module.module_rref,)
)
self.assertEqual(device.type, "cuda")
self.assertEqual(device.index, 0)
# Test rank works as well.
for remote_module in self._create_remote_module_iter(
"rank:{}/cuda:0".format(dst_rank), modes=[ModuleCreationMode.MODULE_CTOR]
):
device = rpc.rpc_sync(
dst_worker_name, remote_device, (remote_module.module_rref,)
)
self.assertEqual(device.type, "cuda")
self.assertEqual(device.index, 0)
@skip_if_lt_x_gpu(1)
@dist_utils.dist_init
def test_invalid_devices(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError,
r"Expected one of .+ device type at start of device string",
):
list(
m.forward()
for m in self._create_remote_module_iter(
"{}/foo".format(dst_worker_name),
modes=[ModuleCreationMode.MODULE_CTOR],
)
)
with self.assertRaisesRegex(
RuntimeError, r"CUDA error: invalid device ordinal"
):
list(
m.forward()
for m in self._create_remote_module_iter(
"{}/cuda:100".format(dst_worker_name),
modes=[ModuleCreationMode.MODULE_CTOR],
)
)
with self.assertRaisesRegex(RuntimeError, r"Invalid device string: 'cpu2'"):
list(
m.forward()
for m in self._create_remote_module_iter(
"{}/cpu2".format(dst_worker_name),
modes=[ModuleCreationMode.MODULE_CTOR],
)
)
with self.assertRaisesRegex(RuntimeError, r"Device string must not be empty"):
list(
m.forward()
for m in self._create_remote_module_iter(
"{}/".format(dst_worker_name),
modes=[ModuleCreationMode.MODULE_CTOR],
)
)
with self.assertRaisesRegex(
ValueError,
r"Could not parse remote_device: worker1/cuda:0/cuda:1. The valid format is '<workername>/<device>'",
):
list(
m.forward()
for m in self._create_remote_module_iter(
"{}/cuda:0/cuda:1".format(dst_worker_name),
modes=[ModuleCreationMode.MODULE_CTOR],
)
)
with self.assertRaisesRegex(
ValueError,
r"Could not parse remote_device: /. The valid format is '<workername>/<device>'",
):
list(
m.forward()
for m in self._create_remote_module_iter(
"/",
modes=[ModuleCreationMode.MODULE_CTOR],
)
)
with self.assertRaisesRegex(
ValueError,
r"Could not parse remote_device: /cuda:0. The valid format is '<workername>/<device>'",
):
list(
m.forward()
for m in self._create_remote_module_iter(
"/cuda:0",
modes=[ModuleCreationMode.MODULE_CTOR],
)
)
@skip_if_lt_x_gpu(1)
@dist_utils.dist_init
def test_input_moved_to_cuda_device(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
# These two CPU tensors (in args and kwargs) should be implicitly moved to an appropriate cuda device.
t1 = torch.ones(1)
args = (t1, 2)
t2 = t1 * 2
kwargs = dict(word=t2)
# Only test Python nn.Module, because script module methods don't support taking kwargs.
for remote_module in self._create_remote_module_iter(
"{}/cuda:0".format(dst_worker_name), modes=[ModuleCreationMode.MODULE_CTOR]
):
ret_fut = remote_module.forward_async(*args, **kwargs)
ret = ret_fut.wait()
self.assertEqual(ret, tuple(reversed(args + (t2,))))
# TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0".
self.assertEqual(ret[0].device.type, "cpu")
self.assertEqual(ret[2].device.type, "cpu")
ret = remote_module.forward(*args, **kwargs)
self.assertEqual(ret, tuple(reversed(args + (t2,))))
# TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0".
self.assertEqual(ret[0].device.type, "cpu")
self.assertEqual(ret[2].device.type, "cpu")
@skip_if_lt_x_gpu(1)
@dist_utils.dist_init
def test_input_moved_to_cuda_device_script(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
scripted_remote_module = next(
self._create_remote_module_iter(
"{}/cuda:0".format(dst_worker_name),
modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE],
)
)
@torch.jit.script
def run_forward(scripted_remote_module: MyModuleInterface):
ret = scripted_remote_module.forward(torch.ones(1), 2, "3")
return ret
ret = run_forward(scripted_remote_module)
self.assertEqual(ret, ("3", 2, torch.ones(1)))
# TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0".
self.assertEqual(ret[2].device.type, "cpu")
|
numero = 5
print(numero == 5)
numero2 = 145
print(numero2 == 145)
num1 = input("Give me a number: ")
if int(num1) % 2 == 0:
    print("It's even")
else:
    print("It's odd")
nombre = input("Give me a name: ")
if nombre == "Raul":
    print("I love you")
elif nombre == "Juan":
    print("I hate you")
else:
    print("I don't care")
if int(num1) % 2 != 1:
    print("It's even")
else:
    print("It's odd")
if True:  # a branch runs when its condition evaluates to True; otherwise it is skipped
    print("this always runs")
|
"""
Set up defaults and read sentinel.conf
"""
import sys
import os
from dash_config import DashConfig
default_sentinel_config = os.path.normpath(
os.path.join(os.path.dirname(__file__), '../sentinel.conf')
)
sentinel_config_file = os.environ.get('SENTINEL_CONFIG', default_sentinel_config)
sentinel_cfg = DashConfig.tokenize(sentinel_config_file)
sentinel_version = "1.3.0"
min_dashd_proto_version_with_sentinel_ping = 70207
def get_dash_conf():
if sys.platform == 'win32':
dash_conf = os.path.join(os.getenv('APPDATA'), "DashCore/dash.conf")
else:
home = os.environ.get('HOME')
dash_conf = os.path.join(home, ".dashcore/dash.conf")
if sys.platform == 'darwin':
dash_conf = os.path.join(home, "Library/Application Support/DashCore/dash.conf")
dash_conf = sentinel_cfg.get('dash_conf', dash_conf)
return dash_conf
def get_network():
return sentinel_cfg.get('network', 'mainnet')
def get_rpchost():
return sentinel_cfg.get('rpchost', '127.0.0.1')
def sqlite_test_db_name(sqlite_file_path):
(root, ext) = os.path.splitext(sqlite_file_path)
test_sqlite_file_path = root + '_test' + ext
return test_sqlite_file_path
def get_db_conn():
import peewee
env = os.environ.get('SENTINEL_ENV', 'production')
# default values should be used unless you need a different config for development
db_host = sentinel_cfg.get('db_host', '127.0.0.1')
db_port = sentinel_cfg.get('db_port', None)
db_name = sentinel_cfg.get('db_name', 'sentinel')
db_user = sentinel_cfg.get('db_user', 'sentinel')
db_password = sentinel_cfg.get('db_password', 'sentinel')
db_charset = sentinel_cfg.get('db_charset', 'utf8mb4')
db_driver = sentinel_cfg.get('db_driver', 'sqlite')
if (env == 'test'):
if db_driver == 'sqlite':
db_name = sqlite_test_db_name(db_name)
else:
db_name = "%s_test" % db_name
peewee_drivers = {
'mysql': peewee.MySQLDatabase,
'postgres': peewee.PostgresqlDatabase,
'sqlite': peewee.SqliteDatabase,
}
driver = peewee_drivers.get(db_driver)
dbpfn = 'passwd' if db_driver == 'mysql' else 'password'
db_conn = {
'host': db_host,
'user': db_user,
dbpfn: db_password,
}
if db_port:
db_conn['port'] = int(db_port)
if driver == peewee.SqliteDatabase:
db_conn = {}
db = driver(db_name, **db_conn)
return db
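# Example sentinel.conf database section (a sketch; each key maps to a
# sentinel_cfg.get() lookup above, and all values shown are placeholders):
#
#   db_driver=mysql
#   db_host=127.0.0.1
#   db_port=3306
#   db_name=sentinel
#   db_user=sentinel
#   db_password=sentinel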
dash_conf = get_dash_conf()
network = get_network()
rpc_host = get_rpchost()
db = get_db_conn()
|
from pyb import UART
# Setup the connection to your GPS here
# This example uses UART 3 with RX on pin Y10
# Baudrate is 9600bps, with the standard 8 bits, 1 stop bit, no parity
uart = UART(3, 9600)
# Basic UART --> terminal printer, use to test your GPS module
while True:
if uart.any():
print(chr(uart.readchar()), end='')
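# A line-oriented variant (sketch): NMEA sentences are newline-terminated, so
# whole sentences can be read at once with readline(), e.g.
#
#   while True:
#       line = uart.readline()
#       if line:
#           print(line)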
|
import requests
import pprint
import re
import pandas as pd
from bs4 import BeautifulSoup
import time
# encparam
def get_encparam(code):
url = f"https://navercomp.wisereport.co.kr/v2/company/c1010001.aspx?cmp_cd={code}"
resp = requests.get(url)
text = resp.text
encparam = re.search("encparam: '(.+)'", text)[1].strip()
return encparam
# Naver Finance balance sheet
def get_balance_sheet(code):
encparam = get_encparam(code)
url = "https://navercomp.wisereport.co.kr/v2/company/cF3002.aspx"
headers = {
"Host": "navercomp.wisereport.co.kr",
"Referer": f"https://navercomp.wisereport.co.kr/v2/company/c1030001.aspx?cmp_cd={code}&cn=",
"User-Agent": "Mozilla/5.0"
}
params = {
"cmp_cd": f"{code}",
"frq": "0",
"rpt": "1",
"finGubun": "MAIN",
"frqTyp": "0",
"encparam": encparam
}
resp = requests.get(url, headers=headers, params=params)
data = resp.json()
data = data['DATA']
return pd.DataFrame(data)
# operating profit
def get_operating_profit(code):
try:
# download html
url = f"http://comp.fnguide.com/SVO2/ASP/SVD_main.asp?pGB=1&gicode={code}"
resp = requests.get(url)
html = resp.text
        # scraping by CSS selector
soup = BeautifulSoup(html, "html5lib")
selector = "#highlight_D_A > table > tbody > tr:nth-child(2) > td:nth-child(4)"
tags = soup.select(selector)
equity = tags[0].text
equity = equity.replace(",", "")
equity = float(equity) * 100000000
return equity
    except Exception:
        return None
# floating shares (outstanding minus treasury shares)
def get_shares(code):
try:
url = f"http://comp.fnguide.com/SVO2/ASP/SVD_main.asp?pGB=1&gicode={code}"
resp = requests.get(url)
html = resp.text
selector = "#svdMainGrid1 > table > tbody > tr:nth-child(7) > td:nth-child(2)"
soup = BeautifulSoup(html, "html5lib")
tags = soup.select(selector)
shares = tags[0].text.split('/')[0]
shares = shares.replace(',', '')
shares = float(shares)
    except Exception:
        shares = 0
try:
selector = "#svdMainGrid5 > table > tbody > tr:nth-child(5) > td:nth-child(3)"
tags = soup.select(selector)
self_shares = tags[0].text
self_shares = self_shares.replace(',', '')
self_shares = float(self_shares)
    except Exception:
        self_shares = 0
return shares - self_shares
# stock code list
def get_code_list():
url = "http://comp.fnguide.com/SVO2/common/lookup_data.asp?mkt_gb=1&comp_gb=1"
resp = requests.get(url)
data = resp.json()
return data
if __name__ == "__main__":
df = get_balance_sheet("005930")
    df['ACC_NM'] = df['ACC_NM'].str.replace('.', '', regex=False)  # regex=False: remove literal dots only
df.set_index('ACC_NM', inplace=True)
df2 = df.loc[['유동자산', '투자자산', '유동부채', '비유동부채'], 'DATA5']
df2.dropna(inplace=True)
    # asset value = current assets - (current liabilities * 1.2) + investment assets
    재산가치 = df2["유동자산"] - (df2["유동부채"] * 1.2) + df2["투자자산"]
    재산가치 = 재산가치 * 100000000
    # non-current liabilities
    비유동부채 = df2["비유동부채"] * 100000000
print("재산가치", 재산가치)
print("비유동부채", 비유동부채)
|
import qrcode
import numpy as np
# data to encode
data = "https://www.thepythoncode.com"
# instantiate QRCode object
qr = qrcode.QRCode(version=1, box_size=10, border=4)
# add data to the QR code
qr.add_data(data)
# compile the data into a QR code array
qr.make()
# print the image shape
print("The shape of the QR image:", np.array(qr.get_matrix()).shape)
# transfer the array into an actual image
img = qr.make_image(fill_color="white", back_color="black")
# save it to a file
img.save("site_inversed.png")
|
import sys
sys.path.append("src/")
sys.path.append("script/")
sys.path.append("script/pipe_line/")
from data_manupulation import data_manupulation
import pandas as pd
import numpy as np
import argparse
from condition_manager import condition_manager
from utils import safe_mkdir
from utils import dict2r1df
import os
from gridmap import grid_map
def stge_main(condition):
print("Start job")
sim_dm = data_manupulation.conduct_sc_ts_sim(
condition["scnum"], condition["refnum"], condition["genenum"],
condition["amplitude"], condition["width"], condition["tgain"])
stge = data_manupulation.initiate_sim_stge(
sim_dm, l_corr=condition["lcorr"], t_corr=condition["tcorr"])
stge = data_manupulation.optimize_stge(
stge, vb_iter=condition["vbiter"], iter_num=condition["optiter"])
copy_condition = dict(condition)
del copy_condition["varkeys"]
del copy_condition["root"]
corr_list = data_manupulation.calculate_corr_list_all_time(stge)
pos_dist_list = data_manupulation.calculate_pos_dist_list_all_time(stge)
copy_condition["corr"] = np.mean(corr_list)
copy_condition["posdist"] = np.mean(pos_dist_list)
    return copy_condition
def main():
# parse commandline
parser = argparse.ArgumentParser(
        description='Distributing STGE computation to SGE clusters')
parser.add_argument("--genenum", default=[100], type=int, nargs="*",
help="Number of gene")
parser.add_argument("--amplitude", default=[300], type=float, nargs="*",
help="Amplitude of gene expression")
parser.add_argument("--width", default=[200], type=float, nargs="*",
help="Broadness of gene expression")
parser.add_argument("--tgain", default=[1.0], type=float, nargs="*",
help="Time change speed")
parser.add_argument("--lcorr", default=[200], type=float, nargs="*",
help="Lenght of spatial correlaiton")
parser.add_argument("--tcorr", default=[5], type=float, nargs="*",
help="Lenght of time correlaiton")
parser.add_argument("--scnum", default=[1000], type=int, nargs="*",
help="Number of observed single cell expression at one timepoint")
parser.add_argument("--refnum", default=[1000], type=int, nargs="*",
help="Number of cells in simulation")
parser.add_argument("--vbiter", default=[10], type=int, nargs="*",
;;;; help="Iteration of variational bayes")
parser.add_argument("--optiter", default=[10], type=int, nargs="*",
help="Iteration of parameter optimizaiton")
parser.add_argument("--memreq", default="2G", type=str, nargs="*",
help="Memorry requirement fo each job")
parser.add_argument("--root", type=str,
help="Path to root directory")
parser.add_argument("--queue", type=str, default="mjobs.q",
help="Queue type of jobs")
args = parser.parse_args()
cond_manager = condition_manager()
cond_keys = ["genenum", "amplitude", "width", "tgain",
"lcorr", "tcorr", "scnum", "refnum", "vbiter", "optiter"]
cond_manager.load_cmdline(args, keys=cond_keys)
# make root directory
os.mkdir(args.root)
# save experimental condition
params_file_path = "/".join([args.root, "params.json"])
cond_manager.save_as_json(params_file_path)
# convert args to condition list
base_dict = {"root": args.root}
cond_manager.make_cond_dict_list(base_dict)
    # apply stge_main to each condition in the list
job_name = args.root.split("/")[-1]
if len(job_name) == 0: # the case the end of root is /
job_name = args.root.split("/")[-2]
temp_dir = "/".join([args.root, "tmp/"])
print(job_name)
condition_list = grid_map(
stge_main, cond_manager.cond_dict_list,
mem_free=args.memreq, name=job_name, temp_dir=temp_dir,
queue=args.queue)
accuracy_df_list = [dict2r1df(condition) for condition in condition_list]
accuracy_df = pd.concat(accuracy_df_list)
file_path = args.root + "/accuracy.csv"
accuracy_df.to_csv(file_path)
if __name__ == "__main__":
main()
|
from ase import Atoms
from gpaw import GPAW, PW
"""
A simple script to run an H2 calculation
with GPAW using a PW basis and writing outputs
to a specific file called out.txt
"""
# set up the H2 molecule
h2 = Atoms('H2', [(0, 0, 0), (0, 0, 0.74)])
h2.center(vacuum=2.5)
# define the calculator
h2.calc = GPAW(xc='PBE', mode=PW(300), txt='aiida_gpaw.txt')
# One of the ways to "start" the calculation
h2.get_potential_energy()
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
PYTHON_VERSION_COMPATIBILITY = "PY3"
DEPS = [
'depot_tools/bot_update',
'depot_tools/gclient',
'depot_tools/git',
'depot_tools/tryserver',
'recipe_engine/context',
'recipe_engine/file',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/step',
'run',
'vars',
]
|
from argparse import ArgumentParser
from multiprocessing import Pool
from termcolor import colored
from rfc3987 import parse
import itertools
import requests
import sys
import re
#import urllib3
#urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def print_banner():
print('''\nCloudScraper is a tool to search through the source code of websites in order to find cloud resources belonging to a target.
by Jordan Potti
@ok_bye_now
Modified version : lutzenfried\n'''
)
def checker(url):
'''
Check if the url is a valid one or not.
'''
    try:
        parse(url)
        return True
    except ValueError:
        return False
def gather_links(html):
'''
Apply to the raw HTML a regular expression to gather all the urls.
'''
urls = []
links_ = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', html)
urls.extend(filter(checker, links_)) #filter the ones that don't compile with the checker function
del(links_)
return list(set(urls))
def start(target):
'''
Load the initial url and gather the first urls that will be used
by the spider to keep looking for more links
'''
print(colored("Beginning search for cloud resources in {}".format(target), color='cyan'))
try:
html = requests.get(target, allow_redirects=True, headers=headers, verify=True).text
links = gather_links(html)
except requests.exceptions.RequestException as e:
if arguments.v:
print(colored('Network error: {}'.format(e), 'red', attrs=['bold']))
return
print(colored('Initial links: {}\n'.format(len(links)), color='cyan'))
spider(links, target)
def worker(url):
'''
Function handling all the crawling action of the spider.
It first checks the desired depth and if the domain of
the url matches the target to avoid crawling other web sites.
Makes a GET request, parses the HTML and returns all the links.
'''
if url.count("/") <= arguments.depth+2:
try:
html = requests.get(url, allow_redirects=True, headers=headers, verify=True).text
links = gather_links(html)
except requests.exceptions.RequestException as e:
if arguments.v:
print(colored('Network error: {}'.format(e), 'red', attrs=['bold']))
return []
print('{} links found [{}]'.format(len(links), url))
return links
else:
return []
def spider(base_urls, target):
'''
Loop through the initial links found in the given page. Each new link
discovered will be added to the list if it's not already there, and thus
    crawled as well, looking for more links.
wannabe list works as the placeholder for the urls that are yet to crawl.
base_urls is a list with all the already crawled urls.
'''
global target_
target_ = parse(target)
p = Pool(arguments.process)
wannabe = [url for url in base_urls if target_['authority'] in parse(url)['authority']]
while True:
#retrieve all the urls returned by the workers
new_urls = p.map(worker, wannabe)
#flatten them and remove repeated ones
new_urls = list(set(itertools.chain(*new_urls)))
wannabe = []
i = 0
#if new_urls is empty meaning no more urls are being discovered, exit the loop
if new_urls == []:
break
else:
for url in new_urls:
if url not in base_urls:
'''
For each new url, check if it hasn't been crawled. If it's
indeed new and contains the target domain it gets appended to
the wannabe list so in the next iteration it will be crawled.
'''
i += 1
if target_['authority'] in parse(url)['authority']:
wannabe.append(url)
base_urls.append(url)
print(colored('\nNew urls appended: {}\n'.format(i), 'green', attrs=['bold']))
p.close()
p.join()
#once all the links for the given depth have been analyzed, execute the parser
parser(base_urls)
def parser(links):
'''
Once all the links have been gathered check how many of them
match with the list of cloud domains we are interested in.
'''
print(colored('Parsing results...', 'cyan', attrs=['bold']))
cloud_domains = ['amazonaws.com', 'digitaloceanspaces.com', 'windows.net', 'storage.googleapis.com', 'aliyuncs.com', 'bc.googleusercontent.com','appspot.com','run.app','firebaseio.com','cloudfunctions.net','azurewebsites.net','cloudapp.net','cloudapp.azure.com']
    matches = [link for cloud_domain in cloud_domains
               for link in links if cloud_domain in link]
matches = list(set(matches))
print('\nTotal links: ', len(links))
if len(matches) == 0:
print(colored("There were no matches!", 'red', attrs=['bold']))
else:
print(colored("There were {} matches for this search!".format(len(matches)), 'green', attrs=['bold']))
        for match in matches:
            print(match, "\n")
def args():
parser = ArgumentParser()
parser.add_argument("-u", dest="URL", required=False, help="Target Scope")
parser.add_argument("-d", dest="depth", type=int, required=False, default=5, help="Max Depth of links Default: 5")
parser.add_argument("-l", dest="targetlist", required=False, help="Location of text file of Line Delimited targets")
parser.add_argument("-v", action="store_true", default=False, required=False, help="Verbose output")
#parser.add_argument("-t", dest="time", required=False, default=0, help="Time between GETs to avoid getting blocked")
parser.add_argument("-p", dest="process", required=False, default=2, type=int, help="Number of processes to run")
    if len(sys.argv) == 1:
        # parser.error() prints the usage message and exits
        parser.error("No arguments given.")
    # output parsed arguments into a usable object
return parser.parse_args()
def cleaner(url):
if 'http' not in url:
return ("https://"+url).strip()
else:
return url.strip()
def main():
    if arguments.targetlist:
        with open(arguments.targetlist, 'r') as target_list:
            for line in target_list:
                start(cleaner(line))
    else:
        start(cleaner(arguments.URL))
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36'
}
arguments = args()
if __name__ == '__main__':
print_banner()
main()
|
from .find_classrooms import find_classrooms
from collections import defaultdict
import datetime
import json
MAX_TIME = 20
def _is_room_free(lessons, starting_time, ending_time):
until = MAX_TIME
if len(lessons) == 0:
return (True, until)
for lesson in lessons:
start = float(lesson['from'])
end = float(lesson['to'])
if starting_time <= start and start < ending_time:
return (False, None)
if start <= starting_time and end > starting_time:
return (False, None)
if ending_time <= start and until == MAX_TIME:
until = start
return (True, until)
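# Worked example (a sketch): with lessons = [{'from': '9.0', 'to': '11.0'}],
# a request for 11.0-12.0 does not overlap and returns (True, MAX_TIME),
# while a request for 10.0-12.0 overlaps the lesson and returns (False, None).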
def find_free_room(starting_time, ending_time, location, day, month, year):
    free_rooms = defaultdict(list)
    infos = find_classrooms(location, day, month, year)
    for building in infos:
        for room in infos[building]:
            lessons = infos[building][room]['lessons']
            free, until = _is_room_free(lessons, starting_time, ending_time)
            if free:
                room_info = {
                    'name': room,
                    'link': infos[building][room]['link'],
                    'until': until
                }
                free_rooms[building].append(room_info)
    return free_rooms
if __name__ == "__main__":
    now = datetime.datetime.now()
    info = find_free_room(9.25, 12.25, 'MIA', 25, 10, 2021)
    with open('infos_a.json', 'w') as outfile:
        json.dump(info, outfile)
|
"""
ASGI config for videoProject project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'videoProject.settings')
application = get_asgi_application()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by Lucas Sinclair.
MIT Licensed.
Contact at www.sinclair.bio
"""
# Built-in modules #
# Internal modules #
from fasta import FASTA
from plumbing.databases import convert_to_sql
from plumbing.databases.sqlite_database import SQLiteDatabase
from plumbing.common import GenWithLength
# Third party modules #
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from tqdm import tqdm
# Constants #
base_keys = ('id', 'description', 'seq')
###############################################################################
class DatabaseFASTA(SQLiteDatabase):
def __init__(self, path=None):
self.path = path
self.factory = lambda cursor, row: SeqRecord(Seq(row[2]), id=row[0], description=row[1])
def parse(self):
pass
###############################################################################
def generate_values(path, progress=False):
seqs = SeqIO.parse(path, 'fasta')
if not progress:
for seq in seqs: yield seq.id, seq.description, str(seq.seq)
if progress:
for seq in tqdm(GenWithLength(seqs, len(FASTA(path)))):
yield seq.id, seq.description, str(seq.seq)
###############################################################################
def fasta_to_sql(source, dest):
values = generate_values(source, progress=True)
convert_to_sql(dest, base_keys, values)
return DatabaseFASTA(dest)
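# A minimal usage sketch (paths are placeholders): converts a FASTA file into a
# SQLite database whose rows come back out as Bio SeqRecord objects.
#
#   db = fasta_to_sql('reads.fasta', 'reads.sqlite')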
|
"""Provide the 'autogenerate' feature which can produce migration operations
automatically."""
import contextlib
from sqlalchemy import inspect
from . import compare
from . import render
from .. import util
from ..operations import ops
def compare_metadata(context, metadata):
"""Compare a database schema to that given in a
:class:`~sqlalchemy.schema.MetaData` instance.
The database connection is presented in the context
of a :class:`.MigrationContext` object, which
provides database connectivity as well as optional
comparison functions to use for datatypes and
server defaults - see the "autogenerate" arguments
at :meth:`.EnvironmentContext.configure`
for details on these.
The return format is a list of "diff" directives,
each representing individual differences::
from alembic.migration import MigrationContext
from alembic.autogenerate import compare_metadata
from sqlalchemy.schema import SchemaItem
from sqlalchemy.types import TypeEngine
from sqlalchemy import (create_engine, MetaData, Column,
Integer, String, Table, text)
import pprint
engine = create_engine("sqlite://")
with engine.begin() as conn:
conn.execute(text('''
create table foo (
id integer not null primary key,
old_data varchar,
x integer
)'''))
conn.execute(text('''
create table bar (
data varchar
)'''))
metadata = MetaData()
Table('foo', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer),
Column('x', Integer, nullable=False)
)
Table('bat', metadata,
Column('info', String)
)
mc = MigrationContext.configure(engine.connect())
diff = compare_metadata(mc, metadata)
pprint.pprint(diff, indent=2, width=20)
Output::
[ ( 'add_table',
Table('bat', MetaData(bind=None),
Column('info', String(), table=<bat>), schema=None)),
( 'remove_table',
Table(u'bar', MetaData(bind=None),
Column(u'data', VARCHAR(), table=<bar>), schema=None)),
( 'add_column',
None,
'foo',
Column('data', Integer(), table=<foo>)),
( 'remove_column',
None,
'foo',
Column(u'old_data', VARCHAR(), table=None)),
[ ( 'modify_nullable',
None,
'foo',
u'x',
{ 'existing_server_default': None,
'existing_type': INTEGER()},
True,
False)]]
:param context: a :class:`.MigrationContext`
instance.
:param metadata: a :class:`~sqlalchemy.schema.MetaData`
instance.
.. seealso::
:func:`.produce_migrations` - produces a :class:`.MigrationScript`
structure based on metadata comparison.
"""
migration_script = produce_migrations(context, metadata)
return migration_script.upgrade_ops.as_diffs()
def produce_migrations(context, metadata):
"""Produce a :class:`.MigrationScript` structure based on schema
comparison.
This function does essentially what :func:`.compare_metadata` does,
but then runs the resulting list of diffs to produce the full
:class:`.MigrationScript` object. For an example of what this looks like,
see the example in :ref:`customizing_revision`.
.. seealso::
:func:`.compare_metadata` - returns more fundamental "diff"
data from comparing a schema.
"""
autogen_context = AutogenContext(context, metadata=metadata)
migration_script = ops.MigrationScript(
rev_id=None,
upgrade_ops=ops.UpgradeOps([]),
downgrade_ops=ops.DowngradeOps([]),
)
compare._populate_migration_script(autogen_context, migration_script)
return migration_script
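# Usage sketch (assumes a MigrationContext `mc` and a MetaData `metadata`
# configured as in the compare_metadata() example above):
#   script = produce_migrations(mc, metadata)
#   diffs = script.upgrade_ops.as_diffs()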
def render_python_code(
up_or_down_op,
sqlalchemy_module_prefix="sa.",
alembic_module_prefix="op.",
render_as_batch=False,
imports=(),
render_item=None,
migration_context=None,
):
"""Render Python code given an :class:`.UpgradeOps` or
:class:`.DowngradeOps` object.
This is a convenience function that can be used to test the
autogenerate output of a user-defined :class:`.MigrationScript` structure.
"""
opts = {
"sqlalchemy_module_prefix": sqlalchemy_module_prefix,
"alembic_module_prefix": alembic_module_prefix,
"render_item": render_item,
"render_as_batch": render_as_batch,
}
if migration_context is None:
from ..runtime.migration import MigrationContext
from sqlalchemy.engine.default import DefaultDialect
migration_context = MigrationContext.configure(
dialect=DefaultDialect()
)
autogen_context = AutogenContext(migration_context, opts=opts)
autogen_context.imports = set(imports)
return render._indent(
render._render_cmd_body(up_or_down_op, autogen_context)
)
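# Usage sketch (assumes `script` is a MigrationScript from
# produce_migrations() above):
#   print(render_python_code(script.upgrade_ops))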
def _render_migration_diffs(context, template_args):
"""legacy, used by test_autogen_composition at the moment"""
autogen_context = AutogenContext(context)
upgrade_ops = ops.UpgradeOps([])
compare._produce_net_changes(autogen_context, upgrade_ops)
migration_script = ops.MigrationScript(
rev_id=None,
upgrade_ops=upgrade_ops,
downgrade_ops=upgrade_ops.reverse(),
)
render._render_python_into_templatevars(
autogen_context, migration_script, template_args
)
class AutogenContext(object):
"""Maintains configuration and state that's specific to an
autogenerate operation."""
metadata = None
"""The :class:`~sqlalchemy.schema.MetaData` object
representing the destination.
This object is the one that is passed within ``env.py``
to the :paramref:`.EnvironmentContext.configure.target_metadata`
parameter. It represents the structure of :class:`.Table` and other
objects as stated in the current database model, and represents the
destination structure for the database being examined.
While the :class:`~sqlalchemy.schema.MetaData` object is primarily
known as a collection of :class:`~sqlalchemy.schema.Table` objects,
it also has an :attr:`~sqlalchemy.schema.MetaData.info` dictionary
that may be used by end-user schemes to store additional schema-level
objects that are to be compared in custom autogeneration schemes.
"""
connection = None
"""The :class:`~sqlalchemy.engine.base.Connection` object currently
connected to the database backend being compared.
This is obtained from the :attr:`.MigrationContext.bind` and is
ultimately set up in the ``env.py`` script.
"""
dialect = None
"""The :class:`~sqlalchemy.engine.Dialect` object currently in use.
This is normally obtained from the
:attr:`~sqlalchemy.engine.base.Connection.dialect` attribute.
"""
imports = None
"""A ``set()`` which contains string Python import directives.
The directives are to be rendered into the ``${imports}`` section
of a script template. The set is normally empty and can be modified
within hooks such as the
:paramref:`.EnvironmentContext.configure.render_item` hook.
.. seealso::
:ref:`autogen_render_types`
"""
migration_context = None
"""The :class:`.MigrationContext` established by the ``env.py`` script."""
def __init__(
self, migration_context, metadata=None, opts=None, autogenerate=True
):
if (
autogenerate
and migration_context is not None
and migration_context.as_sql
):
raise util.CommandError(
"autogenerate can't use as_sql=True as it prevents querying "
"the database for schema information"
)
if opts is None:
opts = migration_context.opts
self.metadata = metadata = (
opts.get("target_metadata", None) if metadata is None else metadata
)
if (
autogenerate
and metadata is None
and migration_context is not None
and migration_context.script is not None
):
raise util.CommandError(
"Can't proceed with --autogenerate option; environment "
"script %s does not provide "
"a MetaData object or sequence of objects to the context."
% (migration_context.script.env_py_location)
)
include_object = opts.get("include_object", None)
include_name = opts.get("include_name", None)
object_filters = []
name_filters = []
if include_object:
object_filters.append(include_object)
if include_name:
name_filters.append(include_name)
self._object_filters = object_filters
self._name_filters = name_filters
self.migration_context = migration_context
if self.migration_context is not None:
self.connection = self.migration_context.bind
self.dialect = self.migration_context.dialect
self.imports = set()
self.opts = opts
self._has_batch = False
@util.memoized_property
def inspector(self):
return inspect(self.connection)
@contextlib.contextmanager
def _within_batch(self):
self._has_batch = True
yield
self._has_batch = False
def run_name_filters(self, name, type_, parent_names):
"""Run the context's name filters and return True if the targets
should be part of the autogenerate operation.
This method should be run for every kind of name encountered within the
reflection side of an autogenerate operation, giving the environment
the chance to filter what names should be reflected as database
objects. The filters here are produced directly via the
:paramref:`.EnvironmentContext.configure.include_name` parameter.
"""
if "schema_name" in parent_names:
if type_ == "table":
table_name = name
else:
table_name = parent_names["table_name"]
schema_name = parent_names["schema_name"]
if schema_name:
parent_names["schema_qualified_table_name"] = "%s.%s" % (
schema_name,
table_name,
)
else:
parent_names["schema_qualified_table_name"] = table_name
for fn in self._name_filters:
if not fn(name, type_, parent_names):
return False
else:
return True
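    # Example of a name filter as passed to
    # EnvironmentContext.configure(include_name=...); this sketch skips a
    # hypothetical "legacy" schema:
    #   def include_name(name, type_, parent_names):
    #       return not (type_ == "schema" and name == "legacy")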
def run_object_filters(self, object_, name, type_, reflected, compare_to):
"""Run the context's object filters and return True if the targets
should be part of the autogenerate operation.
This method should be run for every kind of object encountered within
an autogenerate operation, giving the environment the chance
to filter what objects should be included in the comparison.
The filters here are produced directly via the
:paramref:`.EnvironmentContext.configure.include_object` parameter.
"""
for fn in self._object_filters:
if not fn(object_, name, type_, reflected, compare_to):
return False
else:
return True
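    # Example of an object filter as passed to
    # EnvironmentContext.configure(include_object=...); this sketch ignores
    # a hypothetical scratch table:
    #   def include_object(object_, name, type_, reflected, compare_to):
    #       return not (type_ == "table" and name == "tmp_scratch")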
run_filters = run_object_filters
@util.memoized_property
def sorted_tables(self):
"""Return an aggregate of the :attr:`.MetaData.sorted_tables` collection(s).
For a sequence of :class:`.MetaData` objects, this
concatenates the :attr:`.MetaData.sorted_tables` collection
for each individual :class:`.MetaData` in the order of the
sequence. It does **not** collate the sorted tables collections.
"""
result = []
for m in util.to_list(self.metadata):
result.extend(m.sorted_tables)
return result
@util.memoized_property
def table_key_to_table(self):
"""Return an aggregate of the :attr:`.MetaData.tables` dictionaries.
The :attr:`.MetaData.tables` collection is a dictionary of table key
to :class:`.Table`; this method aggregates the dictionary across
multiple :class:`.MetaData` objects into one dictionary.
Duplicate table keys are **not** supported; if two :class:`.MetaData`
objects contain the same table key, an exception is raised.
"""
result = {}
for m in util.to_list(self.metadata):
intersect = set(result).intersection(set(m.tables))
if intersect:
raise ValueError(
"Duplicate table keys across multiple "
"MetaData objects: %s"
% (", ".join('"%s"' % key for key in sorted(intersect)))
)
result.update(m.tables)
return result
class RevisionContext(object):
"""Maintains configuration and state that's specific to a revision
file generation operation."""
def __init__(
self,
config,
script_directory,
command_args,
process_revision_directives=None,
):
self.config = config
self.script_directory = script_directory
self.command_args = command_args
self.process_revision_directives = process_revision_directives
self.template_args = {
"config": config # Let templates use config for
# e.g. multiple databases
}
self.generated_revisions = [self._default_revision()]
def _to_script(self, migration_script):
template_args = {}
for k, v in self.template_args.items():
template_args.setdefault(k, v)
if getattr(migration_script, "_needs_render", False):
autogen_context = self._last_autogen_context
# clear out existing imports if we are doing multiple
# renders
autogen_context.imports = set()
if migration_script.imports:
autogen_context.imports.update(migration_script.imports)
render._render_python_into_templatevars(
autogen_context, migration_script, template_args
)
return self.script_directory.generate_revision(
migration_script.rev_id,
migration_script.message,
refresh=True,
head=migration_script.head,
splice=migration_script.splice,
branch_labels=migration_script.branch_label,
version_path=migration_script.version_path,
depends_on=migration_script.depends_on,
**template_args
)
def run_autogenerate(self, rev, migration_context):
self._run_environment(rev, migration_context, True)
def run_no_autogenerate(self, rev, migration_context):
self._run_environment(rev, migration_context, False)
def _run_environment(self, rev, migration_context, autogenerate):
if autogenerate:
if self.command_args["sql"]:
raise util.CommandError(
"Using --sql with --autogenerate does not make any sense"
)
if set(self.script_directory.get_revisions(rev)) != set(
self.script_directory.get_revisions("heads")
):
raise util.CommandError("Target database is not up to date.")
upgrade_token = migration_context.opts["upgrade_token"]
downgrade_token = migration_context.opts["downgrade_token"]
migration_script = self.generated_revisions[-1]
if not getattr(migration_script, "_needs_render", False):
migration_script.upgrade_ops_list[-1].upgrade_token = upgrade_token
migration_script.downgrade_ops_list[
-1
].downgrade_token = downgrade_token
migration_script._needs_render = True
else:
migration_script._upgrade_ops.append(
ops.UpgradeOps([], upgrade_token=upgrade_token)
)
migration_script._downgrade_ops.append(
ops.DowngradeOps([], downgrade_token=downgrade_token)
)
self._last_autogen_context = autogen_context = AutogenContext(
migration_context, autogenerate=autogenerate
)
if autogenerate:
compare._populate_migration_script(
autogen_context, migration_script
)
if self.process_revision_directives:
self.process_revision_directives(
migration_context, rev, self.generated_revisions
)
hook = migration_context.opts["process_revision_directives"]
if hook:
hook(migration_context, rev, self.generated_revisions)
for migration_script in self.generated_revisions:
migration_script._needs_render = True
def _default_revision(self):
op = ops.MigrationScript(
rev_id=self.command_args["rev_id"] or util.rev_id(),
message=self.command_args["message"],
upgrade_ops=ops.UpgradeOps([]),
downgrade_ops=ops.DowngradeOps([]),
head=self.command_args["head"],
splice=self.command_args["splice"],
branch_label=self.command_args["branch_label"],
version_path=self.command_args["version_path"],
depends_on=self.command_args["depends_on"],
)
return op
def generate_scripts(self):
for generated_revision in self.generated_revisions:
yield self._to_script(generated_revision)
|
# encoding: utf-8
from bs4 import BeautifulSoup
import pytest
import six
from ckan.lib.helpers import url_for
import ckan.tests.helpers as helpers
import ckan.model as model
from ckan.tests import factories
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestGroupController(object):
def test_bulk_process_throws_404_for_nonexistent_org(self, app):
bulk_process_url = url_for(
"organization.bulk_process", id="does-not-exist"
)
app.get(url=bulk_process_url, status=404)
def test_page_thru_list_of_orgs_preserves_sort_order(self, app):
orgs = [factories.Organization() for _ in range(35)]
org_url = url_for("organization.index", sort="name desc")
response = app.get(url=org_url)
assert orgs[-1]["name"] in response
assert orgs[0]["name"] not in response
org_url = url_for("organization.index", sort="name desc", page=2)
response = app.get(url=org_url)
assert orgs[-1]["name"] not in response
assert orgs[0]["name"] in response
def test_page_thru_list_of_groups_preserves_sort_order(self, app):
groups = [factories.Group() for _ in range(35)]
group_url = url_for("group.index", sort="title desc")
response = app.get(url=group_url)
assert groups[-1]["title"] in response
assert groups[0]["title"] not in response
org_url = url_for("group.index", sort="title desc", page=2)
response = app.get(url=org_url)
assert groups[-1]["title"] not in response
assert groups[0]["title"] in response
def test_invalid_sort_param_does_not_crash(self, app):
with app.flask_app.test_request_context():
group_url = url_for("group.index", sort="title desc nope")
app.get(url=group_url)
group_url = url_for("group.index", sort="title nope desc nope")
app.get(url=group_url)
def _get_group_new_page(app):
user = factories.User()
env = {"REMOTE_USER": six.ensure_str(user["name"])}
response = app.get(url=url_for("group.new"), extra_environ=env)
return env, response
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestGroupControllerNew(object):
def test_not_logged_in(self, app):
app.get(url=url_for("group.new"), status=403)
def test_name_required(self, app):
user = factories.User()
env = {"REMOTE_USER": six.ensure_str(user["name"])}
response = app.post(
url=url_for("group.new"), extra_environ=env, data={"save": ""}
)
assert "Name: Missing value" in response
def test_saved(self, app):
user = factories.User()
env = {"REMOTE_USER": six.ensure_str(user["name"])}
form = {"name": "saved", "save": ""}
app.post(url=url_for("group.new"), extra_environ=env, data=form)
group = model.Group.by_name(u"saved")
assert group.title == u""
assert group.type == "group"
assert group.state == "active"
def test_all_fields_saved(self, app):
user = factories.User()
env = {"REMOTE_USER": six.ensure_str(user["name"])}
form = {
"name": u"all-fields-saved",
"title": "Science",
"description": "Sciencey datasets",
"image_url": "http://example.com/image.png",
"save": "",
}
app.post(url=url_for("group.new"), extra_environ=env, data=form)
group = model.Group.by_name(u"all-fields-saved")
assert group.title == u"Science"
assert group.description == "Sciencey datasets"
def _get_group_edit_page(app, group_name=None):
user = factories.User()
if group_name is None:
group = factories.Group(user=user)
group_name = group["name"]
env = {"REMOTE_USER": six.ensure_str(user["name"])}
url = url_for("group.edit", id=group_name)
response = app.get(url=url, extra_environ=env)
return env, response, group_name
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestGroupControllerEdit(object):
def test_not_logged_in(self, app):
app.get(url=url_for("group.new"), status=403)
def test_group_doesnt_exist(self, app):
user = factories.User()
env = {"REMOTE_USER": six.ensure_str(user["name"])}
url = url_for("group.edit", id="doesnt_exist")
app.get(url=url, extra_environ=env, status=404)
def test_saved(self, app):
user = factories.User()
group = factories.Group(user=user)
env = {"REMOTE_USER": six.ensure_str(user["name"])}
form = {"save": ""}
app.post(
url=url_for("group.edit", id=group["name"]),
extra_environ=env,
data=form,
)
group = model.Group.by_name(group["name"])
assert group.state == "active"
def test_all_fields_saved(self, app):
user = factories.User()
group = factories.Group(user=user)
env = {"REMOTE_USER": six.ensure_str(user["name"])}
form = {
"name": u"all-fields-edited",
"title": "Science",
"description": "Sciencey datasets",
"image_url": "http://example.com/image.png",
"save": "",
}
resp = app.post(
url=url_for("group.edit", id=group["name"]),
extra_environ=env,
data=form,
)
group = model.Group.by_name(u"all-fields-edited")
assert group.title == u"Science"
assert group.description == "Sciencey datasets"
assert group.image_url == "http://example.com/image.png"
def test_display_name_shown(self, app):
user = factories.User()
group = factories.Group(
name="display-name",
title="Display name",
user=user,
)
env = {"REMOTE_USER": six.ensure_str(user["name"])}
form = {
"name": "",
"save": "",
}
resp = app.get(
url=url_for("group.edit", id=group["name"]),
extra_environ=env,
)
page = BeautifulSoup(resp.body)
breadcrumbs = page.select('.breadcrumb a')
# Home -> Groups -> NAME -> Manage
assert len(breadcrumbs) == 4
# Verify that `NAME` is not empty, as well as other parts
assert all([part.text for part in breadcrumbs])
resp = app.post(
url=url_for("group.edit", id=group["name"]),
extra_environ=env,
data=form,
)
page = BeautifulSoup(resp.body)
breadcrumbs = page.select('.breadcrumb a')
# Home -> Groups -> NAME -> Manage
assert len(breadcrumbs) == 4
# Verify that `NAME` is not empty, as well as other parts
assert all([part.text for part in breadcrumbs])
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestGroupRead(object):
def test_group_read(self, app):
group = factories.Group()
response = app.get(url=url_for("group.read", id=group["name"]))
assert group["title"] in response
assert group["description"] in response
def test_redirect_when_given_id(self, app):
group = factories.Group()
response = app.get(
url_for("group.read", id=group["id"]),
status=302,
follow_redirects=False,
)
location = response.headers["location"]
expected_url = url_for("group.read", id=group["name"], _external=True)
assert location == expected_url
def test_no_redirect_loop_when_name_is_the_same_as_the_id(self, app):
group = factories.Group(id="abc", name="abc")
# 200 == no redirect
app.get(url_for("group.read", id=group["id"]), status=200)
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestGroupDelete(object):
@pytest.fixture
def initial_data(self):
user = factories.User()
return {
"user": user,
"user_env": {"REMOTE_USER": six.ensure_str(user["name"])},
"group": factories.Group(user=user),
}
def test_owner_delete(self, app, initial_data):
response = app.post(
url=url_for("group.delete", id=initial_data["group"]["id"]),
data={"delete": ""},
extra_environ=initial_data["user_env"],
)
group = helpers.call_action(
"group_show", id=initial_data["group"]["id"]
)
assert group["state"] == "deleted"
def test_sysadmin_delete(self, app, initial_data):
sysadmin = factories.Sysadmin()
extra_environ = {"REMOTE_USER": six.ensure_str(sysadmin["name"])}
response = app.post(
url=url_for("group.delete", id=initial_data["group"]["id"]),
data={"delete": ""},
extra_environ=extra_environ,
)
group = helpers.call_action(
"group_show", id=initial_data["group"]["id"]
)
assert group["state"] == "deleted"
def test_non_authorized_user_trying_to_delete_fails(
self, app, initial_data
):
user = factories.User()
extra_environ = {"REMOTE_USER": six.ensure_str(user["name"])}
app.get(
url=url_for("group.delete", id=initial_data["group"]["id"]),
status=403,
extra_environ=extra_environ,
)
group = helpers.call_action(
"group_show", id=initial_data["group"]["id"]
)
assert group["state"] == "active"
def test_anon_user_trying_to_delete_fails(self, app, initial_data):
app.get(
url=url_for("group.delete", id=initial_data["group"]["id"]),
status=403,
)
group = helpers.call_action(
"group_show", id=initial_data["group"]["id"]
)
assert group["state"] == "active"
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestGroupMembership(object):
def _create_group(self, owner_username, users=None):
"""Create a group with the owner defined by owner_username and
optionally with a list of other users."""
if users is None:
users = []
context = {"user": owner_username, "ignore_auth": True}
group = helpers.call_action(
"group_create", context=context, name="test-group", users=users
)
return group
def _get_group_add_member_page(self, app, user, group_name):
env = {"REMOTE_USER": six.ensure_str(user["name"])}
url = url_for("group.member_new", id=group_name)
response = app.get(url=url, extra_environ=env)
return env, response
def test_membership_list(self, app):
"""List group admins and members"""
user_one = factories.User(fullname="User One", name="user-one")
user_two = factories.User(fullname="User Two")
other_users = [{"name": user_two["id"], "capacity": "member"}]
group = self._create_group(user_one["name"], other_users)
member_list_url = url_for("group.members", id=group["id"])
env = {"REMOTE_USER": six.ensure_str(user_one["name"])}
member_list_response = app.get(member_list_url, extra_environ=env)
assert "2 members" in member_list_response
member_response_html = BeautifulSoup(member_list_response.body)
user_names = [
u.string
for u in member_response_html.select("#member-table td.media a")
]
roles = [
r.next_sibling.next_sibling.string
for r in member_response_html.select("#member-table td.media")
]
user_roles = dict(zip(user_names, roles))
assert user_roles["User One"] == "Admin"
assert user_roles["User Two"] == "Member"
def test_membership_add(self, app):
"""Member can be added via add member page"""
owner = factories.User(fullname="My Owner")
factories.User(fullname="My Fullname", name="my-user")
group = self._create_group(owner["name"])
env = {"REMOTE_USER": six.ensure_str(owner["name"])}
url = url_for("group.member_new", id=group["name"])
add_response = app.post(
url,
environ_overrides=env,
data={"save": "", "username": "my-user", "role": "member"},
)
assert "2 members" in add_response.body
add_response_html = BeautifulSoup(add_response.body)
user_names = [
u.string
for u in add_response_html.select("#member-table td.media a")
]
roles = [
r.next_sibling.next_sibling.string
for r in add_response_html.select("#member-table td.media")
]
user_roles = dict(zip(user_names, roles))
assert user_roles["My Owner"] == "Admin"
assert user_roles["My Fullname"] == "Member"
def test_admin_add(self, app):
"""Admin can be added via add member page"""
owner = factories.User(fullname="My Owner")
factories.User(fullname="My Fullname", name="my-user")
group = self._create_group(owner["name"])
env = {"REMOTE_USER": six.ensure_str(owner["name"])}
url = url_for("group.member_new", id=group["name"])
add_response = app.post(
url,
environ_overrides=env,
data={"save": "", "username": "my-user", "role": "admin"},
)
assert "2 members" in add_response
add_response_html = BeautifulSoup(add_response.body)
user_names = [
u.string
for u in add_response_html.select("#member-table td.media a")
]
roles = [
r.next_sibling.next_sibling.string
for r in add_response_html.select("#member-table td.media")
]
user_roles = dict(zip(user_names, roles))
assert user_roles["My Owner"] == "Admin"
assert user_roles["My Fullname"] == "Admin"
def test_remove_member(self, app):
"""Member can be removed from group"""
user_one = factories.User(fullname="User One", name="user-one")
user_two = factories.User(fullname="User Two")
other_users = [{"name": user_two["id"], "capacity": "member"}]
group = self._create_group(user_one["name"], other_users)
remove_url = url_for(
"group.member_delete", user=user_two["id"], id=group["id"]
)
env = {"REMOTE_USER": six.ensure_str(user_one["name"])}
remove_response = app.post(remove_url, extra_environ=env)
assert helpers.body_contains(remove_response, "1 members")
remove_response_html = BeautifulSoup(remove_response.body)
user_names = [
u.string
for u in remove_response_html.select("#member-table td.media a")
]
roles = [
r.next_sibling.next_sibling.string
for r in remove_response_html.select("#member-table td.media")
]
user_roles = dict(zip(user_names, roles))
assert len(user_roles.keys()) == 1
assert user_roles["User One"] == "Admin"
def test_member_users_cannot_add_members(self, app):
user = factories.User()
group = factories.Group(
users=[{"name": user["name"], "capacity": "member"}]
)
env = {"REMOTE_USER": six.ensure_str(user["name"])}
with app.flask_app.test_request_context():
app.get(
url_for("group.member_new", id=group["id"]),
extra_environ=env,
status=403,
)
app.post(
url_for("group.member_new", id=group["id"]),
data={
"id": "test",
"username": "test",
"save": "save",
"role": "test",
},
extra_environ=env,
status=403,
)
def test_anonymous_users_cannot_add_members(self, app):
group = factories.Group()
with app.flask_app.test_request_context():
app.get(url_for("group.member_new", id=group["id"]), status=403)
app.post(
url_for("group.member_new", id=group["id"]),
data={
"id": "test",
"username": "test",
"save": "save",
"role": "test",
},
status=403,
)
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestGroupFollow:
def test_group_follow(self, app):
user = factories.User()
group = factories.Group()
env = {"REMOTE_USER": six.ensure_str(user["name"])}
follow_url = url_for("group.follow", id=group["id"])
response = app.post(follow_url, extra_environ=env)
assert (
"You are now following {0}".format(group["display_name"])
in response
)
def test_group_follow_not_exist(self, app):
"""Pass an id for a group that doesn't exist"""
user_one = factories.User()
env = {"REMOTE_USER": six.ensure_str(user_one["name"])}
follow_url = url_for("group.follow", id="not-here")
response = app.post(follow_url, extra_environ=env, status=404)
assert "Group not found" in response
def test_group_unfollow(self, app):
user_one = factories.User()
group = factories.Group()
env = {"REMOTE_USER": six.ensure_str(user_one["name"])}
follow_url = url_for("group.follow", id=group["id"])
app.post(follow_url, extra_environ=env)
unfollow_url = url_for("group.unfollow", id=group["id"])
unfollow_response = app.post(unfollow_url, extra_environ=env)
assert (
"You are no longer following {0}".format(group["display_name"])
in unfollow_response
)
def test_group_unfollow_not_following(self, app):
"""Unfollow a group not currently following"""
user_one = factories.User()
group = factories.Group()
env = {"REMOTE_USER": six.ensure_str(user_one["name"])}
unfollow_url = url_for("group.unfollow", id=group["id"])
unfollow_response = app.post(unfollow_url, extra_environ=env)
assert (
"You are not following {0}".format(group["id"])
in unfollow_response
)
def test_group_unfollow_not_exist(self, app):
"""Unfollow a group that doesn't exist."""
user_one = factories.User()
env = {"REMOTE_USER": six.ensure_str(user_one["name"])}
unfollow_url = url_for("group.unfollow", id="not-here")
unfollow_response = app.post(
unfollow_url, extra_environ=env, status=404
)
def test_group_follower_list(self, app):
"""Following users appear on followers list page."""
user_one = factories.Sysadmin()
group = factories.Group()
env = {"REMOTE_USER": six.ensure_str(user_one["name"])}
follow_url = url_for("group.follow", id=group["id"])
app.post(follow_url, extra_environ=env)
followers_url = url_for("group.followers", id=group["id"])
# Only sysadmins can view the followers list pages
followers_response = app.get(
followers_url, extra_environ=env, status=200
)
assert user_one["display_name"] in followers_response
@pytest.mark.usefixtures("clean_db", "clean_index", "with_request_context")
class TestGroupSearch(object):
"""Test searching for groups."""
def test_group_search(self, app):
"""Requesting group search (index) returns list of groups and search
form."""
factories.Group(name="grp-one", title="AGrp One")
factories.Group(name="grp-two", title="AGrp Two")
factories.Group(name="grp-three", title="Grp Three")
index_response = app.get(url_for("group.index"))
index_response_html = BeautifulSoup(index_response.body)
grp_names = index_response_html.select(
"ul.media-grid " "li.media-item " "h3.media-heading"
)
grp_names = [n.string for n in grp_names]
assert len(grp_names) == 3
assert "AGrp One" in grp_names
assert "AGrp Two" in grp_names
assert "Grp Three" in grp_names
def test_group_search_results(self, app):
"""Searching via group search form returns list of expected groups."""
factories.Group(name="grp-one", title="AGrp One")
factories.Group(name="grp-two", title="AGrp Two")
factories.Group(name="grp-three", title="Grp Three")
search_response = app.get(
url_for("group.index"), query_string={"q": "AGrp"}
)
search_response_html = BeautifulSoup(search_response.body)
grp_names = search_response_html.select(
"ul.media-grid " "li.media-item " "h3.media-heading"
)
grp_names = [n.string for n in grp_names]
assert len(grp_names) == 2
assert "AGrp One" in grp_names
assert "AGrp Two" in grp_names
assert "Grp Three" not in grp_names
def test_group_search_no_results(self, app):
"""Searching with a term that doesn't apply returns no results."""
factories.Group(name="grp-one", title="AGrp One")
factories.Group(name="grp-two", title="AGrp Two")
factories.Group(name="grp-three", title="Grp Three")
search_response = app.get(
url_for("group.index"), query_string={"q": "No Results Here"}
)
search_response_html = BeautifulSoup(search_response.body)
grp_names = search_response_html.select(
"ul.media-grid " "li.media-item " "h3.media-heading"
)
grp_names = [n.string for n in grp_names]
assert len(grp_names) == 0
assert 'No groups found for "No Results Here"' in search_response
@pytest.mark.usefixtures("clean_db", "clean_index", "with_request_context")
class TestGroupInnerSearch(object):
"""Test searching within an group."""
def test_group_search_within_org(self, app):
"""Group read page request returns list of datasets owned by group."""
grp = factories.Group()
factories.Dataset(
name="ds-one", title="Dataset One", groups=[{"id": grp["id"]}]
)
factories.Dataset(
name="ds-two", title="Dataset Two", groups=[{"id": grp["id"]}]
)
factories.Dataset(
name="ds-three", title="Dataset Three", groups=[{"id": grp["id"]}]
)
grp_url = url_for("group.read", id=grp["name"])
grp_response = app.get(grp_url)
grp_response_html = BeautifulSoup(grp_response.body)
ds_titles = grp_response_html.select(
".dataset-list " ".dataset-item " ".dataset-heading a"
)
ds_titles = [t.string for t in ds_titles]
assert "3 datasets found" in grp_response
assert len(ds_titles) == 3
assert "Dataset One" in ds_titles
assert "Dataset Two" in ds_titles
assert "Dataset Three" in ds_titles
def test_group_search_within_org_results(self, app):
"""Searching within an group returns expected dataset results."""
grp = factories.Group()
factories.Dataset(
name="ds-one", title="Dataset One", groups=[{"id": grp["id"]}]
)
factories.Dataset(
name="ds-two", title="Dataset Two", groups=[{"id": grp["id"]}]
)
factories.Dataset(
name="ds-three", title="Dataset Three", groups=[{"id": grp["id"]}]
)
grp_url = url_for("group.read", id=grp["name"])
search_response = app.get(grp_url, query_string={"q": "One"})
        assert '1 dataset found for "One"' in search_response
search_response_html = BeautifulSoup(search_response.body)
ds_titles = search_response_html.select(
".dataset-list " ".dataset-item " ".dataset-heading a"
)
ds_titles = [t.string for t in ds_titles]
assert len(ds_titles) == 1
assert "Dataset One" in ds_titles
assert "Dataset Two" not in ds_titles
assert "Dataset Three" not in ds_titles
def test_group_search_within_org_no_results(self, app):
"""Searching for non-returning phrase within an group returns no
results."""
grp = factories.Group()
factories.Dataset(
name="ds-one", title="Dataset One", groups=[{"id": grp["id"]}]
)
factories.Dataset(
name="ds-two", title="Dataset Two", groups=[{"id": grp["id"]}]
)
factories.Dataset(
name="ds-three", title="Dataset Three", groups=[{"id": grp["id"]}]
)
grp_url = url_for("group.read", id=grp["name"])
search_response = app.get(grp_url, query_string={"q": "Nout"})
assert helpers.body_contains(
search_response, 'No datasets found for "Nout"'
)
search_response_html = BeautifulSoup(search_response.body)
ds_titles = search_response_html.select(
".dataset-list " ".dataset-item " ".dataset-heading a"
)
ds_titles = [t.string for t in ds_titles]
assert len(ds_titles) == 0
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestGroupIndex(object):
def test_group_index(self, app):
for i in range(1, 26):
_i = "0" + str(i) if i < 10 else i
factories.Group(
name="test-group-{0}".format(_i),
title="Test Group {0}".format(_i),
)
url = url_for("group.index")
response = app.get(url)
for i in range(1, 21):
_i = "0" + str(i) if i < 10 else i
assert "Test Group {0}".format(_i) in response
assert "Test Group 21" not in response
url = url_for("group.index", page=1)
response = app.get(url)
for i in range(1, 21):
_i = "0" + str(i) if i < 10 else i
assert "Test Group {0}".format(_i) in response
assert "Test Group 21" not in response
url = url_for("group.index", page=2)
response = app.get(url)
for i in range(21, 26):
assert "Test Group {0}".format(i) in response
assert "Test Group 20" not in response
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestActivity:
def test_simple(self, app):
"""Checking the template shows the activity stream."""
user = factories.User()
group = factories.Group(user=user)
url = url_for("group.activity", id=group["id"])
response = app.get(url)
assert "Mr. Test User" in response
assert "created the group" in response
def test_create_group(self, app):
user = factories.User()
group = factories.Group(user=user)
url = url_for("group.activity", id=group["id"])
response = app.get(url)
assert (
'<a href="/user/{}">Mr. Test User'.format(user["name"]) in response
)
assert "created the group" in response
assert (
'<a href="/group/{}">Test Group'.format(group["name"]) in response
)
def _clear_activities(self):
model.Session.query(model.ActivityDetail).delete()
model.Session.query(model.Activity).delete()
model.Session.flush()
def test_change_group(self, app):
user = factories.User()
group = factories.Group(user=user)
self._clear_activities()
group["title"] = "Group with changed title"
helpers.call_action(
"group_update", context={"user": user["name"]}, **group
)
url = url_for("group.activity", id=group["id"])
response = app.get(url)
assert (
'<a href="/user/{}">Mr. Test User'.format(user["name"]) in response
)
assert "updated the group" in response
assert (
'<a href="/group/{}">Group with changed title'.format(
group["name"]
)
in response
)
def test_delete_group_using_group_delete(self, app):
user = factories.User()
group = factories.Group(user=user)
self._clear_activities()
helpers.call_action(
"group_delete", context={"user": user["name"]}, **group
)
url = url_for("group.activity", id=group["id"])
env = {"REMOTE_USER": six.ensure_str(user["name"])}
response = app.get(url, extra_environ=env, status=404)
        # group_delete sets the group's Member rows to state=deleted, after
        # which the user doesn't have permission to see their own deleted
        # Group, so that group's activity stream can't be rendered. You'd
        # hope that group_delete behaved the same as group_update with
        # state=deleted, but they do not...
def test_delete_group_by_updating_state(self, app):
user = factories.User()
group = factories.Group(user=user)
self._clear_activities()
group["state"] = "deleted"
helpers.call_action(
"group_update", context={"user": user["name"]}, **group
)
url = url_for("group.activity", id=group["id"])
env = {"REMOTE_USER": six.ensure_str(user["name"])}
response = app.get(url, extra_environ=env)
assert (
'<a href="/user/{}">Mr. Test User'.format(user["name"]) in response
)
assert "deleted the group" in response
assert (
'<a href="/group/{}">Test Group'.format(group["name"]) in response
)
def test_create_dataset(self, app):
user = factories.User()
group = factories.Group(user=user)
self._clear_activities()
dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
url = url_for("group.activity", id=group["id"])
response = app.get(url)
assert (
'<a href="/user/{}">Mr. Test User'.format(user["name"]) in response
)
assert "created the dataset" in response
assert (
'<a href="/dataset/{}">Test Dataset'.format(dataset["id"])
in response
)
def test_change_dataset(self, app):
user = factories.User()
group = factories.Group(user=user)
dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
self._clear_activities()
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
url = url_for("group.activity", id=group["id"])
response = app.get(url)
assert (
'<a href="/user/{}">Mr. Test User'.format(user["name"]) in response
)
assert "updated the dataset" in response
assert (
'<a href="/dataset/{}">Dataset with changed title'.format(
dataset["id"]
)
in response
)
def test_delete_dataset(self, app):
user = factories.User()
group = factories.Group(user=user)
dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
self._clear_activities()
helpers.call_action(
"package_delete", context={"user": user["name"]}, **dataset
)
url = url_for("group.activity", id=group["id"])
response = app.get(url)
assert (
'<a href="/user/{}">Mr. Test User'.format(user["name"]) in response
)
assert "deleted the dataset" in response
assert (
'<a href="/dataset/{}">Test Dataset'.format(dataset["id"])
in response
)
|
# -*- encoding: utf-8 -*-
'''
Current module: pyrunner.ext.idleshell.TextEditDelegator
Rough version history:
v1.0 Original version to use
********************************************************************
@AUTHOR: Administrator-Bruce Luo(罗科峰)
MAIL: lkf20031988@163.com
RCS: rock4.common.dev.idleshell.TextEditDelegator,v 2.0 2017-02-07
FROM: 2016-08-18
********************************************************************
======================================================================
UI and web HTTP automation framework for Python.
'''
from idlelib.ColorDelegator import ColorDelegator
from idlelib.Percolator import Percolator
from SimpleAutoComplete import SimpleAutoComplete
class TextEditDelegator():
'''
from pyrunner.tkui.suite import Components
from pyrunner.tkui.ui import ROOT,Widget,Window
frame1 = Widget.Labelframe(ROOT,text = "XXXX")
Window.widg = frame1
Window.Pack(side = "top", fill="both", expand="yes", padx = "0.2c")
(t,x,y) = Components.TextWithScrollbar(frame1)
a = TextEditDelegator(t)
a.effect_on_text("STRING", {'font': u"Calibri 10 normal roman",'foreground': 'red','background': '#ffffff'})
t.insert("end","0.ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss\n")
t.insert("end","1.sdf\n")
ROOT.mainloop()
'''
    def __init__(self, master, rpcclt=None):
        self.text = master
        self.color_delegator = ColorDelegator()
        self.auto_complete = SimpleAutoComplete(master=master, rpcclt=rpcclt)
    def effect_on_text(self, tag_name=None, value=None):
p = Percolator(self.text)
if tag_name and value:
self.__set_idlelib_tag_defs(tag_name, value)
p.insertfilter(self.color_delegator)
self.__add_common_event()
def __set_idlelib_tag_defs(self, tag_name, value):
''' parameter:
tag_name --> should be in ColorDelegator().tagdefs.keys(); they are ("COMMENT", "DEFINITION", "BUILTIN", "hit", "STRING", "KEYWORD", "ERROR", "TODO", "SYNC", "BREAK")
        value --> Tkinter.Text's STANDARD OPTIONS
# idlelib --> default tagdefs
self.tagdefs = {
'COMMENT': {'foreground': '#dd0000','background': '#ffffff'},
'DEFINITION': {'foreground': '#0000ff','background': '#ffffff'},
'BUILTIN': {'foreground': '#900090','background': '#ffffff'},
'hit': {'foreground': '#ffffff','background': '#000000'},
'STRING': {'foreground': '#00aa00','background': '#ffffff'},
'KEYWORD': {'foreground': '#ff7700','background': '#ffffff'},
'ERROR': {'foreground': '#000000','background': '#ff7777'},
'TODO': {'foreground': None,'background': None},
'SYNC': {'foreground': None,'background': None},
'BREAK': {'foreground': 'black','background': '#ffff55'}
}
'''
if tag_name not in self.color_delegator.tagdefs.keys():
return
if not isinstance(value, dict):
return
tagdefs = {
'COMMENT': {'foreground': None,'background': None},
'DEFINITION': {'foreground': None,'background': None},
'BUILTIN': {'foreground': None,'background': None},
'hit': {'foreground': None,'background': None},
'STRING': {'foreground': None,'background': None},
'KEYWORD': {'foreground': None,'background': None},
'ERROR': {'foreground': None,'background': None},
'TODO': {'foreground': None,'background': None},
'SYNC': {'foreground': None,'background': None},
'BREAK': {'foreground': None,'background': None}
}
tagdefs.update({tag_name:value})
self.color_delegator.tagdefs.update(tagdefs)
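    # NOTE: appears to be unused backup code -- it references attributes
    # (self.comp_lists, self.auto_complete_win) that are never assigned in
    # this class.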
def __show_idlelib_comp_lists_backup(self,event):
if not self.comp_lists:
return
comp_lists = (self.comp_lists,[])
comp_start=""
complete=True
mode = 1
userWantsWin = True
self.auto_complete_win.show_window(comp_lists,"insert-%dc" % len(comp_start),complete,mode,userWantsWin)
    def __event_add(self, virtual_event, keylist):
        # Tkinter's event_add registers a new virtual event and binds it to
        # an event sequence. Event sequences have a defined format, detailed
        # in Misc.bind's __doc__ in Tkinter.py; virtual events use the form
        # <<AsString>>, where AsString is arbitrary.
        # e.g. event_add("<<copy>>", ['<Control-Key-c>', '<Control-Key-C>'])
        for k in keylist:
            self.text.event_add(virtual_event, k)
def __cut(self, event):
        # event_generate's __doc__ in the Tkinter source reads: "Generate an
        # event SEQUENCE." In other words, fire the virtual event; if a
        # handler is bound to it, that handler is called.
self.text.event_generate("<<Cut>>")
return "break"
def __copy(self, event):
if not self.text.tag_ranges("sel"):
# There is no selection, so do nothing and maybe interrupt.
return
self.text.event_generate("<<Copy>>")
return "break"
def __paste(self, event):
self.text.event_generate("<<Paste>>")
self.text.see("insert")
return "break"
def __select_all(self, event=None):
self.text.tag_add("sel", "1.0", "end-1c")
self.text.mark_set("insert", "1.0")
self.text.see("insert")
return "break"
def __redo(self, event):
self.text.event_generate('<<Redo>>')
return "break"
def __undo(self, event):
self.text.event_generate('<<Undo>>')
return "break"
def __add_common_event(self):
vevent = {
"<<autocomplete>>": (['<Alt-Key-/>'], self.auto_complete.autocomplete_event),
"<<copy>>": (['<Control-Key-c>', '<Control-Key-C>'], self.__copy),
"<<cut>>": (['<Control-Key-x>', '<Control-Key-X>'], self.__cut),
"<<paste>>": (['<Control-Key-v>', '<Control-Key-V>'], self.__paste),
"<<select-all>>": (['<Control-Key-a>', '<Control-Key-A>'], self.__select_all),
"<<redo>>": (['<Control-Key-y>', '<Control-Key-Y>'], self.__redo),
"<<undo>>": (['<Control-Key-z>', '<Control-Key-Z>'], self.__undo),
}
for seq, seq_key in vevent.items():
self.__event_add(seq, seq_key[0])
self.text.bind(seq, seq_key[1])
if __name__ == "__main__":
from rock4.common.dev.tkui.suite import Components
from rock4.common.dev.tkui.ui import ROOT,Widget,Window
frame1 = Widget.Labelframe(ROOT,text = "XXXX")
Window.widg = frame1
Window.Pack(side = "top", fill="both", expand="yes", padx = "0.2c")
(t,x,y) = Components.TextWithScrollbar(frame1)
a = TextEditDelegator(t)
a.effect_on_text("STRING", {'font': u"Calibri 10 normal roman",'foreground': 'red','background': '#ffffff'})
t.insert("end","0.ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss\n")
t.insert("end","1.sdf\n")
ROOT.mainloop()
|
"""
Django settings for supermarket_deals_29891 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"chat",
"chat_user_profile",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
"storages",
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "supermarket_deals_29891.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "web_build")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "supermarket_deals_29891.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
os.path.join(BASE_DIR, "web_build/static"),
]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
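# USE_S3 is truthy only when all four AWS settings above are non-empty.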
USE_S3 = (
AWS_ACCESS_KEY_ID
and AWS_SECRET_ACCESS_KEY
and AWS_STORAGE_BUCKET_NAME
and AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = "/mediafiles/"
MEDIA_ROOT = os.path.join(BASE_DIR, "mediafiles")
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning(
"You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails."
)
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
import heapq
from typing import List
class Solution:
# Time complexity: O(n * log n)
# Space complexity: O(1)
def lastStoneWeight(self, stones: List[int]) -> int:
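        # heapq implements a min-heap; negate the weights to simulate a max-heap.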
for i in range(len(stones)):
stones[i] = -stones[i]
heapq.heapify(stones)
while len(stones) > 1:
larger, smaller = heapq.heappop(stones), heapq.heappop(stones)
larger -= smaller
if larger < 0:
heapq.heappush(stones, larger)
return abs(stones.pop()) if stones else 0
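# Example: Solution().lastStoneWeight([2, 7, 4, 1, 8, 1]) returns 1 --
# each round the two heaviest stones smash, leaving their difference.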
|
# -*- coding: utf-8 -*-
import demjson
class FlowLauncherAPI:
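    # Usage sketch: each classmethod prints a JSON-RPC style message to
    # stdout for Flow Launcher to consume, e.g.:
    #   FlowLauncherAPI.show_msg("Done", "Task finished")
    #   FlowLauncherAPI.change_query("wiki python", requery=True)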
@classmethod
def change_query(cls, query, requery: bool = False):
"""
change flow launcher query
"""
print(demjson.encode({
"method": "Flow.Launcher.ChangeQuery",
"parameters": [query, requery]}))
@classmethod
def shell_run(cls, cmd):
"""
run shell commands
"""
print(demjson.encode({
"method": "Flow.Launcher.ShellRun",
"parameters": [cmd]}))
@classmethod
def close_app(cls):
"""
close flow launcher
"""
print(demjson.encode({
"method": "Flow.Launcher.CloseApp",
"parameters": []}))
@classmethod
def hide_app(cls):
"""
hide flow launcher
"""
print(demjson.encode({
"method": "Flow.Launcher.HideApp",
"parameters": []}))
@classmethod
def show_app(cls):
"""
show flow launcher
"""
print(demjson.encode({
"method": "Flow.Launcher.ShowApp",
"parameters": []}))
@classmethod
def show_msg(cls, title: str, sub_title: str, ico_path: str = ""):
"""
show messagebox
"""
print(demjson.encode({
"method": "Flow.Launcher.ShowMsg",
"parameters": [title, sub_title, ico_path]}))
@classmethod
def open_setting_dialog(cls):
"""
open setting dialog
"""
print(demjson.encode({
"method": "Flow.Launcher.OpenSettingDialog",
"parameters": []}))
@classmethod
def start_loadingbar(cls):
"""
start loading animation in flow launcher
"""
print(demjson.encode({
"method": "Flow.Launcher.StartLoadingBar",
"parameters": []}))
@classmethod
def stop_loadingbar(cls):
"""
stop loading animation in flow launcher
"""
print(demjson.encode({
"method": "Flow.Launcher.StopLoadingBar",
"parameters": []}))
@classmethod
def reload_plugins(cls):
"""
reload all flow launcher plugins
"""
print(demjson.encode({
"method": "Flow.Launcher.ReloadPlugins",
"parameters": []}))
|
import argparse
import json
import os
import sys
from datetime import datetime
from posixpath import join, exists
from bgesdk.client import API
from bgesdk.error import APIError
from bgesdk.management.command import BaseCommand
from bgesdk.management.constants import (
TAB_CHOICES, TITLE_NAME, API_TABLE, DEFAULT_TOKEN_SECTION,
DEFAULT_OAUTH2_SECTION, DEFAULT_MODEL_TIMEOUT
)
from bgesdk.management.utils import (
config_get, get_active_project, read_config, get_home,
output, SYS_STR
)
from bgesdk.management.validate import validator_doc
class Command(BaseCommand):
order = 6
    help = 'Preview and publish model documentation; a sample file is ' \
           'available at https://api.bge.genomics.cn/doc/scripts/model_doc.json'
def add_arguments(self, parser):
home = get_home()
doc_ps = parser.add_subparsers(
            help='Preview and publish model documentation',
required=False
)
init_p = doc_ps.add_parser(
'init',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            help='Initialize a docsify project.'
)
init_p.add_argument(
'name',
type=str,
default='docs',
            help='Name of the docsify project'
)
init_p.add_argument(
'--home',
type=str,
default=home,
            help='Parent directory in which the docsify project is created; defaults to the current directory'
)
init_p.set_defaults(method=self.init_docsify, parser=init_p)
pre_p = doc_ps.add_parser(
'preview',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            help='Preview the model documentation'
)
pre_p.add_argument(
'path',
type=str,
            help='Path to the model documentation JSON file.'
)
pre_p.add_argument(
'--home',
type=str,
default=home,
            help='Root directory of the docsify project; defaults to the current directory'
)
pre_p.set_defaults(method=self.preview, parser=pre_p)
release_p = doc_ps.add_parser(
'release',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            help='Publish the model documentation'
)
release_p.set_defaults(method=self.release_doc, parser=release_p)
def handler(self, args):
"""打印 subparser 帮助信息"""
parser = args.parser
parser.print_help(sys.stderr)
def init_docsify(self, args):
name = args.name
home = args.home
if home is None:
home = get_home()
docs_dir = join(home, name)
if exists(docs_dir):
            output('Error: {} already exists'.format(docs_dir))
sys.exit(1)
if not exists(home):
            output('Error: home directory {} not found.'.format(home))
sys.exit(1)
with os.popen('docsify init {}'.format(docs_dir)) as f:
content = f.read()
if content:
            output('docsify project initialized at: {}'.format(docs_dir))
            output('Please change into the project directory.')
def get_docs_dir(self, home=None):
if home is None:
home = get_home()
doc_path = join(home, 'index.html')
if not exists(doc_path):
            output('Please make sure the current directory or the given path is the root directory of a docsify project.')
sys.exit(1)
return home
def preview(self, args):
        # 'whereis' is not a Windows command; use 'where' there, 'which' elsewhere.
        command = 'where docsify' if SYS_STR == 'windows' else 'which docsify'
with os.popen(command) as f:
content = f.read()
if not content:
            output('Please install docsify first; see https://docsify.js.org/#/quickstart')
sys.exit(1)
path = args.path
if not exists(path):
            output('Invalid file path: {}; please check it.'.format(path))
sys.exit(1)
docs_dir = self.get_docs_dir(home=args.home)
        with open(path) as f:
            doc_data = json.load(f)
result = validator_doc(doc_data)
if result['valid'] is False:
            output('Invalid file content; errors: {}'.format(result['errors']))
sys.exit(1)
doc_tab = doc_data['doc_tab']
model_id = doc_data['model_id']
doc_content = doc_data['doc_content']
sidebar_path = join(docs_dir, '_sidebar.md')
file_dir = join(docs_dir, 'model_center')
sidebar_lines = []
req_path = 'model_center/{}.md'.format(model_id)
for content in doc_content:
language = content['language']
doc_name = content['doc_name']
if language == 'en':
file_dir = join(
docs_dir, 'en', 'model_center')
req_path = 'en/model_center/{}.md'.format(model_id)
if not exists(file_dir):
os.makedirs(file_dir)
file_path = join(
file_dir, '{}.md'.format(model_id))
sidebar = ' * [{}]({})'.format(doc_name, req_path)
sidebar_lines.append(sidebar)
self._make_doc(content, file_path, model_id)
self._write_to_sidebar(doc_tab, sidebar_path, sidebar_lines)
self._write_index(docs_dir)
os.system('docsify serve {} --port 3000'.format(docs_dir))
def _make_doc(self, content, file_path, model_id):
line_feed = '\n'
lines = []
        divider_line = '--------'
developer = 'Developer: {}'.format(content.get('developer', ''))
content_title = '# {}'.format(content.get('content_title', ''))
ctime = 'Ctime: {}'.format(
datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'))
version_line = 'Version:'
join_lines = [content_title, ctime, developer, version_line,
divid_line]
for key, value in TITLE_NAME.items():
title_line = '#### {}'.format(value)
join_model_id = None
science_detail_title = None
if key == 'brief_intro':
title_line = '### {}'.format(value)
join_model_id = '**model_id**: `{}`'.format(model_id)
                science_detail_title = '### {}'.format('Scientific Details')
handle_content = self._join_content(content.get(key, {}))
join_lines.append(title_line)
join_lines.extend(handle_content)
join_lines.append(join_model_id)
join_lines.append(science_detail_title)
        api_title = '### {}'.format('API Usage')
join_lines.append(api_title)
for key, value in API_TABLE.items():
params_line = ''.format(key, value, key)
params = dict()
if key == 'QueryParams':
pass
elif key == 'Success':
params = content.get('return_params', None)
elif key == 'State':
params = content.get('state_explain', None)
params_table = self._join_table(params)
join_lines.append(params_line)
join_lines.append(params_table)
example_result_line = ''
json_str = '```json'
example_result = content.get('example_result')
example_result_json = ''
if example_result:
example_result_json = json.dumps(
example_result, sort_keys=False, indent=4)
last_json_str = '```'
join_lines.append(example_result_line)
join_lines.append(json_str)
join_lines.append(example_result_json)
join_lines.append(last_json_str)
join_lines.append(divid_line)
        ref_line = '### {}'.format('References')
join_lines.append(ref_line)
refs = content.get('ref', [])
ref_lines = []
if refs:
for index, ref in enumerate(refs):
ref_str = r'\[{}]: {}{}'.format(index + 1, ref, '<br>')
ref_lines.append(ref_str)
join_lines.append(ref_lines)
for line in join_lines:
if isinstance(line, list):
for l in line:
lines.append(l + line_feed)
elif isinstance(line, str):
lines.append(line + line_feed)
lines.append(line_feed)
with open(file_path, 'w') as f:
f.writelines(lines)
def _join_content(self, content):
line_feed = '\n'
templates = content.get('templates', None)
arguments = content.get('arguments', None)
if templates:
description_lines = []
            params = (arguments or {}).get('image') or []
target_array = 0
for tem in templates:
tem = str(tem)
word = '{%s}' % 'image'
if word in tem:
word_count = tem.count(word)
for index in range(target_array,
target_array + word_count):
                        try:
                            data = params[index]
                        except IndexError:
                            output(
                                'The keyword array in "arguments" must contain '
                                'at least as many entries as there are '
                                'placeholders in "templates"')
                            sys.exit(1)
image_md = '''{}<br><center><img src="{}"
width="386" height="386" /><div style="color: #999;
">{}</div></center><br>{}'''.format(
line_feed, data['uri'], data['caption'],
line_feed)
tem = tem.replace(word, image_md, 1)
target_array += word_count
description_lines.append(tem)
return description_lines
        return ['Not mentioned']
def _join_table(self, params):
table_lines = []
        table_1 = '| {} | {} |'.format('Name'.ljust(29), 'Description'.ljust(29))
table_2 = '| {} | {} |'.format('-' * 31, '-' * 31)
table_lines.append(table_1)
table_lines.append(table_2)
for key, value in params.items():
param_line = '| {} | {} |'.format(
str(key).ljust(31), str(value))
table_lines.append(param_line)
return table_lines
def _write_to_sidebar(self, doc_tab, sidebar_path, sidebar_lines):
        sidebar_title = '* :chart_with_upwards_trend: Model Center'
tab_choices = dict(TAB_CHOICES)
tab = ' * {}'.format(tab_choices.get(doc_tab))
lines = [sidebar_title, tab]
lines.extend(sidebar_lines)
with open(sidebar_path, 'w') as f:
for line in lines:
f.write(line)
f.write('\n')
def _write_index(self, docs_dir):
index_path = join(docs_dir, 'index.html')
if exists(index_path) is False:
            output('No index.html found in the docs directory')
sys.exit(1)
lines = []
with open(index_path, 'r') as f:
for line in f.readlines():
lines.append(line)
if line == ' window.$docsify = {\n':
lines.append(' loadSidebar: true,\n')
elif line == ' </script>\n':
lines.append(
' <script src="//cdn.jsdelivr.net/npm/prismjs/'
'components/prism-json.min.js"></script>\n')
lines.append(
' <script src="//cdn.jsdelivr.net/npm/docsify-kate'
'x@latest/dist/docsify-katex.js"></script>\n')
lines.append(
' <link rel="stylesheet" href="//cdn.jsdelivr.net/'
'npm/katex@latest/dist/katex.min.css"/>\n')
with open(index_path, 'w') as f:
f.writelines(lines)
def release_doc(self, args):
project = get_active_project()
config = read_config(project)
access_token = config_get(
config.get, DEFAULT_TOKEN_SECTION, 'access_token')
endpoint = config_get(config.get, DEFAULT_OAUTH2_SECTION, 'endpoint')
api = API(
access_token, endpoint=endpoint, timeout=DEFAULT_MODEL_TIMEOUT)
        input_value = input('? Please enter the path of the JSON file to publish: ')
if input_value:
if not exists(input_value):
                output('Invalid file path: {}. Please check it.'.format(input_value))
sys.exit(1)
            with open(input_value) as fp:
                doc_data = json.load(fp)
doc_tab = doc_data['doc_tab']
model_id = doc_data['model_id']
doc_content = doc_data['doc_content']
try:
result = api.upload_model_doc(doc_tab, model_id, doc_content)
except APIError as e:
                output('Failed to upload the model documentation: {}'.format(e))
sys.exit(1)
            output('Model documentation upload result:')
output(json.dumps(result.json(), indent=4, ensure_ascii=False))
else:
            output('Please enter the path of the document to publish')
sys.exit(1)
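# Illustrative sketch of the model-doc JSON consumed by the ``preview`` and
# ``release`` subcommands above. This is NOT the authoritative schema (see
# validator_doc and the sample file linked in the command help); only keys
# that this module actually reads are shown, and every value below is a
# hypothetical placeholder.
_EXAMPLE_MODEL_DOC = {
    'doc_tab': 'demo_tab',            # must be a valid key of TAB_CHOICES
    'model_id': 'demo_model',
    'doc_content': [
        {
            'language': 'en',         # 'en' content is written to en/model_center/
            'doc_name': 'Demo model',
            'content_title': 'Demo model',
            'developer': 'Example Lab',
            # Sections listed in TITLE_NAME (e.g. 'brief_intro') use the
            # templates/arguments mechanism of _join_content: each '{image}'
            # placeholder is replaced, in order, by an <img> block built from
            # the matching entry of arguments['image'].
            'brief_intro': {
                'templates': ['Intro text with an inline figure: {image}'],
                'arguments': {
                    'image': [{'uri': 'https://example.com/fig1.png',
                               'caption': 'Figure 1'}],
                },
            },
            'return_params': {'score': 'prediction score'},
            'state_explain': {'0': 'success'},
            'example_result': {'score': 0.9},
            'ref': ['Some paper (2020)'],
        },
    ],
}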
|
from broker.providers.decoder import DecoderProvider
from fvhiot.parsers.dlmbx import decode_hex
class DlmbxDecoder(DecoderProvider):
description = 'Decode Digital matter MBX payload'
def decode_payload(self, hex_payload, port, **kwargs):
data = decode_hex(hex_payload, port)
# TODO: remove dl_id, protocol keys?
return data
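# Minimal usage sketch (the hex payload and port below are hypothetical; a
# real payload must be one that fvhiot.parsers.dlmbx.decode_hex understands):
#
#     decoder = DlmbxDecoder()
#     parsed = decoder.decode_payload('0102030405', 1)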
|
import warnings
from json import loads as json_loads
from os import fsync
from sys import exc_info
from json_tricks.utils import is_py3, dict_default, gzip_compress, gzip_decompress, JsonTricksDeprecation
from .utils import str_type, NoNumpyException # keep 'unused' imports
from .comment import strip_comments # keep 'unused' imports
#TODO @mark: imports removed?
from .encoders import TricksEncoder, json_date_time_encode, \
class_instance_encode, json_complex_encode, json_set_encode, numeric_types_encode, numpy_encode, \
nonumpy_encode, nopandas_encode, pandas_encode, noenum_instance_encode, \
enum_instance_encode, pathlib_encode # keep 'unused' imports
from .decoders import TricksPairHook, \
json_date_time_hook, ClassInstanceHook, \
json_complex_hook, json_set_hook, numeric_types_hook, json_numpy_obj_hook, \
json_nonumpy_obj_hook, \
nopandas_hook, pandas_hook, EnumInstanceHook, \
noenum_hook, pathlib_hook, nopathlib_hook # keep 'unused' imports
ENCODING = 'UTF-8'
_cih_instance = ClassInstanceHook()
_eih_instance = EnumInstanceHook()
DEFAULT_ENCODERS = [json_date_time_encode, json_complex_encode, json_set_encode,
numeric_types_encode, class_instance_encode, ]
DEFAULT_HOOKS = [json_date_time_hook, json_complex_hook, json_set_hook,
numeric_types_hook, _cih_instance, ]
#TODO @mark: add properties to all built-in encoders (for speed - but it should keep working without)
try:
import enum
except ImportError:
DEFAULT_ENCODERS = [noenum_instance_encode,] + DEFAULT_ENCODERS
DEFAULT_HOOKS = [noenum_hook,] + DEFAULT_HOOKS
else:
DEFAULT_ENCODERS = [enum_instance_encode,] + DEFAULT_ENCODERS
DEFAULT_HOOKS = [_eih_instance,] + DEFAULT_HOOKS
try:
import numpy
except ImportError:
DEFAULT_ENCODERS = [nonumpy_encode,] + DEFAULT_ENCODERS
DEFAULT_HOOKS = [json_nonumpy_obj_hook,] + DEFAULT_HOOKS
else:
# numpy encode needs to be before complex
DEFAULT_ENCODERS = [numpy_encode,] + DEFAULT_ENCODERS
DEFAULT_HOOKS = [json_numpy_obj_hook,] + DEFAULT_HOOKS
try:
import pandas
except ImportError:
DEFAULT_ENCODERS = [nopandas_encode,] + DEFAULT_ENCODERS
DEFAULT_HOOKS = [nopandas_hook,] + DEFAULT_HOOKS
else:
DEFAULT_ENCODERS = [pandas_encode,] + DEFAULT_ENCODERS
DEFAULT_HOOKS = [pandas_hook,] + DEFAULT_HOOKS
try:
import pathlib
except:
# No need to include a "nopathlib_encode" hook since we would not encounter
# the Path object if pathlib isn't available. However, we *could* encounter
# a serialized Path object (produced by a version of Python with pathlib).
DEFAULT_HOOKS = [nopathlib_hook,] + DEFAULT_HOOKS
else:
DEFAULT_ENCODERS = [pathlib_encode,] + DEFAULT_ENCODERS
DEFAULT_HOOKS = [pathlib_hook,] + DEFAULT_HOOKS
DEFAULT_NONP_ENCODERS = [nonumpy_encode,] + DEFAULT_ENCODERS # DEPRECATED
DEFAULT_NONP_HOOKS = [json_nonumpy_obj_hook,] + DEFAULT_HOOKS # DEPRECATED
def dumps(obj, sort_keys=None, cls=None, obj_encoders=DEFAULT_ENCODERS, extra_obj_encoders=(),
primitives=False, compression=None, allow_nan=False, conv_str_byte=False, fallback_encoders=(),
properties=None, **jsonkwargs):
"""
Convert a nested data structure to a json string.
:param obj: The Python object to convert.
:param sort_keys: Keep this False if you want order to be preserved.
:param cls: The json encoder class to use, defaults to NoNumpyEncoder which gives a warning for numpy arrays.
	:param obj_encoders: Iterable of encoders to use to convert arbitrary objects into json-able primitives.
:param extra_obj_encoders: Like `obj_encoders` but on top of them: use this to add encoders without replacing defaults. Since v3.5 these happen before default encoders.
	:param fallback_encoders: These are extra `obj_encoders` that 1) are run after all others and 2) only run if the object hasn't yet been changed.
:param allow_nan: Allow NaN and Infinity values, which is a (useful) violation of the JSON standard (default False).
:param conv_str_byte: Try to automatically convert between strings and bytes (assuming utf-8) (default False).
:param properties: A dictionary of properties that is passed to each encoder that will accept it.
:return: The string containing the json-encoded version of obj.
Other arguments are passed on to `cls`. Note that `sort_keys` should be false if you want to preserve order.
"""
if not hasattr(extra_obj_encoders, '__iter__'):
raise TypeError('`extra_obj_encoders` should be a tuple in `json_tricks.dump(s)`')
encoders = tuple(extra_obj_encoders) + tuple(obj_encoders)
properties = properties or {}
dict_default(properties, 'primitives', primitives)
dict_default(properties, 'compression', compression)
dict_default(properties, 'allow_nan', allow_nan)
if cls is None:
cls = TricksEncoder
txt = cls(sort_keys=sort_keys, obj_encoders=encoders, allow_nan=allow_nan,
primitives=primitives, fallback_encoders=fallback_encoders,
properties=properties, **jsonkwargs).encode(obj)
if not is_py3 and isinstance(txt, str):
txt = unicode(txt, ENCODING)
if not compression:
return txt
if compression is True:
compression = 5
txt = txt.encode(ENCODING)
gzstring = gzip_compress(txt, compresslevel=compression)
return gzstring
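# Example (a minimal sketch): ``dumps`` round-trips types that plain json
# cannot, and returns gzip-compressed bytes when ``compression`` is set:
#
#     from datetime import datetime
#     txt = dumps({'when': datetime(2020, 1, 1), 'vals': {1, 2}})   # a str
#     blob = dumps({'big': list(range(100))}, compression=6)        # gzip bytes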
def dump(obj, fp, sort_keys=None, cls=None, obj_encoders=DEFAULT_ENCODERS, extra_obj_encoders=(),
primitives=False, compression=None, force_flush=False, allow_nan=False, conv_str_byte=False,
fallback_encoders=(), properties=None, **jsonkwargs):
"""
	Convert a nested data structure to a json string and write it to a file.
:param fp: File handle or path to write to.
:param compression: The gzip compression level, or None for no compression.
:param force_flush: If True, flush the file handle used, when possibly also in the operating system (default False).
The other arguments are identical to `dumps`.
"""
if (isinstance(obj, str_type) or hasattr(obj, 'write')) and isinstance(fp, (list, dict)):
raise ValueError('json-tricks dump arguments are in the wrong order: provide the data to be serialized before file handle')
txt = dumps(obj, sort_keys=sort_keys, cls=cls, obj_encoders=obj_encoders, extra_obj_encoders=extra_obj_encoders,
primitives=primitives, compression=compression, allow_nan=allow_nan, conv_str_byte=conv_str_byte,
fallback_encoders=fallback_encoders, properties=properties, **jsonkwargs)
if isinstance(fp, str_type):
if compression:
fh = open(fp, 'wb+')
else:
fh = open(fp, 'w+')
else:
fh = fp
if conv_str_byte:
try:
fh.write(b'')
except TypeError:
pass
# if not isinstance(txt, str_type):
# # Cannot write bytes, so must be in text mode, but we didn't get a text
# if not compression:
# txt = txt.decode(ENCODING)
else:
try:
fh.write(u'')
except TypeError:
if isinstance(txt, str_type):
txt = txt.encode(ENCODING)
try:
if compression and 'b' not in getattr(fh, 'mode', 'b?') and not isinstance(txt, str_type):
raise IOError('If compression is enabled, the file must be opened in binary mode.')
try:
fh.write(txt)
except TypeError as err:
err.args = (err.args[0] + '. A possible reason is that the file is not opened in binary mode; '
'be sure to set file mode to something like "wb".',)
raise
finally:
if force_flush:
fh.flush()
try:
if fh.fileno() is not None:
fsync(fh.fileno())
except (ValueError,):
pass
if isinstance(fp, str_type):
fh.close()
return txt
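# Example (sketch): when given a path instead of a handle, ``dump`` opens the
# file itself, choosing binary mode whenever compression is enabled:
#
#     dump({'x': 1}, '/tmp/data.json')
#     dump({'x': 1}, '/tmp/data.json.gz', compression=True)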
def loads(string, preserve_order=True, ignore_comments=None, decompression=None, obj_pairs_hooks=DEFAULT_HOOKS,
extra_obj_pairs_hooks=(), cls_lookup_map=None, allow_duplicates=True, conv_str_byte=False,
properties=None, **jsonkwargs):
"""
	Convert a json string to a nested data structure.
:param string: The string containing a json encoded data structure.
	:param decode_cls_instances: True to attempt to decode class instances (requires the environment to be similar to the encoding one).
:param preserve_order: Whether to preserve order by using OrderedDicts or not.
:param ignore_comments: Remove comments (starting with # or //).
:param decompression: True to use gzip decompression, False to use raw data, None to automatically determine (default). Assumes utf-8 encoding!
:param obj_pairs_hooks: A list of dictionary hooks to apply.
:param extra_obj_pairs_hooks: Like `obj_pairs_hooks` but on top of them: use this to add hooks without replacing defaults. Since v3.5 these happen before default hooks.
:param cls_lookup_map: If set to a dict, for example ``globals()``, then classes encoded from __main__ are looked up this dict.
:param allow_duplicates: If set to False, an error will be raised when loading a json-map that contains duplicate keys.
	:param parse_float: A function to parse strings representing floats into another type (e.g. Decimal). There is also `parse_int`.
:param conv_str_byte: Try to automatically convert between strings and bytes (assuming utf-8) (default False).
	:return: The decoded data structure.
Other arguments are passed on to json_func.
"""
if not hasattr(extra_obj_pairs_hooks, '__iter__'):
raise TypeError('`extra_obj_pairs_hooks` should be a tuple in `json_tricks.load(s)`')
if decompression is None:
decompression = isinstance(string, bytes) and string[:2] == b'\x1f\x8b'
if decompression:
string = gzip_decompress(string).decode(ENCODING)
if not isinstance(string, str_type):
if conv_str_byte:
string = string.decode(ENCODING)
else:
			raise TypeError(('The input was of non-string type "{0:}" in `json_tricks.load(s)`. '
				'Bytes cannot be automatically decoded since the encoding is not known. The recommended '
				'way is to instead decode the bytes to a string and pass that string to `load(s)`, '
				'for example bytevar.decode("utf-8") if utf-8 is the encoding. Alternatively you can '
				'force an attempt by passing conv_str_byte=True, but this may cause decoding issues.')
				.format(type(string)))
if ignore_comments or ignore_comments is None:
new_string = strip_comments(string)
if ignore_comments is None and not getattr(loads, '_ignore_comments_warned', False) and string != new_string:
warnings.warn('`json_tricks.load(s)` stripped some comments, but `ignore_comments` was '
'not passed; in the next major release, the behaviour when `ignore_comments` is not '
'passed will change; it is recommended to explicitly pass `ignore_comments=True` if '
'you want to strip comments; see https://github.com/mverleg/pyjson_tricks/issues/74',
JsonTricksDeprecation)
loads._ignore_comments_warned = True
string = new_string
properties = properties or {}
dict_default(properties, 'preserve_order', preserve_order)
dict_default(properties, 'ignore_comments', ignore_comments)
dict_default(properties, 'decompression', decompression)
dict_default(properties, 'cls_lookup_map', cls_lookup_map)
dict_default(properties, 'allow_duplicates', allow_duplicates)
hooks = tuple(extra_obj_pairs_hooks) + tuple(obj_pairs_hooks)
hook = TricksPairHook(ordered=preserve_order, obj_pairs_hooks=hooks, allow_duplicates=allow_duplicates, properties=properties)
return json_loads(string, object_pairs_hook=hook, **jsonkwargs)
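# Example (sketch): ``loads`` reverses ``dumps`` and, when ``decompression``
# is None, detects gzip input by its two magic bytes:
#
#     from datetime import datetime
#     data = {'when': datetime(2020, 1, 1), 'vals': {1, 2}}
#     assert loads(dumps(data)) == data
#     assert loads(dumps(data, compression=True)) == data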
def load(fp, preserve_order=True, ignore_comments=None, decompression=None, obj_pairs_hooks=DEFAULT_HOOKS,
extra_obj_pairs_hooks=(), cls_lookup_map=None, allow_duplicates=True, conv_str_byte=False,
properties=None, **jsonkwargs):
"""
	Convert the content of a json file to a nested data structure.
:param fp: File handle or path to load from.
The other arguments are identical to loads.
"""
try:
if isinstance(fp, str_type):
if decompression is not None:
open_binary = bool(decompression)
else:
with open(fp, 'rb') as fh:
# This attempts to detect gzip mode; gzip should always
# have this header, and text json can't have it.
open_binary = (fh.read(2) == b'\x1f\x8b')
with open(fp, 'rb' if open_binary else 'r') as fh:
string = fh.read()
else:
string = fp.read()
except UnicodeDecodeError as err:
# todo: not covered in tests, is it relevant?
raise Exception('There was a problem decoding the file content. A possible reason is that the file is not ' +
'opened in binary mode; be sure to set file mode to something like "rb".').with_traceback(exc_info()[2])
return loads(string, preserve_order=preserve_order, ignore_comments=ignore_comments, decompression=decompression,
obj_pairs_hooks=obj_pairs_hooks, extra_obj_pairs_hooks=extra_obj_pairs_hooks, cls_lookup_map=cls_lookup_map,
allow_duplicates=allow_duplicates, conv_str_byte=conv_str_byte, properties=properties, **jsonkwargs)
|
from openql import openql as ql
import os
import argparse
def circuit(config_file, new_scheduler='yes', scheduler='ASAP', uniform_sched= 'no', sched_commute = 'yes', mapper='base', moves='no', maptiebreak='random', initial_placement='no', output_dir_name='test_output', optimize='no', measurement=True, log_level='LOG_WARNING'):
curdir = os.path.dirname(__file__)
output_dir = os.path.join(curdir, output_dir_name)
ql.set_option('output_dir', output_dir)
ql.set_option('optimize', optimize)
ql.set_option('scheduler', scheduler)
ql.set_option('scheduler_uniform', uniform_sched)
ql.set_option('mapper', mapper)
ql.set_option('initialplace', initial_placement)
ql.set_option('log_level', log_level)
ql.set_option('scheduler_post179', new_scheduler)
ql.set_option('scheduler_commute', sched_commute)
ql.set_option('mapusemoves', moves)
ql.set_option('maptiebreak', maptiebreak)
config_fn = os.path.join(curdir, config_file)
# platform = ql.Platform('platform_none', config_fn)
platform = ql.Platform('starmon', config_fn)
num_circuits = 1
num_qubits = 3
    p = ql.Program('bernstein_vazirani_1b_secret_8', platform, num_qubits)
    k = ql.Kernel('bernstein_vazirani_1b_secret_8', platform, num_qubits)
k.gate('x',[1])
k.gate('h',[0])
k.gate('h',[1])
k.gate('h',[0])
k.gate('h',[1])
if measurement:
for q in range(num_qubits):
k.gate('measure', [q])
p.add_kernel(k)
p.compile()
ql.set_option('mapper', 'no')
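# Example invocation (sketch; 'hardware_config.json' is a placeholder for a
# real OpenQL platform configuration file):
#
#     circuit('hardware_config.json', scheduler='ALAP', mapper='minextend')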
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='OpenQL compilation of a Quantum Algorithm')
parser.add_argument('config_file', help='Path to the OpenQL configuration file to compile this algorithm')
parser.add_argument('--new_scheduler', nargs='?', default='yes', help='Scheduler defined by Hans')
parser.add_argument('--scheduler', nargs='?', default='ASAP', help='Scheduler specification (ASAP (default), ALAP, ...)')
    parser.add_argument('--uniform_sched', nargs='?', default='no', help='Uniform scheduler activation (yes or no)')
parser.add_argument('--sched_commute', nargs='?', default='yes', help='Permits two-qubit gates to be commutable')
parser.add_argument('--mapper', nargs='?', default='base', help='Mapper specification (base, minextend, minextendrc)')
    parser.add_argument('--moves', nargs='?', default='no', help='Allow the use of moves (yes or no)')
parser.add_argument('--maptiebreak', nargs='?', default='random', help='')
parser.add_argument('--initial_placement', nargs='?', default='no', help='Initial placement specification (yes or no)')
parser.add_argument('--out_dir', nargs='?', default='test_output', help='Folder name to store the compilation')
parser.add_argument('--measurement', nargs='?', default=True, help='Add measurement to all the qubits in the end of the algorithm')
args = parser.parse_args()
try:
circuit(args.config_file, args.new_scheduler, args.scheduler, args.uniform_sched, args.sched_commute, args.mapper, args.moves, args.maptiebreak, args.initial_placement, args.out_dir)
except TypeError:
        print('\nCompiled, but some gate is not defined in the configuration file.\nThe gate will be invoked as-is.')
raise
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import codecs
from setuptools import setup
def read(fname):
file_path = os.path.join(os.path.dirname(__file__), fname)
return codecs.open(file_path, encoding='utf-8').read()
setup(
name='custom-interface',
version='0.0.1',
author='Rafael',
author_email='contact@rafagomes.com',
maintainer='Rafael',
maintainer_email='contact@rafagomes.com',
license='Apache Software License 2.0',
url='https://github.com/bond-challenge/software-engineering',
description='Sample of an interface for a data structure',
long_description=read('README.rst'),
py_modules=['interface'],
python_requires='>=3.9',
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Software Development :: Testing',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.9',
'Operating System :: OS Independent',
'License :: OSI Approved :: Apache Software License',
],
)
|
import argparse
import warnings
import logging
import os
from imp import reload
from google.protobuf import text_format
from onnx import defs
import tensorflow as tf
from onnx_tf.common import get_output_node_names
from onnx_tf.common.handler_helper import get_all_frontend_handlers
from onnx_tf.common.handler_helper import get_frontend_coverage
from onnx_tf.pb_wrapper import TensorflowNode
warnings.filterwarnings('ignore')
reload(logging)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def parse_args(args):
parser = argparse.ArgumentParser(
description=
'This is the checker to check whether model graph '\
'operators are supported by the frontend handlers'
)
parser.add_argument(
"--infile",
"-i",
help="Input file path, can be pb, pbtxt, or ckpt file.",
required=True)
return parser.parse_args(args)
def check_opr_support(graph_def):
""" Check support for operators in graph
:param graph_def: the graph of operations
:return: whether all operators are supported, supported operators in graph
"""
logger.info('Checking for unsupported operators...')
node_dict = set()
unsupported = set()
supported = set()
for node in graph_def.node:
node_dict.add(node.op)
node_dict.discard('Placeholder')
node_dict.discard('Const')
logger.info('There are %s unique operators in the model file.',
str(len(node_dict)))
frontend_tf_coverage = get_frontend_coverage().get('tf_coverage')
frontend_tf_opset_dict = frontend_tf_coverage.get(defs.ONNX_DOMAIN, {})
for k in node_dict:
if k not in frontend_tf_opset_dict:
unsupported.add(k)
else:
supported.add(k)
if unsupported:
logger.info(
'There are %s operators currently not supported by the '\
'ONNX-Tensorflow frontend for your model.',
str(len(unsupported)))
logger.info(unsupported)
return False, supported
logger.info('All operators in the model are supported!')
return True, supported
def check_node_args(graph_def, supported):
""" Check for required node arguments in graph
:param graph_def: the graph of operations
:param supported: the supported operators in graph
:return: whether all required parameters are provided
"""
logger.info('Checking for required node arguments...')
opset_dict = {}
opset_dict[defs.ONNX_DOMAIN] = defs.onnx_opset_version()
handlers = get_all_frontend_handlers(opset_dict)
total_nodes = 0
failed_nodes = 0
for node in graph_def.node:
if node.op in supported:
total_nodes += 1
tf_node = TensorflowNode(node)
kwargs = {}
for inp in node.input:
for attr_node in graph_def.node:
if inp == attr_node.name:
kwargs[inp] = attr_node.attr['value']
break
handler = handlers.get(defs.ONNX_DOMAIN, {}).get(node.op, None)
try:
handler.args_check(tf_node, consts=kwargs)
except Exception as e:
logger.info(e)
failed_nodes += 1
logger.info('We checked %d supported nodes for required arguments.',
total_nodes)
logger.info(' # of nodes passed the args check: %d',
total_nodes - failed_nodes)
logger.info(' # of nodes failed the args check: %d', failed_nodes)
return failed_nodes == 0
def load_graph_from_ckpt(ckpt_file):
""" Load graph from a checkpoint
:param ckpt_file: the checkpoint file
:return: graph of operations extracted from the checkpoint
"""
latest_ckpt = tf.train.latest_checkpoint(os.path.dirname(ckpt_file))
saver = tf.train.import_meta_graph(latest_ckpt + ".meta")
with tf.Session() as sess:
sess.run(
[tf.global_variables_initializer(),
tf.local_variables_initializer()])
saver.restore(sess, latest_ckpt)
output_node_names = get_output_node_names(sess.graph.as_graph_def())
graph_def = tf.graph_util.convert_variables_to_constants(
sess, sess.graph.as_graph_def(add_shapes=True), output_node_names)
return graph_def
def check(graphfile):
ext = os.path.splitext(graphfile)[1]
graph_def = tf.GraphDef()
if ext == ".pb":
with tf.gfile.GFile(graphfile, "rb") as f:
graph_def.ParseFromString(f.read())
elif ext == ".pbtxt":
with tf.gfile.GFile(graphfile, "rb") as f:
text_format.Merge(f.read(), graph_def)
elif ext == ".ckpt":
graph_def = load_graph_from_ckpt(graphfile)
else:
raise ValueError(
'Input file is not supported. Should be .pb, .pbtxt, or .ckpt, '\
            'but got {}.'.format(ext))
op_passed, supported = check_opr_support(graph_def)
args_passed = check_node_args(graph_def, supported)
if op_passed and args_passed:
logger.info("Your model is good to go!")
else:
logger.info("Some work is needed before we can export your model to ONNX.")
def main(args):
args = parse_args(args)
check(args.infile)
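# Typical invocation (sketch; 'frozen_model.pb' is a hypothetical frozen
# TensorFlow graph):
#
#     python this_script.py --infile frozen_model.pb
if __name__ == '__main__':
    # parse_args(None) makes argparse fall back to sys.argv[1:]
    main(None)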
|
# Copyright 2016-2022 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import pytest
import random
import sys
import time
import reframe
import reframe.core.fields as fields
import reframe.core.runtime as rt
import reframe.utility as util
import reframe.utility.jsonext as jsonext
import reframe.utility.osext as osext
import reframe.utility.sanity as sn
import unittests.utility as test_util
from reframe.core.exceptions import (ConfigError,
SpawnedProcessError,
SpawnedProcessTimeout)
def test_command_success():
completed = osext.run_command('echo foobar')
assert completed.returncode == 0
assert completed.stdout == 'foobar\n'
def test_command_success_cmd_seq():
completed = osext.run_command(['echo', 'foobar'])
assert completed.returncode == 0
assert completed.stdout == 'foobar\n'
def test_command_error():
with pytest.raises(SpawnedProcessError,
match=r"command 'false' failed with exit code 1"):
osext.run_command('false', check=True)
def test_command_error_cmd_seq():
with pytest.raises(SpawnedProcessError,
match=r"command 'false' failed with exit code 1"):
osext.run_command(['false'], check=True)
def test_command_timeout():
with pytest.raises(
SpawnedProcessTimeout, match=r"command 'sleep 3' timed out "
r'after 2s') as exc_info:
osext.run_command('sleep 3', timeout=2)
assert exc_info.value.timeout == 2
# Try to get the string repr. of the exception: see bug #658
str(exc_info.value)
def test_command_stdin(tmp_path):
with open(tmp_path / 'in.txt', 'w') as fp:
fp.write('hello')
with open(tmp_path / 'in.txt') as fp:
completed = osext.run_command('cat', stdin=fp)
assert completed.stdout == 'hello'
def test_command_async():
t_launch = time.time()
t_sleep = t_launch
proc = osext.run_command_async('sleep 1')
t_launch = time.time() - t_launch
proc.wait()
t_sleep = time.time() - t_sleep
# Now check the timings
assert t_launch < 1
assert t_sleep >= 1
def test_copytree(tmp_path):
dir_src = tmp_path / 'src'
dir_src.mkdir()
dir_dst = tmp_path / 'dst'
dir_dst.mkdir()
osext.copytree(str(dir_src), str(dir_dst), dirs_exist_ok=True)
def test_copytree_src_parent_of_dst(tmp_path):
dst_path = tmp_path / 'dst'
src_path = (dst_path / '..').resolve()
with pytest.raises(ValueError):
osext.copytree(str(src_path), str(dst_path))
@pytest.fixture(params=['dirs_exist_ok=True', 'dirs_exist_ok=False'])
def dirs_exist_ok(request):
return 'True' in request.param
def test_copytree_dst_notdir(tmp_path, dirs_exist_ok):
dir_src = tmp_path / 'src'
dir_src.mkdir()
dst = tmp_path / 'dst'
dst.touch()
with pytest.raises(FileExistsError, match=fr'{dst}'):
osext.copytree(str(dir_src), str(dst), dirs_exist_ok=dirs_exist_ok)
def test_copytree_src_notdir(tmp_path, dirs_exist_ok):
src = tmp_path / 'src'
src.touch()
dst = tmp_path / 'dst'
dst.mkdir()
with pytest.raises(NotADirectoryError, match=fr'{src}'):
osext.copytree(str(src), str(dst), dirs_exist_ok=dirs_exist_ok)
def test_copytree_src_does_not_exist(tmp_path, dirs_exist_ok):
src = tmp_path / 'src'
dst = tmp_path / 'dst'
dst.mkdir()
with pytest.raises(FileNotFoundError, match=fr'{src}'):
osext.copytree(str(src), str(dst), dirs_exist_ok=dirs_exist_ok)
@pytest.fixture
def rmtree(tmp_path):
testdir = tmp_path / 'test'
testdir.mkdir()
with open(os.path.join(str(testdir), 'foo.txt'), 'w') as fp:
fp.write('hello\n')
def _rmtree(*args, **kwargs):
osext.rmtree(testdir, *args, **kwargs)
assert not os.path.exists(testdir)
return _rmtree
def test_rmtree(rmtree):
rmtree()
def test_rmtree_onerror(rmtree):
rmtree(onerror=lambda *args: None)
def test_rmtree_error(tmp_path):
    # Try to remove a nonexistent directory
testdir = tmp_path / 'tmp'
testdir.mkdir()
os.rmdir(str(testdir))
with pytest.raises(OSError):
osext.rmtree(testdir)
def test_inpath():
assert osext.inpath('/foo/bin', '/bin:/foo/bin:/usr/bin')
assert not osext.inpath('/foo/bin', '/bin:/usr/local/bin')
@pytest.fixture
def tempdirs(tmp_path):
# Create a temporary directory structure
# foo/
# bar/
# boo/
# goo/
# loo/
# bar/
prefix = tmp_path / 'prefix'
(prefix / 'foo' / 'bar' / 'boo').mkdir(parents=True)
(prefix / 'foo' / 'goo').mkdir()
(prefix / 'loo' / 'bar').mkdir(parents=True)
return prefix
def test_subdirs(tempdirs):
# Try to fool the algorithm by adding normal files
prefix_name = str(tempdirs)
open(os.path.join(prefix_name, 'foo', 'bar', 'file.txt'), 'w').close()
open(os.path.join(prefix_name, 'loo', 'file.txt'), 'w').close()
expected_subdirs = {prefix_name,
os.path.join(prefix_name, 'foo'),
os.path.join(prefix_name, 'foo', 'bar'),
os.path.join(prefix_name, 'foo', 'bar', 'boo'),
os.path.join(prefix_name, 'foo', 'goo'),
os.path.join(prefix_name, 'loo'),
os.path.join(prefix_name, 'loo', 'bar')}
returned_subdirs = osext.subdirs(prefix_name)
assert [prefix_name] == returned_subdirs
returned_subdirs = osext.subdirs(prefix_name, recurse=True)
assert expected_subdirs == set(returned_subdirs)
def test_samefile(tempdirs):
prefix_name = str(tempdirs)
# Try to fool the algorithm by adding symlinks
os.symlink(os.path.join(prefix_name, 'foo'),
os.path.join(prefix_name, 'foolnk'))
os.symlink(os.path.join(prefix_name, 'foolnk'),
os.path.join(prefix_name, 'foolnk1'))
# Create a broken link on purpose
os.symlink('/foo', os.path.join(prefix_name, 'broken'))
os.symlink(os.path.join(prefix_name, 'broken'),
os.path.join(prefix_name, 'broken1'))
assert osext.samefile('/foo', '/foo')
assert osext.samefile('/foo', '/foo/')
assert osext.samefile('/foo/bar', '/foo//bar/')
assert osext.samefile(os.path.join(prefix_name, 'foo'),
os.path.join(prefix_name, 'foolnk'))
assert osext.samefile(os.path.join(prefix_name, 'foo'),
os.path.join(prefix_name, 'foolnk1'))
assert not osext.samefile('/foo', '/bar')
assert osext.samefile('/foo', os.path.join(prefix_name, 'broken'))
assert osext.samefile(os.path.join(prefix_name, 'broken'),
os.path.join(prefix_name, 'broken1'))
def test_is_interactive(monkeypatch):
    # Set `sys.ps1` to imitate an interactive session
monkeypatch.setattr(sys, 'ps1', 'rfm>>> ', raising=False)
assert osext.is_interactive()
def test_is_url():
repo_https = 'https://github.com/eth-cscs/reframe.git'
repo_ssh = 'git@github.com:eth-cscs/reframe.git'
assert osext.is_url(repo_https)
assert not osext.is_url(repo_ssh)
@pytest.fixture
def git_only():
try:
osext.run_command('git --version', check=True, log=False)
except (SpawnedProcessError, FileNotFoundError):
pytest.skip('no git installation found on system')
try:
osext.run_command('git status', check=True, log=False)
except (SpawnedProcessError, FileNotFoundError):
pytest.skip('not inside a git repository')
def test_git_repo_hash(git_only):
    # A git commit hash is 8 (short) or 40 characters long.
assert len(osext.git_repo_hash()) == 8
assert len(osext.git_repo_hash(short=False)) == 40
assert osext.git_repo_hash(commit='invalid') is None
assert osext.git_repo_hash(commit='') is None
def test_git_repo_hash_no_git(git_only, monkeypatch):
# Emulate a system with no git installed
monkeypatch.setenv('PATH', '')
assert osext.git_repo_hash() is None
def test_git_repo_hash_no_git_repo(git_only, monkeypatch, tmp_path):
# Emulate trying to get the hash from somewhere where there is no repo
monkeypatch.setenv('GIT_DIR', str(tmp_path))
assert osext.git_repo_hash() is None
def test_git_repo_exists(git_only):
assert osext.git_repo_exists('https://github.com/eth-cscs/reframe.git',
timeout=10)
assert not osext.git_repo_exists('reframe.git', timeout=10)
assert not osext.git_repo_exists('https://github.com/eth-cscs/xxx',
timeout=10)
def test_force_remove_file(tmp_path):
fp = tmp_path / 'tmp_file'
fp.touch()
fp_name = str(fp)
assert os.path.exists(fp_name)
osext.force_remove_file(fp_name)
assert not os.path.exists(fp_name)
# Try to remove a non-existent file
osext.force_remove_file(fp_name)
def test_expandvars_dollar():
text = 'Hello, $(echo World)'
assert 'Hello, World' == osext.expandvars(text)
# Test nested expansion
text = '$(echo Hello, $(echo World))'
assert 'Hello, World' == osext.expandvars(text)
def test_expandvars_backticks():
text = 'Hello, `echo World`'
assert 'Hello, World' == osext.expandvars(text)
# Test nested expansion
text = '`echo Hello, `echo World``'
assert 'Hello, World' == osext.expandvars(text)
def test_expandvars_mixed_syntax():
text = '`echo Hello, $(echo World)`'
assert 'Hello, World' == osext.expandvars(text)
text = '$(echo Hello, `echo World`)'
assert 'Hello, World' == osext.expandvars(text)
def test_expandvars_error():
text = 'Hello, $(foo)'
with pytest.raises(SpawnedProcessError):
osext.expandvars(text)
def test_strange_syntax():
text = 'Hello, $(foo`'
assert 'Hello, $(foo`' == osext.expandvars(text)
text = 'Hello, `foo)'
assert 'Hello, `foo)' == osext.expandvars(text)
def test_expandvars_nocmd(monkeypatch):
monkeypatch.setenv('FOO', 'World')
text = 'Hello, $FOO'
assert 'Hello, World' == osext.expandvars(text)
text = 'Hello, ${FOO}'
assert 'Hello, World' == osext.expandvars(text)
@pytest.fixture
def direntries(tmp_path):
# Create a test directory structure
#
# prefix/
# bar/
# bar.txt
# foo.txt
# foobar.txt
# foo/
# bar.txt
# bar.txt
# foo.txt
#
prefix = tmp_path / 'prefix'
target = tmp_path / 'target'
prefix.mkdir()
target.mkdir()
(prefix / 'bar').mkdir(parents=True)
(prefix / 'foo').mkdir(parents=True)
(prefix / 'bar' / 'bar.txt').touch()
(prefix / 'bar' / 'foo.txt').touch()
(prefix / 'bar' / 'foobar.txt').touch()
(prefix / 'foo' / 'bar.txt').touch()
(prefix / 'bar.txt').touch()
(prefix / 'foo.txt').touch()
# Create also a subdirectory in target, so as to check the recursion
(target / 'foo').mkdir(parents=True)
return prefix.resolve(), target.resolve()
def assert_target_directory(src_prefix, dst_prefix, file_links=[]):
'''Verify the directory structure'''
assert os.path.exists(dst_prefix / 'bar' / 'bar.txt')
assert os.path.exists(dst_prefix / 'bar' / 'foo.txt')
assert os.path.exists(dst_prefix / 'bar' / 'foobar.txt')
assert os.path.exists(dst_prefix / 'foo' / 'bar.txt')
assert os.path.exists(dst_prefix / 'bar.txt')
assert os.path.exists(dst_prefix / 'foo.txt')
# Verify the symlinks
for lf in file_links:
target_link_name = os.path.abspath(src_prefix / lf)
link_name = os.path.abspath(dst_prefix / lf)
assert os.path.islink(link_name)
assert target_link_name == os.readlink(link_name)
def test_virtual_copy_nolinks(direntries):
osext.copytree_virtual(*direntries, dirs_exist_ok=True)
assert_target_directory(*direntries)
def test_virtual_copy_nolinks_dirs_exist(direntries):
with pytest.raises(FileExistsError):
osext.copytree_virtual(*direntries)
def test_virtual_copy_valid_links(direntries):
file_links = ['bar/', 'foo/bar.txt', 'foo.txt']
osext.copytree_virtual(*direntries, file_links, dirs_exist_ok=True)
assert_target_directory(*direntries, file_links)
def test_virtual_copy_inexistent_links(direntries):
file_links = ['foobar/', 'foo/bar.txt', 'foo.txt']
with pytest.raises(ValueError):
osext.copytree_virtual(*direntries, file_links, dirs_exist_ok=True)
def test_virtual_copy_absolute_paths(direntries):
file_links = [direntries[0] / 'bar', 'foo/bar.txt', 'foo.txt']
with pytest.raises(ValueError):
osext.copytree_virtual(*direntries, file_links, dirs_exist_ok=True)
def test_virtual_copy_irrelevant_paths(direntries):
file_links = ['/bin', 'foo/bar.txt', 'foo.txt']
with pytest.raises(ValueError):
osext.copytree_virtual(*direntries, file_links, dirs_exist_ok=True)
file_links = [os.path.dirname(direntries[0]), 'foo/bar.txt', 'foo.txt']
with pytest.raises(ValueError):
osext.copytree_virtual(*direntries, file_links, dirs_exist_ok=True)
def test_virtual_copy_linkself(direntries):
file_links = ['.']
with pytest.raises(ValueError):
osext.copytree_virtual(*direntries, file_links, dirs_exist_ok=True)
def test_virtual_copy_linkparent(direntries):
file_links = ['..']
with pytest.raises(ValueError):
osext.copytree_virtual(*direntries, file_links, dirs_exist_ok=True)
@pytest.fixture(params=['symlinks=True', 'symlinks=False'])
def symlinks(request):
return 'True' in request.param
def test_virtual_copy_symlinks_dirs_exist(tmp_path, symlinks):
src = tmp_path / 'src'
src.mkdir()
dst = tmp_path / 'dst'
dst.mkdir()
foo = src / 'foo'
foo.touch()
foo_link = src / 'foo.link'
foo_link.symlink_to(foo)
osext.copytree_virtual(src, dst, symlinks=symlinks, dirs_exist_ok=True)
assert (dst / 'foo').exists()
assert (dst / 'foo.link').exists()
assert (dst / 'foo.link').is_symlink() == symlinks
def test_import_from_file_load_relpath():
module = util.import_module_from_file('reframe/__init__.py')
assert reframe.VERSION == module.VERSION
assert 'reframe' == module.__name__
assert module is sys.modules.get('reframe')
def test_import_from_file_load_directory():
module = util.import_module_from_file('reframe')
assert reframe.VERSION == module.VERSION
assert 'reframe' == module.__name__
assert module is sys.modules.get('reframe')
def test_import_from_file_load_abspath():
filename = os.path.abspath('reframe/__init__.py')
module = util.import_module_from_file(filename)
assert reframe.VERSION == module.VERSION
assert 'reframe' == module.__name__
assert module is sys.modules.get('reframe')
def test_import_from_file_existing_module_name(tmp_path):
test_file = tmp_path / 'os.py'
with open(test_file, 'w') as fp:
print('var = 1', file=fp)
module = util.import_module_from_file(test_file)
assert module.var == 1
assert not hasattr(module, 'path')
assert hasattr(os, 'path')
def test_import_from_file_load_directory_relative():
with osext.change_dir('reframe'):
module = util.import_module_from_file('../reframe')
assert reframe.VERSION == module.VERSION
assert 'reframe' == module.__name__
assert module is sys.modules.get('reframe')
def test_import_from_file_load_relative():
with osext.change_dir('reframe'):
# Load a module from a directory up
module = util.import_module_from_file('../reframe/__init__.py')
assert reframe.VERSION == module.VERSION
assert 'reframe' == module.__name__
assert module is sys.modules.get('reframe')
# Load a module from the current directory
module = util.import_module_from_file('utility/osext.py')
assert 'reframe.utility.osext' == module.__name__
assert module is sys.modules.get('reframe.utility.osext')
def test_import_from_file_load_twice():
filename = os.path.abspath('reframe')
module1 = util.import_module_from_file(filename)
module2 = util.import_module_from_file(filename)
assert module1 is module2
def test_import_from_file_load_namespace_package():
util.import_module_from_file('unittests/resources')
assert 'unittests' in sys.modules
assert 'unittests.resources' in sys.modules
def test_ppretty_simple_types():
assert util.ppretty(1) == repr(1)
assert util.ppretty(1.2) == repr(1.2)
assert util.ppretty('a string') == repr('a string')
assert util.ppretty([]) == '[]'
assert util.ppretty(()) == '()'
assert util.ppretty(set()) == 'set()'
assert util.ppretty({}) == '{}'
assert util.ppretty([1, 2, 3]) == '[\n 1,\n 2,\n 3\n]'
assert util.ppretty((1, 2, 3)) == '(\n 1,\n 2,\n 3\n)'
assert util.ppretty({1, 2, 3}) == '{\n 1,\n 2,\n 3\n}'
assert util.ppretty({'a': 1, 'b': 2}) == ("{\n"
" 'a': 1,\n"
" 'b': 2\n"
"}")
def test_ppretty_mixed_types():
assert (
util.ppretty(['a string', 2, 'another string']) ==
"[\n"
" 'a string',\n"
" 2,\n"
" 'another string'\n"
"]"
)
assert util.ppretty({'a': 1, 'b': (2, 3)}) == ("{\n"
" 'a': 1,\n"
" 'b': (\n"
" 2,\n"
" 3\n"
" )\n"
"}")
assert (
util.ppretty({'a': 1, 'b': {2: {3: 4, 5: {}}}, 'c': 6}) ==
"{\n"
" 'a': 1,\n"
" 'b': {\n"
" 2: {\n"
" 3: 4,\n"
" 5: {}\n"
" }\n"
" },\n"
" 'c': 6\n"
"}")
assert (
util.ppretty({'a': 2, 34: (2, 3),
'b': [[], [1.2, 3.4], {1, 2}]}) ==
"{\n"
" 'a': 2,\n"
" 34: (\n"
" 2,\n"
" 3\n"
" ),\n"
" 'b': [\n"
" [],\n"
" [\n"
" 1.2,\n"
" 3.4\n"
" ],\n"
" {\n"
" 1,\n"
" 2\n"
" }\n"
" ]\n"
"}"
)
def test_ppretty_obj_print():
class C:
def __repr__(self):
return '<class C>'
class D:
def __repr__(self):
return '<class D>'
c = C()
d = D()
assert util.ppretty(c) == '<class C>'
assert util.ppretty(['a', 'b', c, d]) == ("[\n"
" 'a',\n"
" 'b',\n"
" <class C>,\n"
" <class D>\n"
"]")
class _X:
def __init__(self):
self._a = False
class _Y:
def __init__(self, x, a=None):
self.x = x
self.y = 'foo'
self.z = self
self.a = a
def test_repr_default():
c0, c1 = _Y(1), _Y(2, _X())
s = util.repr([c0, c1])
assert s == f'''[
_Y({{
'x': 1,
'y': 'foo',
'z': _Y(...)@{hex(id(c0))},
'a': None
}})@{hex(id(c0))},
_Y({{
'x': 2,
'y': 'foo',
'z': _Y(...)@{hex(id(c1))},
'a': _X({{
'_a': False
}})@{hex(id(c1.a))}
}})@{hex(id(c1))}
]'''
def test_attrs():
class B:
z = fields.TypedField(int)
def __init__(self, x, y):
self.x = x
self.y = y
class C(B):
def __init__(self, x, y):
self._x = x
self.y = y
self.z = 3
def foo():
pass
@property
def x(self):
return self._x
class D(C):
pass
# Test undefined descriptors are not returned
b = B(-1, 0)
b_attrs = util.attrs(b)
assert b_attrs['x'] == -1
assert b_attrs['y'] == 0
assert 'z' not in b_attrs
c = C(1, 2)
c_attrs = util.attrs(c)
assert c_attrs['x'] == 1
assert c_attrs['y'] == 2
assert c_attrs['z'] == 3
assert 'foo' not in c_attrs
# Test inherited attributes
d = D(4, 5)
d_attrs = util.attrs(d)
assert d_attrs['x'] == 4
assert d_attrs['y'] == 5
assert d_attrs['z'] == 3
assert 'foo' not in d_attrs
def test_change_dir_working(tmpdir):
wd_save = os.getcwd()
with osext.change_dir(tmpdir):
assert os.getcwd() == tmpdir
assert os.getcwd() == wd_save
def test_exception_propagation(tmpdir):
wd_save = os.getcwd()
try:
with osext.change_dir(tmpdir):
raise RuntimeError
except RuntimeError:
assert os.getcwd() == wd_save
else:
pytest.fail('exception not propagated by the ctx manager')
def test_allx():
l1 = [1, 1, 1]
l2 = [True, False]
    assert all(l1) and util.allx(l1)
    assert not all(l2) and not util.allx(l2)
assert not util.allx([])
assert util.allx(i for i in [1, 1, 1])
assert util.allx(i for i in range(1, 2))
assert not util.allx(i for i in range(1))
assert not util.allx(i for i in range(0))
with pytest.raises(TypeError):
util.allx(None)
def test_decamelize():
assert '' == util.decamelize('')
assert 'my_base_class' == util.decamelize('MyBaseClass')
assert 'my_base_class12' == util.decamelize('MyBaseClass12')
assert 'my_class_a' == util.decamelize('MyClass_A')
assert 'my_class' == util.decamelize('my_class')
with pytest.raises(TypeError):
util.decamelize(None)
with pytest.raises(TypeError):
util.decamelize(12)
def test_sanitize():
assert '' == util.toalphanum('')
assert 'ab12' == util.toalphanum('ab12')
assert 'ab1_2' == util.toalphanum('ab1_2')
assert 'ab1__2' == util.toalphanum('ab1**2')
assert 'ab__12_' == util.toalphanum('ab (12)')
with pytest.raises(TypeError):
util.toalphanum(None)
with pytest.raises(TypeError):
util.toalphanum(12)
def test_scoped_dict_construction():
d = {
'a': {'k1': 3, 'k2': 4},
'b': {'k3': 5}
}
namespace_dict = reframe.utility.ScopedDict()
namespace_dict = reframe.utility.ScopedDict(d)
# Change local dict and verify that the stored values are not affected
d['a']['k1'] = 10
d['b']['k3'] = 10
assert 3 == namespace_dict['a:k1']
assert 5 == namespace_dict['b:k3']
del d['b']
assert 'b:k3' in namespace_dict
with pytest.raises(TypeError):
reframe.utility.ScopedDict(1)
with pytest.raises(TypeError):
reframe.utility.ScopedDict({'a': 1, 'b': 2})
with pytest.raises(TypeError):
reframe.utility.ScopedDict([('a', 1), ('b', 2)])
with pytest.raises(TypeError):
reframe.utility.ScopedDict({'a': {1: 'k1'}, 'b': {2: 'k2'}})
def test_scoped_dict_contains():
scoped_dict = reframe.utility.ScopedDict({
'a': {'k1': 1, 'k2': 2},
'a:b': {'k1': 3, 'k3': 4},
'a:b:c': {'k2': 5, 'k3': 6},
'*': {'k1': 7, 'k3': 9, 'k4': 10}
})
# Test simple lookup
assert 'a:k1' in scoped_dict
assert 'a:k2' in scoped_dict
assert 'a:k3' in scoped_dict
assert 'a:k4' in scoped_dict
assert 'a:b:k1' in scoped_dict
assert 'a:b:k2' in scoped_dict
assert 'a:b:k3' in scoped_dict
assert 'a:b:k4' in scoped_dict
assert 'a:b:c:k1' in scoped_dict
assert 'a:b:c:k2' in scoped_dict
assert 'a:b:c:k3' in scoped_dict
assert 'a:b:c:k4' in scoped_dict
# Test global scope
assert 'k1' in scoped_dict
assert 'k2' not in scoped_dict
assert 'k3' in scoped_dict
assert 'k4' in scoped_dict
assert ':k1' in scoped_dict
assert ':k2' not in scoped_dict
assert ':k3' in scoped_dict
assert ':k4' in scoped_dict
assert '*:k1' in scoped_dict
assert '*:k2' not in scoped_dict
assert '*:k3' in scoped_dict
assert '*:k4' in scoped_dict
# Try to get full scopes as keys
assert 'a' not in scoped_dict
assert 'a:b' not in scoped_dict
assert 'a:b:c' not in scoped_dict
assert 'a:b:c:d' not in scoped_dict
assert '*' not in scoped_dict
assert '' not in scoped_dict
def test_scoped_dict_iter_keys():
scoped_dict = reframe.utility.ScopedDict({
'a': {'k1': 1, 'k2': 2},
'a:b': {'k1': 3, 'k3': 4},
'a:b:c': {'k2': 5, 'k3': 6},
'*': {'k1': 7, 'k3': 9, 'k4': 10}
})
expected_keys = [
'a:k1', 'a:k2',
'a:b:k1', 'a:b:k3',
'a:b:c:k2', 'a:b:c:k3',
'*:k1', '*:k3', '*:k4'
]
assert sorted(expected_keys) == sorted(k for k in scoped_dict.keys())
def test_scoped_dict_iter_items():
scoped_dict = reframe.utility.ScopedDict({
'a': {'k1': 1, 'k2': 2},
'a:b': {'k1': 3, 'k3': 4},
'a:b:c': {'k2': 5, 'k3': 6},
'*': {'k1': 7, 'k3': 9, 'k4': 10}
})
expected_items = [
('a:k1', 1), ('a:k2', 2),
('a:b:k1', 3), ('a:b:k3', 4),
('a:b:c:k2', 5), ('a:b:c:k3', 6),
('*:k1', 7), ('*:k3', 9), ('*:k4', 10)
]
assert (sorted(expected_items) ==
sorted(item for item in scoped_dict.items()))
def test_scoped_dict_iter_values():
scoped_dict = reframe.utility.ScopedDict({
'a': {'k1': 1, 'k2': 2},
'a:b': {'k1': 3, 'k3': 4},
'a:b:c': {'k2': 5, 'k3': 6},
'*': {'k1': 7, 'k3': 9, 'k4': 10}
})
expected_values = [1, 2, 3, 4, 5, 6, 7, 9, 10]
assert expected_values == sorted(v for v in scoped_dict.values())
def test_scoped_dict_key_resolution():
scoped_dict = reframe.utility.ScopedDict({
'a': {'k1': 1, 'k2': 2},
'a:b': {'k1': 3, 'k3': 4},
'a:b:c': {'k2': 5, 'k3': 6},
'*': {'k1': 7, 'k3': 9, 'k4': 10}
})
assert 1 == scoped_dict['a:k1']
assert 2 == scoped_dict['a:k2']
assert 9 == scoped_dict['a:k3']
assert 10 == scoped_dict['a:k4']
assert 3 == scoped_dict['a:b:k1']
assert 2 == scoped_dict['a:b:k2']
assert 4 == scoped_dict['a:b:k3']
assert 10 == scoped_dict['a:b:k4']
assert 3 == scoped_dict['a:b:c:k1']
assert 5 == scoped_dict['a:b:c:k2']
assert 6 == scoped_dict['a:b:c:k3']
assert 10 == scoped_dict['a:b:c:k4']
# Test global scope
assert 7 == scoped_dict['k1']
with pytest.raises(KeyError):
scoped_dict['k2']
assert 9 == scoped_dict['k3']
assert 10 == scoped_dict['k4']
assert 7 == scoped_dict[':k1']
with pytest.raises(KeyError):
scoped_dict[':k2']
assert 9 == scoped_dict[':k3']
assert 10 == scoped_dict[':k4']
assert 7 == scoped_dict['*:k1']
with pytest.raises(KeyError):
scoped_dict['*:k2']
assert 9 == scoped_dict['*:k3']
assert 10 == scoped_dict['*:k4']
# Try to fool it, by requesting keys with scope names
with pytest.raises(KeyError):
scoped_dict['a']
with pytest.raises(KeyError):
scoped_dict['a:b']
with pytest.raises(KeyError):
scoped_dict['a:b:c']
with pytest.raises(KeyError):
scoped_dict['a:b:c:d']
with pytest.raises(KeyError):
scoped_dict['*']
with pytest.raises(KeyError):
scoped_dict['']
# Scopes must be requested with scope()
assert scoped_dict.scope('a') == {'k1': 1, 'k2': 2, 'k3': 9, 'k4': 10}
assert scoped_dict.scope('a:b') == {'k1': 3, 'k2': 2, 'k3': 4, 'k4': 10}
assert scoped_dict.scope('a:b:c') == {'k1': 3, 'k2': 5, 'k3': 6, 'k4': 10}
assert scoped_dict.scope('*') == {'k1': 7, 'k3': 9, 'k4': 10}
# This is resolved in scope 'a'
assert scoped_dict.scope('a:z') == {'k1': 1, 'k2': 2, 'k3': 9, 'k4': 10}
assert scoped_dict.scope(None) == {}
def test_scoped_dict_setitem():
scoped_dict = reframe.utility.ScopedDict({
'a': {'k1': 1, 'k2': 2},
'a:b': {'k1': 3, 'k3': 4},
'a:b:c': {'k2': 5, 'k3': 6},
'*': {'k1': 7, 'k3': 9, 'k4': 10}
})
scoped_dict['a:k2'] = 20
scoped_dict['c:k2'] = 30
scoped_dict[':k4'] = 40
scoped_dict['*:k5'] = 50
scoped_dict['k6'] = 60
assert 20 == scoped_dict['a:k2']
assert 30 == scoped_dict['c:k2']
assert 40 == scoped_dict[':k4']
assert 50 == scoped_dict['k5']
assert 60 == scoped_dict['k6']
def test_scoped_dict_delitem():
scoped_dict = reframe.utility.ScopedDict({
'a': {'k1': 1, 'k2': 2},
'a:b': {'k1': 3, 'k3': 4},
'a:b:c': {'k2': 5, 'k3': 6},
'*': {'k1': 7, 'k3': 9, 'k4': 10}
})
# delete key
del scoped_dict['a:k1']
assert 7 == scoped_dict['a:k1']
# delete key from global scope
del scoped_dict['k1']
assert 9 == scoped_dict['k3']
assert 10 == scoped_dict['k4']
with pytest.raises(KeyError):
scoped_dict['k1']
# delete a whole scope
del scoped_dict['*']
with pytest.raises(KeyError):
scoped_dict[':k4']
with pytest.raises(KeyError):
scoped_dict['a:k3']
# try to delete a non-existent key
with pytest.raises(KeyError):
del scoped_dict['a:k4']
# test deletion of parent scope keeping a nested one
scoped_dict = reframe.utility.ScopedDict()
scoped_dict['s0:k0'] = 1
scoped_dict['s0:s1:k0'] = 2
scoped_dict['*:k0'] = 3
del scoped_dict['s0']
assert 3 == scoped_dict['s0:k0']
assert 2 == scoped_dict['s0:s1:k0']
def test_scoped_dict_scope_key_name_pseudoconflict():
scoped_dict = reframe.utility.ScopedDict({
's0': {'s1': 1},
's0:s1': {'k0': 2}
})
assert 1 == scoped_dict['s0:s1']
assert 2 == scoped_dict['s0:s1:k0']
del scoped_dict['s0:s1']
assert 2 == scoped_dict['s0:s1:k0']
with pytest.raises(KeyError):
scoped_dict['s0:s1']
def test_scoped_dict_update():
scoped_dict = util.ScopedDict({
'a': {'k1': 1, 'k2': 2},
'a:b': {'k1': 3, 'k3': 4},
'a:b:c': {'k2': 5, 'k3': 6},
'*': {'k1': 7, 'k3': 9, 'k4': 10}
})
scoped_dict_alt = util.ScopedDict({'a': {'k1': 3, 'k2': 5}})
scoped_dict_alt.update({
'a': {'k1': 1, 'k2': 2},
'a:b': {'k1': 3, 'k3': 4},
'a:b:c': {'k2': 5, 'k3': 6},
'*': {'k1': 7, 'k3': 9, 'k4': 10}
})
assert scoped_dict == scoped_dict_alt
def test_sequence_view():
l = util.SequenceView([1, 2, 2])
assert 1 == l[0]
assert 3 == len(l)
assert 2 in l
assert l == [1, 2, 2]
assert l == util.SequenceView([1, 2, 2])
assert list(reversed(l)) == [2, 2, 1]
assert 1 == l.index(2)
assert 2 == l.count(2)
assert str(l) == str([1, 2, 2])
# Assert immutability
m = l + [3, 4]
assert [1, 2, 2, 3, 4] == m
assert isinstance(m, list)
m_orig = m = util.SequenceView([1])
m += [3, 4]
assert m is not m_orig
assert [1] == m_orig
assert [1, 3, 4] == m
assert isinstance(m, list)
n = m + l
assert [1, 3, 4, 1, 2, 2] == n
assert isinstance(n, list)
with pytest.raises(TypeError):
l[1] = 3
with pytest.raises(TypeError):
l[1:2] = [3]
with pytest.raises(TypeError):
l *= 3
with pytest.raises(TypeError):
del l[:1]
with pytest.raises(AttributeError):
l.append(3)
with pytest.raises(AttributeError):
l.clear()
with pytest.raises(AttributeError):
l.copy()
with pytest.raises(AttributeError):
l.extend([3, 4])
with pytest.raises(AttributeError):
l.insert(1, 4)
with pytest.raises(AttributeError):
l.pop()
with pytest.raises(AttributeError):
l.remove(2)
with pytest.raises(AttributeError):
l.reverse()
def test_mapping_view():
d = util.MappingView({'a': 1, 'b': 2})
assert 1 == d['a']
assert 2 == len(d)
assert {'a': 1, 'b': 2} == dict(d)
assert 'b' in d
assert {'a', 'b'} == set(d.keys())
assert {1, 2} == set(d.values())
assert {('a', 1), ('b', 2)} == set(d.items())
assert 2 == d.get('b')
assert 3 == d.get('c', 3)
assert {'a': 1, 'b': 2} == d
assert d == util.MappingView({'b': 2, 'a': 1})
assert str(d) == str({'a': 1, 'b': 2})
assert {'a': 1, 'b': 2, 'c': 3} != d
# Assert immutability
with pytest.raises(TypeError):
d['c'] = 3
with pytest.raises(TypeError):
del d['b']
with pytest.raises(AttributeError):
d.pop('a')
with pytest.raises(AttributeError):
d.popitem()
with pytest.raises(AttributeError):
d.clear()
with pytest.raises(AttributeError):
d.update({'a': 4, 'b': 5})
with pytest.raises(AttributeError):
d.setdefault('c', 3)
@pytest.fixture
def random_seed():
random.seed(1)
def test_shortest_sequence():
s0 = 'abcde'
s1 = [1, 2, 3]
assert util.shortest(s0, s1) == s1
assert id(util.shortest(s0, s1)) == id(s1)
assert util.shortest(s0, s0) == s0
with pytest.raises(TypeError):
util.shortest(12)
with pytest.raises(TypeError):
util.shortest(x for x in range(10))
with pytest.raises(TypeError):
util.shortest([1], 2)
def test_longest_sequence():
s0 = 'abcde'
s1 = [1, 2, 3]
assert util.longest(s0, s1) == s0
assert id(util.longest(s0, s1)) == id(s0)
assert util.longest(s0, s0) == s0
with pytest.raises(TypeError):
util.longest(12)
with pytest.raises(TypeError):
util.longest(x for x in range(10))
with pytest.raises(TypeError):
util.longest([1], 2)
def test_ordered_set_construction(random_seed):
l = list(range(10))
random.shuffle(l)
s = util.OrderedSet(l + l)
assert len(s) == 10
for i in range(10):
assert i in s
assert list(s) == l
def test_ordered_set_construction_empty():
s = util.OrderedSet()
assert s == set()
assert set() == s
def test_ordered_set_str(random_seed):
l = list(range(10))
random.shuffle(l)
s = util.OrderedSet(l)
assert str(s) == str(l).replace('[', '{').replace(']', '}')
s = util.OrderedSet()
assert str(s) == type(s).__name__ + '()'
def test_ordered_set_construction_error():
with pytest.raises(TypeError):
util.OrderedSet(2)
with pytest.raises(TypeError):
util.OrderedSet(1, 2, 3)
def test_ordered_set_repr():
assert repr(util.OrderedSet('abc')) == "{'a', 'b', 'c'}"
assert str(util.OrderedSet('abc')) == "{'a', 'b', 'c'}"
def test_ordered_set_operators():
s0 = util.OrderedSet('abc')
s1 = util.OrderedSet('abced')
s2 = util.OrderedSet('ed')
assert s0 == set('abc')
assert s0 == util.OrderedSet('abc')
assert set('abc') == s0
assert util.OrderedSet('abc') == s0
assert s0 != s1
assert s1 != s0
assert s0 != util.OrderedSet('cab')
assert s0 < s1
assert s0 <= s1
assert s0 <= s0
assert s1 > s0
assert s1 >= s0
assert s1 >= s1
assert s0.issubset(s1)
assert s1.issuperset(s0)
assert (s0 & s1) == s0
assert (s0 & s2) == set()
assert (s0 | s2) == s1
assert (s1 - s0) == s2
assert (s2 - s0) == s2
assert (s0 ^ s1) == s2
assert s0.isdisjoint(s2)
assert not s0.isdisjoint(s1)
assert s0.symmetric_difference(s1) == s2
def test_ordered_set_union(random_seed):
l0 = list(range(10))
l1 = list(range(10, 20))
l2 = list(range(20, 30))
random.shuffle(l0)
random.shuffle(l1)
random.shuffle(l2)
s0 = util.OrderedSet(l0)
s1 = util.OrderedSet(l1)
s2 = util.OrderedSet(l2)
assert list(s0.union(s1, s2)) == l0 + l1 + l2
def test_ordered_set_intersection(random_seed):
l0 = list(range(10, 40))
l1 = list(range(20, 40))
l2 = list(range(20, 30))
random.shuffle(l0)
random.shuffle(l1)
random.shuffle(l2)
s0 = util.OrderedSet(l0)
s1 = util.OrderedSet(l1)
s2 = util.OrderedSet(l2)
# OrderedSet must keep the order of elements in s0
assert list(s0.intersection(s1, s2)) == [x for x in l0
if x >= 20 and x < 30]
def test_ordered_set_difference():
l0 = list(range(10, 40))
l1 = list(range(20, 40))
l2 = list(range(20, 30))
random.shuffle(l0)
random.shuffle(l1)
random.shuffle(l2)
s0 = util.OrderedSet(l0)
s1 = util.OrderedSet(l1)
s2 = util.OrderedSet(l2)
# OrderedSet must keep the order of elements in s0
assert list(s0.difference(s1, s2)) == [x for x in l0 if x >= 10 and x < 20]
def test_ordered_set_reversed(random_seed):
l = list(range(10))
random.shuffle(l)
s = util.OrderedSet(l)
assert list(reversed(s)) == list(reversed(l))
def test_concat_files(tmpdir):
with osext.change_dir(tmpdir):
file1 = 'in1.txt'
file2 = 'in2.txt'
concat_file = 'out.txt'
with open(file1, 'w') as f1:
f1.write('Hello1')
with open(file2, 'w') as f2:
f2.write('Hello2')
osext.concat_files(concat_file, file1, file2, overwrite=True)
with open(concat_file) as cf:
out = cf.read()
assert out == 'Hello1\nHello2\n'
def test_unique_abs_paths():
p1 = 'a/b/c'
p2 = p1[:]
p3 = 'a/b'
p4 = '/d/e//'
p5 = '/d/e/f'
expected_paths = [os.path.abspath('a/b'), '/d/e']
actual_paths = osext.unique_abs_paths(
[p1, p2, p3, p4, p5])
assert expected_paths == actual_paths
expected_paths = [os.path.abspath('a/b/c'), os.path.abspath('a/b'),
'/d/e', '/d/e/f']
actual_paths = osext.unique_abs_paths(
[p1, p2, p3, p4, p5], prune_children=False)
assert expected_paths == actual_paths
with pytest.raises(TypeError):
osext.unique_abs_paths(None)
def test_cray_cdt_version(tmp_path, monkeypatch):
# Mock up a CDT file
rcfile = tmp_path / 'rcfile'
with open(rcfile, 'w') as fp:
fp.write('#%Module CDT 20.06\nblah blah\n')
monkeypatch.setenv('MODULERCFILE', str(rcfile))
assert osext.cray_cdt_version() == '20.06'
def test_cray_cdt_version_unknown_fmt(tmp_path, monkeypatch):
# Mock up a CDT file
rcfile = tmp_path / 'rcfile'
with open(rcfile, 'w') as fp:
fp.write('random stuff')
monkeypatch.setenv('MODULERCFILE', str(rcfile))
assert osext.cray_cdt_version() is None
def test_cray_cdt_version_empty_file(tmp_path, monkeypatch):
# Mock up a CDT file
rcfile = tmp_path / 'rcfile'
rcfile.touch()
monkeypatch.setenv('MODULERCFILE', str(rcfile))
assert osext.cray_cdt_version() is None
def test_cray_cdt_version_no_such_file(tmp_path, monkeypatch):
# Mock up a CDT file
rcfile = tmp_path / 'rcfile'
monkeypatch.setenv('MODULERCFILE', str(rcfile))
assert osext.cray_cdt_version() is None
def test_cray_cle_info(tmp_path):
# Mock up a CLE release
cle_info_file = tmp_path / 'cle-release'
with open(cle_info_file, 'w') as fp:
fp.write('RELEASE=7.0.UP01\n'
'BUILD=7.0.1227\n'
'DATE=20200326\n'
'ARCH=noarch\n'
'NETWORK=ari\n'
'PATCHSET=09-202003261814\n')
cle_info = osext.cray_cle_info(cle_info_file)
assert cle_info.release == '7.0.UP01'
assert cle_info.build == '7.0.1227'
assert cle_info.date == '20200326'
assert cle_info.network == 'ari'
assert cle_info.patchset == '09'
def test_cray_cle_info_no_such_file(tmp_path):
cle_info_file = tmp_path / 'cle-release'
assert osext.cray_cle_info(cle_info_file) is None
def test_cray_cle_info_missing_parts(tmp_path):
# Mock up a CLE release
cle_info_file = tmp_path / 'cle-release'
with open(cle_info_file, 'w') as fp:
fp.write('RELEASE=7.0.UP01\n'
'PATCHSET=09-202003261814\n')
cle_info = osext.cray_cle_info(cle_info_file)
assert cle_info.release == '7.0.UP01'
assert cle_info.build is None
assert cle_info.date is None
assert cle_info.network is None
assert cle_info.patchset == '09'
@pytest.fixture(params=['tmod', 'tmod4', 'lmod', 'nomod'])
def user_exec_ctx(request, make_exec_ctx_g):
if test_util.USER_CONFIG_FILE:
config_file, system = test_util.USER_CONFIG_FILE, test_util.USER_SYSTEM
else:
config_file, system = test_util.BUILTIN_CONFIG_FILE, 'generic'
try:
yield from make_exec_ctx_g(config_file, system,
{'systems/modules_system': request.param})
except ConfigError as e:
pytest.skip(str(e))
@pytest.fixture
def modules_system(user_exec_ctx, monkeypatch, tmp_path):
# Pretend to be on a clean modules environment
monkeypatch.setenv('MODULEPATH', '')
monkeypatch.setenv('LOADEDMODULES', '')
monkeypatch.setenv('_LMFILES_', '')
# Create a symlink to testmod_foo to check for unique module names
# found by `find_modules`
(tmp_path / 'testmod_foo').symlink_to(
os.path.join(test_util.TEST_MODULES, 'testmod_foo')
)
ms = rt.runtime().system.modules_system
ms.searchpath_add(str(tmp_path))
ms.searchpath_add(test_util.TEST_MODULES)
yield ms
ms.searchpath_remove(test_util.TEST_MODULES)
ms.searchpath_remove(str(tmp_path))
def test_find_modules(modules_system):
# The test modules will be found as many times as there are partitions and
# environments in the current system
current_system = rt.runtime().system
ntimes = sum(len(p.environs) for p in current_system.partitions)
found_modules = [m[2] for m in util.find_modules('testmod')]
if modules_system.name == 'nomod':
assert found_modules == []
else:
assert found_modules == ['testmod_bar', 'testmod_base', 'testmod_boo',
'testmod_ext', 'testmod_foo']*ntimes
def test_find_modules_env_mapping(modules_system):
# The test modules will be found as many times as there are partitions and
# environments in the current system
current_system = rt.runtime().system
ntimes = sum(len(p.environs) for p in current_system.partitions)
found_modules = [
m[2] for m in util.find_modules('testmod',
environ_mapping={
r'.*_ba.*': 'builtin',
r'testmod_foo': 'foo'
})
]
if modules_system.name == 'nomod':
assert found_modules == []
else:
assert found_modules == ['testmod_bar', 'testmod_base']*ntimes
def test_find_modules_errors():
with pytest.raises(TypeError):
list(util.find_modules(1))
with pytest.raises(TypeError):
list(util.find_modules(None))
with pytest.raises(TypeError):
list(util.find_modules('foo', 1))
def test_jsonext_dump(tmp_path):
json_dump = tmp_path / 'test.json'
with open(json_dump, 'w') as fp:
jsonext.dump({'foo': sn.defer(['bar'])}, fp)
with open(json_dump, 'r') as fp:
assert '{"foo": null}' == fp.read()
with open(json_dump, 'w') as fp:
jsonext.dump({'foo': sn.defer(['bar']).evaluate()}, fp)
with open(json_dump, 'r') as fp:
assert '{"foo": ["bar"]}' == fp.read()
with open(json_dump, 'w') as fp:
jsonext.dump({'foo': sn.defer(['bar'])}, fp, separators=(',', ':'))
with open(json_dump, 'r') as fp:
assert '{"foo":null}' == fp.read()
def test_jsonext_dumps():
assert '"foo"' == jsonext.dumps('foo')
assert '{"foo": ["bar"]}' == jsonext.dumps(
{'foo': sn.defer(['bar']).evaluate()}
)
assert '{"foo":["bar"]}' == jsonext.dumps(
{'foo': sn.defer(['bar']).evaluate()}, separators=(',', ':')
)
assert '{"(1, 2, 3)": 1}' == jsonext.dumps({(1, 2, 3): 1})
# Classes to test JSON deserialization
class _D(jsonext.JSONSerializable):
def __init__(self):
self.a = 2
self.b = 'bar'
def __eq__(self, other):
if not isinstance(other, _D):
return NotImplemented
return self.a == other.a and self.b == other.b
class _Z(_D):
pass
class _T(jsonext.JSONSerializable):
__slots__ = ('t',)
def __eq__(self, other):
if not isinstance(other, _T):
return NotImplemented
return self.t == other.t
class _C(jsonext.JSONSerializable):
def __init__(self, x, y):
self.x = x
self.y = y
self.z = None
self.w = {1, 2}
self.t = None
# Dump dict with tuples as keys
self.v = {(1, 2): 1}
def __rfm_json_decode__(self, json):
# Sets are converted to lists when encoding, we need to manually
# change them back to sets
self.w = set(json['w'])
def __eq__(self, other):
if not isinstance(other, _C):
return NotImplemented
return (self.x == other.x and
self.y == other.y and
self.z == other.z and
self.w == other.w and
self.t == other.t)
def test_jsonext_load(tmp_path):
c = _C(1, 'foo')
c.x += 1
c.y = 'foobar'
c.z = _Z()
c.z.a += 1
c.z.b = 'barfoo'
c.t = _T()
c.t.t = 5
json_dump = tmp_path / 'test.json'
with open(json_dump, 'w') as fp:
jsonext.dump(c, fp, indent=2)
with open(json_dump, 'r') as fp:
print(fp.read())
with open(json_dump, 'r') as fp:
c_restored = jsonext.load(fp)
assert c == c_restored
assert c is not c_restored
# Do the same with dumps() and loads()
c_restored = jsonext.loads(jsonext.dumps(c))
assert c == c_restored
assert c is not c_restored
def test_attr_validator():
class C:
def __init__(self):
self.x = 3
self.y = [1, 2, 3]
self.z = {'a': 1, 'b': 2}
class D:
def __init__(self):
self.x = 1
self.y = C()
has_no_str = util.attr_validator(lambda x: not isinstance(x, str))
d = D()
assert has_no_str(d)[0]
# Check when a list element does not validate
d.y.y[1] = 'foo'
assert has_no_str(d) == (False, 'D.y.y[1]')
d.y.y[1] = 2
# Check when a dict element does not validate
d.y.z['a'] = 'b'
assert has_no_str(d) == (False, "D.y.z['a']")
d.y.z['a'] = 1
# Check when an attribute does not validate
d.x = 'foo'
assert has_no_str(d) == (False, 'D.x')
d.x = 1
# Check when an attribute does not validate
d.y.x = 'foo'
assert has_no_str(d) == (False, 'D.y.x')
d.y.x = 3
# Check when an attribute does not validate against a custom type
has_no_c = util.attr_validator(lambda x: not isinstance(x, C))
assert has_no_c(d) == (False, 'D.y')
def test_is_picklable():
class X:
pass
x = X()
assert util.is_picklable(x)
assert not util.is_picklable(X)
assert util.is_picklable(1)
assert util.is_picklable([1, 2])
assert util.is_picklable((1, 2))
assert util.is_picklable({1, 2})
assert util.is_picklable({'a': 1, 'b': 2})
class Y:
def __reduce_ex__(self, proto):
raise TypeError
y = Y()
assert not util.is_picklable(y)
class Z:
def __reduce__(self):
return TypeError
# This is still picklable, because __reduce_ex__() is preferred
z = Z()
assert util.is_picklable(z)
def foo():
yield
assert not util.is_picklable(foo)
assert not util.is_picklable(foo())
def test_is_copyable():
class X:
pass
x = X()
assert util.is_copyable(x)
class Y:
def __copy__(self):
pass
y = Y()
assert util.is_copyable(y)
class Z:
def __deepcopy__(self, memo):
pass
z = Z()
assert util.is_copyable(z)
def foo():
yield
assert util.is_copyable(foo)
assert util.is_copyable(len)
assert util.is_copyable(int)
assert not util.is_copyable(foo())
def test_is_trivially_callable():
def foo():
pass
def bar(x, y):
pass
assert util.is_trivially_callable(foo)
assert util.is_trivially_callable(bar, non_def_args=2)
with pytest.raises(TypeError):
util.is_trivially_callable(1)
def test_nodelist_abbrev():
nid_nodes = [f'nid{n:03}' for n in range(5, 20)]
cid_nodes = [f'cid{n:03}' for n in range(20)]
random.shuffle(nid_nodes)
random.shuffle(cid_nodes)
nid_nodes.insert(0, 'nid002')
nid_nodes.insert(0, 'nid001')
nid_nodes.append('nid125')
cid_nodes += ['cid055', 'cid056']
all_nodes = nid_nodes + cid_nodes
random.shuffle(all_nodes)
nodelist = util.nodelist_abbrev
assert nodelist(nid_nodes) == 'nid00[1-2],nid0[05-19],nid125'
assert nodelist(cid_nodes) == 'cid0[00-19],cid05[5-6]'
assert nodelist(all_nodes) == (
'cid0[00-19],cid05[5-6],nid00[1-2],nid0[05-19],nid125'
)
# Test non-contiguous nodes
nid_nodes = []
for i in range(3):
nid_nodes += [f'nid{n:03}' for n in range(10*i, 10*i+5)]
random.shuffle(nid_nodes)
assert nodelist(nid_nodes) == 'nid00[0-4],nid01[0-4],nid02[0-4]'
assert nodelist(['nid01', 'nid10', 'nid20']) == 'nid01,nid10,nid20'
assert nodelist([]) == ''
assert nodelist(['nid001']) == 'nid001'
# Test host names with numbers in their basename (see GH #2357)
nodes = [f'c2-01-{n:02}' for n in range(100)]
assert nodelist(nodes) == 'c2-01-[00-99]'
# Test node duplicates
assert nodelist(['nid001', 'nid001', 'nid002']) == 'nid001,nid00[1-2]'
with pytest.raises(TypeError, match='nodes argument must be a Sequence'):
nodelist(1)
with pytest.raises(TypeError, match='nodes argument cannot be a string'):
nodelist('foo')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# netpbmfile.py
# Copyright (c) 2011-2013, Christoph Gohlke
# Copyright (c) 2011-2013, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read and write image data from respectively to Netpbm files.
This implementation follows the Netpbm format specifications at
http://netpbm.sourceforge.net/doc/. No gamma correction is performed.
The following image formats are supported: PBM (bi-level), PGM (grayscale),
PPM (color), PAM (arbitrary), XV thumbnail (RGB332, read-only).
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2013.01.18
Requirements
------------
* `CPython 2.7, 3.2 or 3.3 <http://www.python.org>`_
* `Numpy 1.7 <http://www.numpy.org>`_
* `Matplotlib 1.2 <http://www.matplotlib.org>`_ (optional for plotting)
Examples
--------
>>> im1 = numpy.array([[0, 1],[65534, 65535]], dtype=numpy.uint16)
>>> imsave('_tmp.pgm', im1)
>>> im2 = imread('_tmp.pgm')
>>> assert numpy.all(im1 == im2)
"""
from __future__ import division, print_function
import sys
import re
import math
from copy import deepcopy
import numpy
__version__ = '2013.01.18'
__docformat__ = 'restructuredtext en'
__all__ = ['imread', 'imsave', 'NetpbmFile']
def imread(filename, *args, **kwargs):
"""Return image data from Netpbm file as numpy array.
`args` and `kwargs` are arguments to NetpbmFile.asarray().
Examples
--------
>>> image = imread('_tmp.pgm')
"""
    netpbm = NetpbmFile(filename)
    try:
        image = netpbm.asarray(*args, **kwargs)
    finally:
        netpbm.close()
    return image
def imsave(filename, data, maxval=None, pam=False):
"""Write image data to Netpbm file.
Examples
--------
>>> image = numpy.array([[0, 1],[65534, 65535]], dtype=numpy.uint16)
>>> imsave('_tmp.pgm', image)
"""
    netpbm = NetpbmFile(data, maxval=maxval)
    try:
        netpbm.write(filename, pam=pam)
    finally:
        netpbm.close()
class NetpbmFile(object):
"""Read and write Netpbm PAM, PBM, PGM, PPM, files."""
_types = {b'P1': b'BLACKANDWHITE', b'P2': b'GRAYSCALE', b'P3': b'RGB',
b'P4': b'BLACKANDWHITE', b'P5': b'GRAYSCALE', b'P6': b'RGB',
b'P7 332': b'RGB', b'P7': b'RGB_ALPHA'}
def __init__(self, arg=None, **kwargs):
"""Initialize instance from filename, open file, or numpy array."""
for attr in ('header', 'magicnum', 'width', 'height', 'maxval',
'depth', 'tupltypes', '_filename', '_fh', '_data'):
setattr(self, attr, None)
if arg is None:
self._fromdata([], **kwargs)
elif isinstance(arg, basestring):
self._fh = open(arg, 'rb')
self._filename = arg
self._fromfile(self._fh, **kwargs)
elif hasattr(arg, 'seek'):
self._fromfile(arg, **kwargs)
self._fh = arg
else:
self._fromdata(arg, **kwargs)
def asarray(self, copy=True, cache=False, **kwargs):
"""Return image data from file as numpy array."""
data = self._data
if data is None:
data = self._read_data(self._fh, **kwargs)
if cache:
self._data = data
else:
return data
return deepcopy(data) if copy else data
def write(self, arg, **kwargs):
"""Write instance to file."""
if hasattr(arg, 'seek'):
self._tofile(arg, **kwargs)
else:
with open(arg, 'wb') as fid:
self._tofile(fid, **kwargs)
def close(self):
"""Close open file. Future asarray calls might fail."""
if self._filename and self._fh:
self._fh.close()
self._fh = None
def __del__(self):
self.close()
def _fromfile(self, fh):
"""Initialize instance from open file."""
fh.seek(0)
data = fh.read(4096)
if (len(data) < 7) or not (b'0' < data[1:2] < b'8'):
raise ValueError("Not a Netpbm file:\n%s" % data[:32])
try:
self._read_pam_header(data)
except Exception:
try:
self._read_pnm_header(data)
except Exception:
raise ValueError("Not a Netpbm file:\n%s" % data[:32])
def _read_pam_header(self, data):
"""Read PAM header and initialize instance."""
        regroups = re.search(
            br"(^P7[\n\r]+(?:(?:[\n\r]+)|(?:#.*)|"
            br"(HEIGHT\s+\d+)|(WIDTH\s+\d+)|(DEPTH\s+\d+)|(MAXVAL\s+\d+)|"
            br"(?:TUPLTYPE\s+\w+))*ENDHDR\n)", data).groups()
self.header = regroups[0]
self.magicnum = b'P7'
for group in regroups[1:]:
key, value = group.split()
setattr(self, unicode(key).lower(), int(value))
        matches = re.findall(br"(TUPLTYPE\s+\w+)", self.header)
self.tupltypes = [s.split(None, 1)[1] for s in matches]
def _read_pnm_header(self, data):
"""Read PNM header and initialize instance."""
bpm = data[1:2] in b"14"
regroups = re.search(b"".join((
b"(^(P[123456]|P7 332)\s+(?:#.*[\r\n])*",
b"\s*(\d+)\s+(?:#.*[\r\n])*",
b"\s*(\d+)\s+(?:#.*[\r\n])*" * (not bpm),
b"\s*(\d+)\s(?:\s*#.*[\r\n]\s)*)")), data).groups() + (1, ) * bpm
self.header = regroups[0]
self.magicnum = regroups[1]
self.width = int(regroups[2])
self.height = int(regroups[3])
self.maxval = int(regroups[4])
self.depth = 3 if self.magicnum in b"P3P6P7 332" else 1
self.tupltypes = [self._types[self.magicnum]]
def _read_data(self, fh, byteorder='>'):
"""Return image data from open file as numpy array."""
fh.seek(len(self.header))
data = fh.read()
dtype = 'u1' if self.maxval < 256 else byteorder + 'u2'
depth = 1 if self.magicnum == b"P7 332" else self.depth
shape = [-1, self.height, self.width, depth]
size = numpy.prod(shape[1:])
if self.magicnum in b"P1P2P3":
data = numpy.array(data.split(None, size)[:size], dtype)
data = data.reshape(shape)
elif self.maxval == 1:
shape[2] = int(math.ceil(self.width / 8))
data = numpy.frombuffer(data, dtype).reshape(shape)
data = numpy.unpackbits(data, axis=-2)[:, :, :self.width, :]
else:
data = numpy.frombuffer(data, dtype)
data = data[:size * (data.size // size)].reshape(shape)
if data.shape[0] < 2:
data = data.reshape(data.shape[1:])
if data.shape[-1] < 2:
data = data.reshape(data.shape[:-1])
if self.magicnum == b"P7 332":
rgb332 = numpy.array(list(numpy.ndindex(8, 8, 4)), numpy.uint8)
rgb332 *= [36, 36, 85]
data = numpy.take(rgb332, data, axis=0)
return data
def _fromdata(self, data, maxval=None):
"""Initialize instance from numpy array."""
data = numpy.array(data, ndmin=2, copy=True)
if data.dtype.kind not in "uib":
raise ValueError("not an integer type: %s" % data.dtype)
if data.dtype.kind == 'i' and numpy.min(data) < 0:
raise ValueError("data out of range: %i" % numpy.min(data))
if maxval is None:
maxval = numpy.max(data)
maxval = 255 if maxval < 256 else 65535
if maxval < 0 or maxval > 65535:
raise ValueError("data out of range: %i" % maxval)
data = data.astype('u1' if maxval < 256 else '>u2')
self._data = data
if data.ndim > 2 and data.shape[-1] in (3, 4):
self.depth = data.shape[-1]
self.width = data.shape[-2]
self.height = data.shape[-3]
self.magicnum = b'P7' if self.depth == 4 else b'P6'
else:
self.depth = 1
self.width = data.shape[-1]
self.height = data.shape[-2]
self.magicnum = b'P5' if maxval > 1 else b'P4'
self.maxval = maxval
self.tupltypes = [self._types[self.magicnum]]
self.header = self._header()
def _tofile(self, fh, pam=False):
"""Write Netbm file."""
fh.seek(0)
fh.write(self._header(pam))
data = self.asarray(copy=False)
if self.maxval == 1:
data = numpy.packbits(data, axis=-1)
data.tofile(fh)
def _header(self, pam=False):
"""Return file header as byte string."""
if pam or self.magicnum == b'P7':
header = "\n".join((
"P7",
"HEIGHT %i" % self.height,
"WIDTH %i" % self.width,
"DEPTH %i" % self.depth,
"MAXVAL %i" % self.maxval,
"\n".join("TUPLTYPE %s" % unicode(i) for i in self.tupltypes),
"ENDHDR\n"))
elif self.maxval == 1:
header = "P4 %i %i\n" % (self.width, self.height)
elif self.depth == 1:
header = "P5 %i %i %i\n" % (self.width, self.height, self.maxval)
else:
header = "P6 %i %i %i\n" % (self.width, self.height, self.maxval)
if sys.version_info[0] > 2:
header = bytes(header, 'ascii')
return header
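    # Hedged sketch of the header strings this method can produce (the sizes
    # below are illustrative, not taken from a real file):
    #
    #   P4 640 480\n                      bi-level PBM
    #   P5 640 480 255\n                  8-bit grayscale PGM
    #   P6 640 480 255\n                  8-bit RGB PPM
    #   P7\nHEIGHT 480\nWIDTH 640\nDEPTH 4\nMAXVAL 255\nTUPLTYPE RGB_ALPHA\nENDHDR\n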
def __str__(self):
"""Return information about instance."""
return unicode(self.header)
if sys.version_info[0] > 2:
basestring = str
unicode = lambda x: str(x, 'ascii')
if __name__ == "__main__":
# Show images specified on command line or all images in current directory
from glob import glob
from matplotlib import pyplot
files = sys.argv[1:] if len(sys.argv) > 1 else glob('*.p*m')
for fname in files:
try:
pam = NetpbmFile(fname)
img = pam.asarray(copy=False)
if False:
pam.write('_tmp.pgm.out', pam=True)
img2 = imread('_tmp.pgm.out')
assert numpy.all(img == img2)
imsave('_tmp.pgm.out', img)
img2 = imread('_tmp.pgm.out')
assert numpy.all(img == img2)
pam.close()
except ValueError as e:
print(fname, e)
continue
_shape = img.shape
if img.ndim > 3 or (img.ndim > 2 and img.shape[-1] not in (3, 4)):
img = img[0]
cmap = 'gray' if pam.maxval > 1 else 'binary'
pyplot.imshow(img, cmap, interpolation='nearest')
pyplot.title("%s %s %s %s" % (fname, unicode(pam.magicnum),
_shape, img.dtype))
pyplot.show()
|
import numpy as np
import vedo
import vedo.colors as colors
import vedo.utils as utils
import vtk
from vtk.util.numpy_support import numpy_to_vtk
__doc__ = ("Submodule to work with common format images." + vedo.docs._defs)
__all__ = ["Picture"]
#################################################
def _get_img(obj, flip=False):
# get vtkImageData from numpy array
obj = np.asarray(obj)
    if obj.ndim == 3:  # has shape (ny, nx, ncolor_alpha_chan)
        iac = vtk.vtkImageAppendComponents()
        nchan = obj.shape[2]  # get number of channels in input image (L/LA/RGB/RGBA)
for i in range(nchan):
if flip:
arr = np.flip(np.flip(obj[:,:,i], 0), 0).ravel()
else:
arr = np.flip(obj[:,:,i], 0).ravel()
arr = np.clip(arr, 0, 255)
varb = numpy_to_vtk(arr, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
varb.SetName("RGBA")
imgb = vtk.vtkImageData()
imgb.SetDimensions(obj.shape[1], obj.shape[0], 1)
imgb.GetPointData().AddArray(varb)
imgb.GetPointData().SetActiveScalars("RGBA")
iac.AddInputData(imgb)
iac.Update()
img = iac.GetOutput()
elif obj.ndim == 2: # black and white
if flip:
arr = np.flip(obj[:,:], 0).ravel()
else:
arr = obj.ravel()
arr = np.clip(arr, 0, 255)
varb = numpy_to_vtk(arr, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
varb.SetName("RGBA")
img = vtk.vtkImageData()
img.SetDimensions(obj.shape[1], obj.shape[0], 1)
img.GetPointData().AddArray(varb)
img.GetPointData().SetActiveScalars("RGBA")
return img
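# Minimal hedged sketch (not part of the vedo API) showing what _get_img does:
# it packs a numpy image, channel by channel, into a vtkImageData whose
# dimensions are (ncols, nrows, 1). The array below is made up.
def _example_get_img():
    rgb = np.random.randint(0, 255, size=(100, 200, 3), dtype=np.uint8)
    img = _get_img(rgb)
    return img.GetDimensions()  # -> (200, 100, 1)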
#################################################
class Picture(vtk.vtkImageActor, vedo.base.Base3DProp):
"""
Derived class of ``vtkImageActor``. Used to represent 2D pictures.
Can be instantiated with a path file name or with a numpy array.
Use `Picture.shape` to access the number of pixels in x and y.
|rotateImage| |rotateImage.py|_
:param list channels: only select these specific rgba channels (useful to remove alpha)
:param bool flip: flip xy axis convention (when input is a numpy array)
"""
def __init__(self, obj=None, channels=(), flip=False):
vtk.vtkImageActor.__init__(self)
vedo.base.Base3DProp.__init__(self)
if utils.isSequence(obj) and len(obj): # passing array
img = _get_img(obj, flip)
elif isinstance(obj, vtk.vtkImageData):
img = obj
elif isinstance(obj, str):
if "https://" in obj:
obj = vedo.io.download(obj, verbose=False)
fname = obj.lower()
if fname.endswith(".png"):
picr = vtk.vtkPNGReader()
elif fname.endswith(".jpg") or fname.endswith(".jpeg"):
picr = vtk.vtkJPEGReader()
elif fname.endswith(".bmp"):
picr = vtk.vtkBMPReader()
elif fname.endswith(".tif") or fname.endswith(".tiff"):
picr = vtk.vtkTIFFReader()
picr.SetOrientationType(vedo.settings.tiffOrientationType)
else:
colors.printc("Cannot understand picture format", obj, c='r')
return
picr.SetFileName(obj)
self.filename = obj
picr.Update()
img = picr.GetOutput()
else:
img = vtk.vtkImageData()
# select channels
nchans = len(channels)
if nchans and img.GetPointData().GetScalars().GetNumberOfComponents() > nchans:
pec = vtk.vtkImageExtractComponents()
pec.SetInputData(img)
if nchans == 3:
pec.SetComponents(channels[0], channels[1], channels[2])
elif nchans == 2:
pec.SetComponents(channels[0], channels[1])
elif nchans == 1:
pec.SetComponents(channels[0])
pec.Update()
img = pec.GetOutput()
self._data = img
self.SetInputData(img)
sx,sy,_ = img.GetDimensions()
self.shape = np.array([sx,sy])
self._mapper = self.GetMapper()
def inputdata(self):
"""Return the underlying ``vtkImagaData`` object."""
return self._data
def dimensions(self):
nx, ny, _ = self._data.GetDimensions()
return np.array([nx, ny])
def channels(self):
return self._data.GetPointData().GetScalars().GetNumberOfComponents()
def _update(self, data):
"""Overwrite the Picture data mesh with a new data."""
self._data = data
self._mapper.SetInputData(data)
self._mapper.Modified()
return self
def clone(self, transform=False):
"""Return an exact copy of the input Picture.
If transform is True, it is given the same scaling and position."""
img = vtk.vtkImageData()
img.DeepCopy(self._data)
pic = Picture(img)
if transform:
# assign the same transformation to the copy
pic.SetOrigin(self.GetOrigin())
pic.SetScale(self.GetScale())
pic.SetOrientation(self.GetOrientation())
pic.SetPosition(self.GetPosition())
return pic
def extent(self, ext=None):
"""
Get or set the physical extent that the picture spans.
Format is ext=[minx, maxx, miny, maxy].
"""
if ext is None:
return self._data.GetExtent()
else:
self._data.SetExtent(ext[0],ext[1],ext[2],ext[3],0,0)
self._mapper.Modified()
return self
def alpha(self, a=None):
"""Set/get picture's transparency in the rendering scene."""
if a is not None:
self.GetProperty().SetOpacity(a)
return self
else:
return self.GetProperty().GetOpacity()
def level(self, value=None):
"""Get/Set the image color level (brightness) in the rendering scene."""
if value is None:
return self.GetProperty().GetColorLevel()
self.GetProperty().SetColorLevel(value)
return self
def window(self, value=None):
"""Get/Set the image color window (contrast) in the rendering scene."""
if value is None:
return self.GetProperty().GetColorWindow()
self.GetProperty().SetColorWindow(value)
return self
def crop(self, top=None, bottom=None, right=None, left=None, pixels=False):
"""Crop picture.
:param float top: fraction to crop from the top margin
:param float bottom: fraction to crop from the bottom margin
:param float left: fraction to crop from the left margin
:param float right: fraction to crop from the right margin
:param bool pixels: units are pixels
"""
extractVOI = vtk.vtkExtractVOI()
extractVOI.SetInputData(self._data)
extractVOI.IncludeBoundaryOn()
d = self.GetInput().GetDimensions()
if pixels:
extractVOI.SetVOI(right, d[0]-left, bottom, d[1]-top, 0, 0)
else:
bx0, bx1, by0, by1 = 0, d[0]-1, 0, d[1]-1
if left is not None: bx0 = int((d[0]-1)*left)
if right is not None: bx1 = int((d[0]-1)*(1-right))
if bottom is not None: by0 = int((d[1]-1)*bottom)
if top is not None: by1 = int((d[1]-1)*(1-top))
extractVOI.SetVOI(bx0, bx1, by0, by1, 0, 0)
extractVOI.Update()
return self._update(extractVOI.GetOutput())
def pad(self, pixels=10, value=255):
"""
Add the specified number of pixels at the picture borders.
Pixels can be a list formatted as [left,right,bottom,top].
Parameters
----------
pixels : int,list , optional
number of pixels to be added (or a list of length 4). The default is 10.
value : int, optional
intensity value (gray-scale color) of the padding. The default is 255.
"""
x0,x1,y0,y1,_z0,_z1 = self._data.GetExtent()
pf = vtk.vtkImageConstantPad()
pf.SetInputData(self._data)
pf.SetConstant(value)
if utils.isSequence(pixels):
pf.SetOutputWholeExtent(x0-pixels[0],x1+pixels[1],
y0-pixels[2],y1+pixels[3], 0,0)
else:
pf.SetOutputWholeExtent(x0-pixels,x1+pixels, y0-pixels,y1+pixels, 0,0)
pf.Update()
img = pf.GetOutput()
return self._update(img)
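    # Hedged usage sketch for pad(); the file name is illustrative:
    #
    #   pic = Picture('dog.jpg')
    #   pic.pad(pixels=20, value=0)              # 20 black pixels on every side
    #   pic.pad(pixels=[10,10,0,0], value=255)   # pad left/right only, in white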
def tile(self, nx=4, ny=4, shift=(0,0)):
"""
Generate a tiling from the current picture by mirroring and repeating it.
Parameters
----------
nx : float, optional
number of repeats along x. The default is 4.
        ny : float, optional
            number of repeats along y. The default is 4.
        shift : list, optional
            shift in x and y in pixels. The default is (0,0).
"""
x0,x1,y0,y1,z0,z1 = self._data.GetExtent()
constantPad = vtk.vtkImageMirrorPad()
constantPad.SetInputData(self._data)
constantPad.SetOutputWholeExtent(int(x0+shift[0]+0.5), int(x1*nx+shift[0]+0.5),
int(y0+shift[1]+0.5), int(y1*ny+shift[1]+0.5), z0,z1)
constantPad.Update()
return Picture(constantPad.GetOutput())
def append(self, pictures, axis='z', preserveExtents=False):
"""
Append the input images to the current one along the specified axis.
Except for the append axis, all inputs must have the same extent.
All inputs must have the same number of scalar components.
The output has the same origin and spacing as the first input.
The origin and spacing of all other inputs are ignored.
All inputs must have the same scalar type.
:param int,str axis: axis expanded to hold the multiple images.
:param bool preserveExtents: if True, the extent of the inputs is used to place
the image in the output. The whole extent of the output is the union of the input
whole extents. Any portion of the output not covered by the inputs is set to zero.
The origin and spacing is taken from the first input.
.. code-block:: python
from vedo import Picture, dataurl
pic = Picture(dataurl+'dog.jpg').pad()
pic.append([pic,pic,pic], axis='y')
pic.append([pic,pic,pic,pic], axis='x')
pic.show(axes=1)
"""
ima = vtk.vtkImageAppend()
ima.SetInputData(self._data)
if not utils.isSequence(pictures):
pictures = [pictures]
for p in pictures:
if isinstance(p, vtk.vtkImageData):
ima.AddInputData(p)
else:
ima.AddInputData(p._data)
ima.SetPreserveExtents(preserveExtents)
if axis == "x":
axis = 0
elif axis == "y":
axis = 1
ima.SetAppendAxis(axis)
ima.Update()
return self._update(ima.GetOutput())
def resize(self, newsize):
"""Resize the image resolution by specifying the number of pixels in width and height.
If left to zero, it will be automatically calculated to keep the original aspect ratio.
:param list,float newsize: shape of picture as [npx, npy], or as a fraction.
"""
old_dims = np.array(self._data.GetDimensions())
if not utils.isSequence(newsize):
newsize = (old_dims * newsize + 0.5).astype(int)
if not newsize[1]:
ar = old_dims[1]/old_dims[0]
newsize = [newsize[0], int(newsize[0]*ar+0.5)]
if not newsize[0]:
ar = old_dims[0]/old_dims[1]
newsize = [int(newsize[1]*ar+0.5), newsize[1]]
newsize = [newsize[0], newsize[1], old_dims[2]]
rsz = vtk.vtkImageResize()
rsz.SetInputData(self._data)
rsz.SetResizeMethodToOutputDimensions()
rsz.SetOutputDimensions(newsize)
rsz.Update()
out = rsz.GetOutput()
out.SetSpacing(1,1,1)
return self._update(out)
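    # Hedged usage sketch for resize():
    #
    #   pic.resize(0.5)        # halve the resolution, keeping the aspect ratio
    #   pic.resize([200, 0])   # width 200 px, height derived from the aspect ratio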
def mirror(self, axis="x"):
"""Mirror picture along x or y axis. Same as flip()."""
ff = vtk.vtkImageFlip()
ff.SetInputData(self.inputdata())
if axis.lower() == "x":
ff.SetFilteredAxis(0)
elif axis.lower() == "y":
ff.SetFilteredAxis(1)
else:
colors.printc("Error in mirror(): mirror must be set to x or y.", c='r')
raise RuntimeError()
ff.Update()
return self._update(ff.GetOutput())
def flip(self, axis="y"):
"""Mirror picture along x or y axis. Same as mirror()."""
return self.mirror(axis=axis)
def rotate(self, angle, center=(), scale=1, mirroring=False, bc='w', alpha=1):
"""
Rotate by the specified angle (anticlockwise).
Parameters
----------
angle : float
rotation angle in degrees.
center: list
center of rotation (x,y) in pixels.
"""
bounds = self.bounds()
pc = [0,0,0]
if center:
pc[0] = center[0]
pc[1] = center[1]
else:
pc[0] = (bounds[1] + bounds[0]) / 2.0
pc[1] = (bounds[3] + bounds[2]) / 2.0
pc[2] = (bounds[5] + bounds[4]) / 2.0
transform = vtk.vtkTransform()
transform.Translate(pc)
transform.RotateWXYZ(-angle, 0, 0, 1)
transform.Scale(1/scale,1/scale,1)
transform.Translate(-pc[0], -pc[1], -pc[2])
reslice = vtk.vtkImageReslice()
reslice.SetMirror(mirroring)
c = np.array(colors.getColor(bc))*255
reslice.SetBackgroundColor([c[0],c[1],c[2], alpha*255])
reslice.SetInputData(self._data)
reslice.SetResliceTransform(transform)
reslice.SetOutputDimensionality(2)
reslice.SetInterpolationModeToCubic()
reslice.SetOutputSpacing(self._data.GetSpacing())
reslice.SetOutputOrigin(self._data.GetOrigin())
reslice.SetOutputExtent(self._data.GetExtent())
reslice.Update()
return self._update(reslice.GetOutput())
def select(self, component):
"""Select one single component of the rgb image"""
ec = vtk.vtkImageExtractComponents()
ec.SetInputData(self._data)
ec.SetComponents(component)
ec.Update()
return Picture(ec.GetOutput())
def bw(self):
"""Make it black and white using luminance calibration"""
n = self._data.GetPointData().GetNumberOfComponents()
if n==4:
ecr = vtk.vtkImageExtractComponents()
ecr.SetInputData(self._data)
ecr.SetComponents(0,1,2)
ecr.Update()
img = ecr.GetOutput()
else:
img = self._data
ecr = vtk.vtkImageLuminance()
ecr.SetInputData(img)
ecr.Update()
return self._update(ecr.GetOutput())
def smooth(self, sigma=3, radius=None):
"""
Smooth a Picture with Gaussian kernel.
Parameters
----------
sigma : int, optional
number of sigmas in pixel units. The default is 3.
        radius : float or list, optional
            how far out the gaussian kernel will go before being clamped to zero. The default is None.
"""
gsf = vtk.vtkImageGaussianSmooth()
gsf.SetDimensionality(2)
gsf.SetInputData(self._data)
if radius is not None:
if utils.isSequence(radius):
gsf.SetRadiusFactors(radius[0],radius[1])
else:
gsf.SetRadiusFactor(radius)
if utils.isSequence(sigma):
gsf.SetStandardDeviations(sigma[0], sigma[1])
else:
gsf.SetStandardDeviation(sigma)
gsf.Update()
return self._update(gsf.GetOutput())
def median(self):
"""Median filter that preserves thin lines and corners.
It operates on a 5x5 pixel neighborhood. It computes two values initially:
the median of the + neighbors and the median of the x neighbors.
It then computes the median of these two values plus the center pixel.
        The result of this second median is the output pixel value.
"""
medf = vtk.vtkImageHybridMedian2D()
medf.SetInputData(self._data)
medf.Update()
return self._update(medf.GetOutput())
def enhance(self):
"""
Enhance a b&w picture using the laplacian, enhancing high-freq edges.
Example:
.. code-block:: python
import vedo
p = vedo.Picture(vedo.dataurl+'images/dog.jpg').bw()
vedo.show(p, p.clone().enhance(), N=2, mode='image')
"""
img = self._data
scalarRange = img.GetPointData().GetScalars().GetRange()
cast = vtk.vtkImageCast()
cast.SetInputData(img)
cast.SetOutputScalarTypeToDouble()
cast.Update()
laplacian = vtk.vtkImageLaplacian()
laplacian.SetInputData(cast.GetOutput())
laplacian.SetDimensionality(2)
laplacian.Update()
subtr = vtk.vtkImageMathematics()
subtr.SetInputData(0, cast.GetOutput())
subtr.SetInputData(1, laplacian.GetOutput())
subtr.SetOperationToSubtract()
subtr.Update()
colorWindow = scalarRange[1] - scalarRange[0]
colorLevel = colorWindow / 2
originalColor = vtk.vtkImageMapToWindowLevelColors()
originalColor.SetWindow(colorWindow)
originalColor.SetLevel(colorLevel)
originalColor.SetInputData(subtr.GetOutput())
originalColor.Update()
return self._update(originalColor.GetOutput())
def fft(self, mode='magnitude', logscale=12, center=True):
"""Fast Fourier transform of a picture.
:param float logscale: if non-zero, take the logarithm of the
intensity and scale it by this factor.
:param str mode: either [magnitude, real, imaginary, complex], compute the
point array data accordingly.
:param bool center: shift constant zero-frequency to the center of the image for display.
(FFT converts spatial images into frequency space, but puts the zero frequency at the origin)
"""
ffti = vtk.vtkImageFFT()
ffti.SetInputData(self._data)
ffti.Update()
if 'mag' in mode:
mag = vtk.vtkImageMagnitude()
mag.SetInputData(ffti.GetOutput())
mag.Update()
out = mag.GetOutput()
elif 'real' in mode:
extractRealFilter = vtk.vtkImageExtractComponents()
extractRealFilter.SetInputData(ffti.GetOutput())
extractRealFilter.SetComponents(0)
extractRealFilter.Update()
out = extractRealFilter.GetOutput()
elif 'imaginary' in mode:
extractImgFilter = vtk.vtkImageExtractComponents()
extractImgFilter.SetInputData(ffti.GetOutput())
extractImgFilter.SetComponents(1)
extractImgFilter.Update()
out = extractImgFilter.GetOutput()
elif 'complex' in mode:
out = ffti.GetOutput()
else:
colors.printc("Error in fft(): unknown mode", mode)
raise RuntimeError()
if center:
center = vtk.vtkImageFourierCenter()
center.SetInputData(out)
center.Update()
out = center.GetOutput()
if 'complex' not in mode:
if logscale:
ils = vtk.vtkImageLogarithmicScale()
ils.SetInputData(out)
ils.SetConstant(logscale)
ils.Update()
out = ils.GetOutput()
return Picture(out)
def rfft(self, mode='magnitude'):
"""Reverse Fast Fourier transform of a picture."""
ffti = vtk.vtkImageRFFT()
ffti.SetInputData(self._data)
ffti.Update()
if 'mag' in mode:
mag = vtk.vtkImageMagnitude()
mag.SetInputData(ffti.GetOutput())
mag.Update()
out = mag.GetOutput()
elif 'real' in mode:
extractRealFilter = vtk.vtkImageExtractComponents()
extractRealFilter.SetInputData(ffti.GetOutput())
extractRealFilter.SetComponents(0)
extractRealFilter.Update()
out = extractRealFilter.GetOutput()
elif 'imaginary' in mode:
extractImgFilter = vtk.vtkImageExtractComponents()
extractImgFilter.SetInputData(ffti.GetOutput())
extractImgFilter.SetComponents(1)
extractImgFilter.Update()
out = extractImgFilter.GetOutput()
elif 'complex' in mode:
out = ffti.GetOutput()
else:
colors.printc("Error in rfft(): unknown mode", mode)
raise RuntimeError()
return Picture(out)
def filterpass(self, lowcutoff=None, highcutoff=None, order=3):
"""
Low-pass and high-pass filtering become trivial in the frequency domain.
A portion of the pixels/voxels are simply masked or attenuated.
        This function applies Butterworth low-pass and/or high-pass filters that
        attenuate the frequency-domain image with the function
        |G_Of_Omega|
        The gradual attenuation of the filter is important.
        A simple high-pass filter would simply mask a set of pixels in the frequency domain,
        but the abrupt transition would cause a ringing effect in the spatial domain.
        :param list lowcutoff: frequencies below this cutoff are attenuated (high-pass)
        :param list highcutoff: frequencies above this cutoff are attenuated (low-pass)
:param int order: order determines sharpness of the cutoff curve
"""
#https://lorensen.github.io/VTKExamples/site/Cxx/ImageProcessing/IdealHighPass
fft = vtk.vtkImageFFT()
fft.SetInputData(self._data)
fft.Update()
out = fft.GetOutput()
if highcutoff:
butterworthLowPass = vtk.vtkImageButterworthLowPass()
butterworthLowPass.SetInputData(out)
butterworthLowPass.SetCutOff(highcutoff)
butterworthLowPass.SetOrder(order)
butterworthLowPass.Update()
out = butterworthLowPass.GetOutput()
if lowcutoff:
butterworthHighPass = vtk.vtkImageButterworthHighPass()
butterworthHighPass.SetInputData(out)
butterworthHighPass.SetCutOff(lowcutoff)
butterworthHighPass.SetOrder(order)
butterworthHighPass.Update()
out = butterworthHighPass.GetOutput()
butterworthRfft = vtk.vtkImageRFFT()
butterworthRfft.SetInputData(out)
butterworthRfft.Update()
butterworthReal = vtk.vtkImageExtractComponents()
butterworthReal.SetInputData(butterworthRfft.GetOutput())
butterworthReal.SetComponents(0)
butterworthReal.Update()
caster = vtk.vtkImageCast()
        caster.SetOutputScalarTypeToUnsignedChar()
caster.SetInputData(butterworthReal.GetOutput())
caster.Update()
return self._update(caster.GetOutput())
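    # Hedged usage sketch for filterpass(); cutoff values are illustrative:
    #
    #   pic.filterpass(lowcutoff=0.02)                  # attenuate below 0.02 (high-pass)
    #   pic.filterpass(highcutoff=0.1)                  # attenuate above 0.1 (low-pass)
    #   pic.filterpass(lowcutoff=0.02, highcutoff=0.1)  # band-pass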
def blend(self, pic, alpha1=0.5, alpha2=0.5):
"""Take L, LA, RGB, or RGBA images as input and blends
them according to the alpha values and/or the opacity setting for each input.
"""
blf = vtk.vtkImageBlend()
blf.AddInputData(self._data)
blf.AddInputData(pic._data)
blf.SetOpacity(0, alpha1)
blf.SetOpacity(1, alpha2)
blf.SetBlendModeToNormal()
blf.Update()
return self._update(blf.GetOutput())
def warp(self, sourcePts=(), targetPts=(), transform=None, sigma=1,
mirroring=False, bc='w', alpha=1):
"""
Warp an image using thin-plate splines.
Parameters
----------
sourcePts : list, optional
source points.
targetPts : list, optional
target points.
        transform : vtkTransform, optional
            a vtkTransform object can be supplied. The default is None.
        sigma : float, optional
            stiffness of the interpolation. The default is 1.
        mirroring : bool, optional
            fill the margins with a reflection of the original image. The default is False.
        bc : color, optional
            fill the margins with a solid color. The default is 'w'.
        alpha : float, optional
            opacity of the filled margins. The default is 1.
"""
if transform is None:
# source and target must be filled
transform = vtk.vtkThinPlateSplineTransform()
transform.SetBasisToR2LogR()
if isinstance(sourcePts, vedo.Points):
sourcePts = sourcePts.points()
if isinstance(targetPts, vedo.Points):
targetPts = targetPts.points()
ns = len(sourcePts)
nt = len(targetPts)
if ns != nt:
colors.printc("Error in picture.warp(): #source != #target points", ns, nt, c='r')
raise RuntimeError()
ptsou = vtk.vtkPoints()
ptsou.SetNumberOfPoints(ns)
pttar = vtk.vtkPoints()
pttar.SetNumberOfPoints(nt)
for i in range(ns):
p = sourcePts[i]
ptsou.SetPoint(i, [p[0],p[1],0])
p = targetPts[i]
pttar.SetPoint(i, [p[0],p[1],0])
transform.SetSigma(sigma)
transform.SetSourceLandmarks(pttar)
transform.SetTargetLandmarks(ptsou)
else:
# ignore source and target
pass
reslice = vtk.vtkImageReslice()
reslice.SetInputData(self._data)
reslice.SetOutputDimensionality(2)
reslice.SetResliceTransform(transform)
reslice.SetInterpolationModeToCubic()
reslice.SetMirror(mirroring)
c = np.array(colors.getColor(bc))*255
reslice.SetBackgroundColor([c[0],c[1],c[2], alpha*255])
reslice.Update()
self.transform = transform
return self._update(reslice.GetOutput())
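    # Hedged usage sketch for warp(); point coordinates are illustrative:
    #
    #   src = [[10, 10], [100, 10], [50, 90]]   # pixel landmarks in the original
    #   tgt = [[12, 14], [ 95, 12], [60, 80]]   # where each landmark should land
    #   pic.warp(sourcePts=src, targetPts=tgt, sigma=1, bc='k')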
def invert(self):
"""
Return an inverted picture (inverted in each color channel).
"""
rgb = self.tonumpy()
data = 255 - np.array(rgb)
return self._update(_get_img(data))
def binarize(self, thresh=None, invert=False):
"""Return a new Picture where pixel above threshold are set to 255
and pixels below are set to 0.
Parameters
----------
invert : bool, optional
Invert threshold. Default is False.
Example
-------
.. code-block:: python
from vedo import Picture, show
pic1 = Picture("https://aws.glamour.es/prod/designs/v1/assets/620x459/547577.jpg")
pic2 = pic1.clone().invert()
pic3 = pic1.clone().binarize()
show(pic1, pic2, pic3, N=3, bg="blue9")
"""
rgb = self.tonumpy()
if rgb.ndim == 3:
intensity = np.sum(rgb, axis=2)/3
else:
intensity = rgb
if thresh is None:
vmin, vmax = np.min(intensity), np.max(intensity)
thresh = (vmax+vmin)/2
data = np.zeros_like(intensity).astype(np.uint8)
mask = np.where(intensity>thresh)
if invert:
data += 255
data[mask] = 0
else:
data[mask] = 255
return self._update(_get_img(data, flip=True))
def threshold(self, value=None, flip=False):
"""
        Create a polygonal Mesh from a Picture by filling regions whose pixel
        luminosity is above a specified value.
Parameters
----------
value : float, optional
            The default is None, i.e. 1/3 of the scalar range.
flip: bool, optional
Flip polygon orientations
Returns
-------
Mesh
A polygonal mesh.
"""
mgf = vtk.vtkImageMagnitude()
mgf.SetInputData(self._data)
mgf.Update()
msq = vtk.vtkMarchingSquares()
msq.SetInputData(mgf.GetOutput())
if value is None:
r0,r1 = self._data.GetScalarRange()
value = r0 + (r1-r0)/3
msq.SetValue(0, value)
msq.Update()
if flip:
rs = vtk.vtkReverseSense()
rs.SetInputData(msq.GetOutput())
rs.ReverseCellsOn()
rs.ReverseNormalsOff()
rs.Update()
output = rs.GetOutput()
else:
output = msq.GetOutput()
ctr = vtk.vtkContourTriangulator()
ctr.SetInputData(output)
ctr.Update()
return vedo.Mesh(ctr.GetOutput(), c='k').bc('t').lighting('off')
def tomesh(self):
"""
Convert an image to polygonal data (quads),
        with each polygon vertex assigned an RGBA value.
"""
dims = self._data.GetDimensions()
gr = vedo.shapes.Grid(sx=dims[0], sy=dims[1], resx=dims[0]-1, resy=dims[1]-1)
gr.pos(int(dims[0]/2), int(dims[1]/2)).pickable(True).wireframe(False).lw(0)
self._data.GetPointData().GetScalars().SetName("RGBA")
gr.inputdata().GetPointData().AddArray(self._data.GetPointData().GetScalars())
gr.inputdata().GetPointData().SetActiveScalars("RGBA")
gr._mapper.SetArrayName("RGBA")
gr._mapper.SetScalarModeToUsePointData()
# gr._mapper.SetColorModeToDirectScalars()
gr._mapper.ScalarVisibilityOn()
gr.name = self.name
gr.filename = self.filename
return gr
def tonumpy(self):
"""Get read-write access to pixels of a Picture object as a numpy array.
        Note that the shape is (ny, nx, nchannels).
When you set values in the output image, you don't want numpy to reallocate the array
but instead set values in the existing array, so use the [:] operator.
Example: arr[:] = arr - 15
If the array is modified call:
``picture.modified()``
when all your modifications are completed.
"""
nx, ny, _ = self._data.GetDimensions()
nchan = self._data.GetPointData().GetScalars().GetNumberOfComponents()
narray = utils.vtk2numpy(self._data.GetPointData().GetScalars()).reshape(ny,nx,nchan)
narray = np.flip(narray, axis=0).astype(np.uint8)
return narray
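    # Hedged sketch combining tonumpy() and modified(), per the docstring above:
    #
    #   arr = pic.tonumpy()     # shape (ny, nx, nchannels)
    #   arr[:] = 255 - arr      # edit pixels in place (note the [:])
    #   pic.modified()          # propagate the change to the rendering pipeline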
def box(self, xspan, yspan, c='green5', alpha=1):
"""Draw a box on top of current image. Units are pixels.
.. code-block:: python
import vedo
pic = vedo.Picture("dog.jpg")
pic.box([100,300], [100,200], c='green4', alpha=0.7)
pic.line([100,100],[400,500], lw=2, alpha=1)
pic.triangle([250,300], [100,300], [200,400])
vedo.show(pic, axes=1)
"""
x1, x2 = xspan
y1, y2 = yspan
r,g,b = vedo.colors.getColor(c)
c = np.array([r,g,b]) * 255
c = c.astype(np.uint8)
if alpha>1:
alpha=1
if alpha<=0:
return self
alpha2 = alpha
alpha1 = 1-alpha
nx, ny = self.dimensions()
if x2>nx : x2=nx-1
if y2>ny : y2=ny-1
nchan = self.channels()
narrayA = self.tonumpy()
canvas_source = vtk.vtkImageCanvasSource2D()
canvas_source.SetExtent(0, nx-1, 0, ny-1, 0, 0)
canvas_source.SetScalarTypeToUnsignedChar()
canvas_source.SetNumberOfScalarComponents(nchan)
canvas_source.SetDrawColor(255,255,255)
canvas_source.FillBox(x1, x2, y1, y2)
canvas_source.Update()
image_data = canvas_source.GetOutput()
vscals = image_data.GetPointData().GetScalars()
narrayB = vedo.utils.vtk2numpy(vscals).reshape(ny,nx,nchan)
narrayB = np.flip(narrayB, axis=0)
narrayC = np.where(narrayB < 255, narrayA, alpha1*narrayA+alpha2*c)
return self._update(_get_img(narrayC))
def line(self, p1, p2, lw=2, c='k2', alpha=1):
"""Draw a line on top of current image. Units are pixels."""
x1, x2 = p1
y1, y2 = p2
r,g,b = vedo.colors.getColor(c)
c = np.array([r,g,b]) * 255
c = c.astype(np.uint8)
if alpha>1:
alpha=1
if alpha<=0:
return self
alpha2 = alpha
alpha1 = 1-alpha
nx, ny = self.dimensions()
if x2>nx : x2=nx-1
if y2>ny : y2=ny-1
nchan = self.channels()
narrayA = self.tonumpy()
canvas_source = vtk.vtkImageCanvasSource2D()
canvas_source.SetExtent(0, nx-1, 0, ny-1, 0, 0)
canvas_source.SetScalarTypeToUnsignedChar()
canvas_source.SetNumberOfScalarComponents(nchan)
canvas_source.SetDrawColor(255,255,255)
canvas_source.FillTube(x1, x2, y1, y2, lw)
canvas_source.Update()
image_data = canvas_source.GetOutput()
vscals = image_data.GetPointData().GetScalars()
narrayB = vedo.utils.vtk2numpy(vscals).reshape(ny,nx,nchan)
narrayB = np.flip(narrayB, axis=0)
narrayC = np.where(narrayB < 255, narrayA, alpha1*narrayA+alpha2*c)
return self._update(_get_img(narrayC))
def triangle(self, p1, p2, p3, c='red3', alpha=1):
"""Draw a triangle on top of current image. Units are pixels."""
x1, y1 = p1
x2, y2 = p2
x3, y3 = p3
r,g,b = vedo.colors.getColor(c)
c = np.array([r,g,b]) * 255
c = c.astype(np.uint8)
if alpha>1:
alpha=1
if alpha<=0:
return self
alpha2 = alpha
alpha1 = 1-alpha
nx, ny = self.dimensions()
if x1>nx : x1=nx
if x2>nx : x2=nx
if x3>nx : x3=nx
if y1>ny : y1=ny
if y2>ny : y2=ny
if y3>ny : y3=ny
nchan = self.channels()
narrayA = self.tonumpy()
canvas_source = vtk.vtkImageCanvasSource2D()
canvas_source.SetExtent(0, nx-1, 0, ny-1, 0, 0)
canvas_source.SetScalarTypeToUnsignedChar()
canvas_source.SetNumberOfScalarComponents(nchan)
canvas_source.SetDrawColor(255,255,255)
canvas_source.FillTriangle(x1, y1, x2, y2, x3, y3)
canvas_source.Update()
image_data = canvas_source.GetOutput()
vscals = image_data.GetPointData().GetScalars()
narrayB = vedo.utils.vtk2numpy(vscals).reshape(ny,nx,nchan)
narrayB = np.flip(narrayB, axis=0)
narrayC = np.where(narrayB < 255, narrayA, alpha1*narrayA+alpha2*c)
return self._update(_get_img(narrayC))
# def circle(self, center, radius, c='k3', alpha=1): # not working
# """Draw a box."""
# x1, y1 = center
#
# r,g,b = vedo.colors.getColor(c)
# c = np.array([r,g,b]) * 255
# c = c.astype(np.uint8)
#
# if alpha>1:
# alpha=1
# if alpha<=0:
# return self
# alpha2 = alpha
# alpha1 = 1-alpha
#
# nx, ny = self.dimensions()
# nchan = self.channels()
# narrayA = self.tonumpy()
#
# canvas_source = vtk.vtkImageCanvasSource2D()
# canvas_source.SetExtent(0, nx-1, 0, ny-1, 0, 0)
# canvas_source.SetScalarTypeToUnsignedChar()
# canvas_source.SetNumberOfScalarComponents(nchan)
# canvas_source.SetDrawColor(255,255,255)
# canvas_source.DrawCircle(x1, y1, radius)
# canvas_source.Update()
# image_data = canvas_source.GetOutput()
#
# vscals = image_data.GetPointData().GetScalars()
# narrayB = vedo.utils.vtk2numpy(vscals).reshape(ny,nx,nchan)
# narrayB = np.flip(narrayB, axis=0)
# narrayC = np.where(narrayB < 255, narrayA, alpha1*narrayA+alpha2*c)
# return self._update(_get_img(narrayC))
def text(self, txt,
pos=(0,0,0),
s=1,
c=None,
alpha=1,
bg=None,
font="Theemim",
dpi=500,
justify="bottom-left",
):
"""Build an image from a string."""
if c is None: # automatic black or white
if vedo.plotter_instance and vedo.plotter_instance.renderer:
c = (0.9, 0.9, 0.9)
if np.sum(vedo.plotter_instance.renderer.GetBackground()) > 1.5:
c = (0.1, 0.1, 0.1)
else:
c = (0.3, 0.3, 0.3)
r = vtk.vtkTextRenderer()
img = vtk.vtkImageData()
tp = vtk.vtkTextProperty()
tp.BoldOff()
tp.SetColor(colors.getColor(c))
tp.SetJustificationToLeft()
if "top" in justify:
tp.SetVerticalJustificationToTop()
if "bottom" in justify:
tp.SetVerticalJustificationToBottom()
if "cent" in justify:
tp.SetVerticalJustificationToCentered()
tp.SetJustificationToCentered()
if "left" in justify:
tp.SetJustificationToLeft()
if "right" in justify:
tp.SetJustificationToRight()
if font.lower() == "courier": tp.SetFontFamilyToCourier()
elif font.lower() == "times": tp.SetFontFamilyToTimes()
elif font.lower() == "arial": tp.SetFontFamilyToArial()
else:
tp.SetFontFamily(vtk.VTK_FONT_FILE)
tp.SetFontFile(utils.getFontPath(font))
if bg:
bgcol = colors.getColor(bg)
tp.SetBackgroundColor(bgcol)
tp.SetBackgroundOpacity(alpha * 0.5)
tp.SetFrameColor(bgcol)
tp.FrameOn()
#GetConstrainedFontSize (const vtkUnicodeString &str,
# vtkTextProperty *tprop, int targetWidth, int targetHeight, int dpi)
fs = r.GetConstrainedFontSize(txt, tp, 900, 1000, dpi)
tp.SetFontSize(fs)
r.RenderString(tp, txt, img, [1,1], dpi)
# RenderString (vtkTextProperty *tprop, const vtkStdString &str,
# vtkImageData *data, int textDims[2], int dpi, int backend=Default)
self.SetInputData(img)
self.GetMapper().Modified()
self.SetPosition(pos)
x0, x1 = self.xbounds()
if x1 != x0:
sc = s/(x1-x0)
self.SetScale(sc,sc,sc)
return self
def modified(self):
"""Use in conjunction with ``tonumpy()`` to update any modifications to the picture array"""
self._data.GetPointData().GetScalars().Modified()
return self
def write(self, filename):
"""Write picture to file as png or jpg."""
vedo.io.write(self._data, filename)
return self
|
import copy
import os
import numpy as np
import cv2
import tensorflow as tf
from sklearn.metrics import confusion_matrix
def similarity(intersection, union):
if union > 0:
similarity = np.array(intersection/float(union)).item()
elif intersection == 0 and union == 0:
similarity = 1.0
else:
similarity = 0.0
return similarity
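# Hedged sketch of the edge cases similarity() handles: an empty union with an
# empty intersection counts as perfect agreement, any other degenerate input
# scores zero, otherwise it is plain intersection-over-union.
def _example_similarity():
    assert similarity(5, 10) == 0.5   # plain IoU
    assert similarity(0, 0) == 1.0    # both masks empty
    assert similarity(3, 0) == 0.0    # degenerate fallback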
def jaccard(annotation, segmentation, iTypes, typeSimilarity):
intersections = 0
unions = 0
metrics = {}
unique = np.unique(annotation)
unique = np.append(unique,np.unique(segmentation))
unique = np.unique(unique)
#print(unique)
for i in unique:
if i in iTypes:
metric = {}
ta = np.zeros_like(annotation, dtype='bool')
tb = np.zeros_like(segmentation, dtype='bool')
ta[annotation == i] = 1 # Convert to binary mask
aa = ta.sum()
metric['annotation area'] = aa.item()
tb[segmentation == i] = 1 # Convert to binary mask
ab = tb.sum()
metric['segmentation area'] = ab.item()
andim = ta*tb # Logical AND
intersection = andim.sum()
metric['intersection'] = intersection.item()
intersections += int(intersection)
orim = ta + tb # Logical OR
union = orim.sum()
metric['union'] = union.item()
unions += int(union)
if i in typeSimilarity:
typeSimilarity[i]['intersection'] += int(intersection)
typeSimilarity[i]['union'] += int(union)
metric['similarity'] = similarity(intersection, union)
metrics[iTypes[i]['name']] = metric
iou = {}
iou['objects'] = metrics
iou['image'] = similarity(intersections, unions)
return iou, typeSimilarity, unique
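# Hedged sketch of calling jaccard() on tiny arrays; the iTypes layout
# (index -> {'name': ...}) is inferred from how the function indexes it.
def _example_jaccard():
    ann = np.array([[0, 1], [1, 1]])
    seg = np.array([[0, 1], [1, 0]])
    iTypes = {0: {'name': 'background'}, 1: {'name': 'object'}}
    typeSim = {0: {'intersection': 0, 'union': 0}, 1: {'intersection': 0, 'union': 0}}
    iou, typeSim, unique = jaccard(ann, seg, iTypes, typeSim)
    return iou  # iou['objects']['object']['similarity'] == 2/3 (intersection 2, union 3)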
def confusionmatrix(labels, segmentation, classes, total_confusion = None):
    confusion = confusion_matrix(labels.flatten(), segmentation.flatten(), labels=classes)
if total_confusion is None:
total_confusion = confusion
else:
total_confusion += confusion
return confusion, total_confusion
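# Hedged sketch of accumulating confusion matrices across batches with
# confusionmatrix(); the toy labels below are made up.
def _example_confusionmatrix():
    a = np.array([[0, 1], [1, 1]])
    b = np.array([[0, 1], [0, 1]])
    total = None
    for _ in range(3):  # e.g. three identical batches
        c, total = confusionmatrix(a, b, classes=[0, 1], total_confusion=total)
    return total  # elementwise 3x the single-batch confusion matrix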
def eval_records(fie, objTypes, records, resultsPath):
results = []
    if not os.path.exists(resultsPath):
os.makedirs(resultsPath)
typeSimilarity = {}
for objType in objTypes:
typeSimilarity[objType['index']] = {'union':0, 'intersection':0}
for id, record in enumerate(records):
im = cv2.imread(record['image'],cv2.IMREAD_GRAYSCALE)
an = cv2.imread(record['annotation'],cv2.IMREAD_GRAYSCALE)
outputs = fie(tf.expand_dims(tf.constant(im),-1))
seg = np.squeeze(outputs['class_ids'].numpy())
        record['similarity'], typeSimilarity, unique = jaccard(an, seg, objTypes, typeSimilarity)
        resultsImg = ed.AnnotationPlots(objTypes, cv2.cvtColor(im,cv2.COLOR_GRAY2RGB), an, seg, resultsPath, record)  # 'ed' is an external plotting helper assumed to be in scope
record['review'] = resultsImg
print('similarity: {:.2f}, unique: {} image: {}'.format(record['similarity']['image'], unique, record['image']))
results.append(record)
return results, typeSimilarity
def ColorizeAnnotation(ann, lut):
annrgb = [cv2.LUT(ann, lut[:, i]) for i in range(3)]
annrgb = np.dstack(annrgb)
return annrgb
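# Hedged sketch of ColorizeAnnotation(): a 256x3 lookup table maps class ids
# to colors, one channel at a time, via cv2.LUT.
def _example_colorize():
    lut = np.zeros([256, 3], dtype=np.uint8)
    lut[1] = [255, 0, 0]  # map class 1 to a solid color
    ann = np.array([[0, 1], [1, 0]], dtype=np.uint8)
    return ColorizeAnnotation(ann, lut)  # shape (2, 2, 3)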
def MergeIman(img, ann, lut, mean=None, stDev = None):
if mean is not None and stDev is not None:
img = (img*stDev) + mean
ann = ColorizeAnnotation(ann, lut)
img = (img*ann).astype(np.uint8)
return img
def MergeImAnSeg(img, ann, seg, lut, mean=None, stdev=None, ann_text='Label', seg_text='Segmentation'):
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
iman = MergeIman(img, ann, lut, mean, stdev)
imseg = MergeIman(img, seg, lut, mean, stdev)
iman = cv2.putText(iman, ann_text,(10,25), font, 1,(255,255,255),1,cv2.LINE_AA)
imseg = cv2.putText(imseg, seg_text,(10,25), font, 1,(255,255,255),1,cv2.LINE_AA)
iman = cv2.hconcat([iman, imseg])
return iman
class DatasetResults:
    # imgSave: save image to the path defined in imgSave
    # imRecord: record image in results
def __init__(self, class_dictionary, batch_size=1, imStatistics=False, imgSave=None, imRecord=False, task='segmentation'):
# Prepare datasets for similarity computation
self.class_dictionary = class_dictionary
self.batch_size = batch_size
self.imStatistics = imStatistics
self.imgSave = imgSave
self.imRecord = imRecord
self.task = task
# Process inputs for evaluation
self.classSimilarity = {}
self.objTypes = {}
for objType in class_dictionary['objects']:
if objType['trainId'] not in self.objTypes:
self.objTypes[objType['trainId']] = copy.deepcopy(objType)
# set name to category for objTypes and id to trainId
self.objTypes[objType['trainId']]['name'] = objType['category']
self.objTypes[objType['trainId']]['id'] = objType['trainId']
for i in self.objTypes:
self.classSimilarity[i]={'intersection':0, 'union':0}
self.num_classes = len(self.objTypes)
self.confusion_labels = range(self.num_classes)
self.CreateLut()
        # Prepare results data structures
        self.typeSimilarity = {}
        self.images = []
        self.totalConfusion = None
        self.dtSum = 0
def CreateLut(self):
        self.lut = np.zeros([256,3], dtype=np.uint8)
        for obj in self.class_dictionary['objects']: # Load RGB colors as BGR
            self.lut[obj['trainId']][0] = obj['color'][2]
            self.lut[obj['trainId']][1] = obj['color'][1]
            self.lut[obj['trainId']][2] = obj['color'][0]
        self.lut = self.lut.astype(float) * 1/255. # scale colors 0-1
        self.lut[self.class_dictionary['background']] = [1.0,1.0,1.0] # Pass Through
def infer_results(self, iBatch, images, labels, segmentations, mean, stdev, dt):
self.dtSum += dt
numImages = len(images)
dtImage = dt/numImages
for j in range(numImages):
result = {'dt':dtImage}
if self.imgSave is not None or self.imRecord:
imanseg = MergeImAnSeg(images[j], labels[j], segmentations[j], self.lut, mean[j], stdev[j])
if self.imgSave is not None:
savename = '{}/{}{:04d}.png'.format(self.imgSave, self.task, self.batch_size*iBatch+j)
cv2.imwrite(savename, imanseg)
if self.imRecord:
result['image'] = imanseg
result['similarity'], self.classSimilarity, unique = jaccard(labels[j], segmentations[j], self.objTypes, self.classSimilarity)
confusion = confusion_matrix(labels[j].flatten(),segmentations[j].flatten(), labels=self.confusion_labels)
if self.totalConfusion is None:
self.totalConfusion = confusion
else:
self.totalConfusion += confusion
result['confusion'] = confusion.tolist()
self.images.append(result)
return self.totalConfusion
def Results(self):
dataset_similarity = {}
num_images = len(self.images)
average_time = self.dtSum/num_images
        sumIntersection = 0
        sumUnion = 0
        for key in self.classSimilarity:
            intersection = self.classSimilarity[key]['intersection']
            sumIntersection += intersection
            union = self.classSimilarity[key]['union']
            sumUnion += union
            class_similarity = similarity(intersection, union)
            # convert to int from int64 for json.dumps
            dataset_similarity[key] = {'intersection': int(intersection), 'union': int(union), 'similarity': class_similarity}
        # mIoU computed from the accumulated confusion matrix
        positives = np.diagonal(self.totalConfusion)
        total = np.sum(self.totalConfusion, 0) + np.sum(self.totalConfusion, 1) - positives
        # Remove zero values
        positives = positives[np.nonzero(total)]
        total = total[np.nonzero(total)]
        iou = positives / total
        miou = np.sum(iou) / self.num_classes
results = {'confusion':self.totalConfusion.tolist(),
'similarity':dataset_similarity,
'average time': average_time,
'miou': miou.item(),
'num images': num_images,
}
if self.imStatistics:
            results['images'] = self.images
return results
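# --- Hedged usage sketch (not part of the original module) ---
# Illustrates MergeImAnSeg/MergeIman on synthetic data; assumes `np` and
# `cv2` are imported at the top of this module, as the functions above use
# them. The LUT maps class ids to 0-1 scaled BGR colors, as in CreateLut().
def _demo_merge_im_an_seg():
    lut = np.zeros([256, 3], dtype=float)
    lut[0] = [1.0, 1.0, 1.0]  # background passes the image through
    lut[1] = [0.0, 0.0, 1.0]  # class 1 tinted red (BGR order)
    img = np.random.randint(0, 255, (64, 64, 3)).astype(float)  # already-denormalized image
    ann = np.zeros((64, 64), dtype=np.uint8)  # label map
    ann[16:48, 16:48] = 1
    seg = ann.copy()  # pretend-perfect segmentation
    return MergeImAnSeg(img, ann, seg, lut)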
|
# coding=utf-8
from OTLMOW.OTLModel.BaseClasses.AttributeInfo import AttributeInfo
from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Datatypes.BooleanField import BooleanField
from OTLMOW.OTLModel.Datatypes.ComplexField import ComplexField
from OTLMOW.OTLModel.Datatypes.DtcAdres import DtcAdres
from OTLMOW.OTLModel.Datatypes.StringField import StringField
# Generated with OTLComplexDatatypeCreator. To modify: extend, do not edit
class DtcNatuurlijkPersoonWaarden(AttributeInfo):
def __init__(self, parent=None):
AttributeInfo.__init__(self, parent)
self._achternaam = OTLAttribuut(field=StringField,
naam='achternaam',
label='achternaam',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/implementatieelement#DtcNatuurlijkPersoon.achternaam',
definition='De achternaam.',
owner=self)
self._adres = OTLAttribuut(field=DtcAdres,
naam='adres',
label='adres',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/implementatieelement#DtcNatuurlijkPersoon.adres',
kardinaliteit_max='*',
definition='Het adres.',
owner=self)
self._emailadres = OTLAttribuut(field=StringField,
naam='emailadres',
label='emailadres',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/implementatieelement#DtcNatuurlijkPersoon.emailadres',
kardinaliteit_max='*',
definition='Het emailadres.',
owner=self)
self._fax = OTLAttribuut(field=StringField,
naam='fax',
label='fax',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/implementatieelement#DtcNatuurlijkPersoon.fax',
kardinaliteit_max='*',
definition='De faxnummer.',
owner=self)
self._heeftEmailVoorkeur = OTLAttribuut(field=BooleanField,
naam='heeftEmailVoorkeur',
label='heeft email voorkeur',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/implementatieelement#DtcNatuurlijkPersoon.heeftEmailVoorkeur',
definition='Aanduiding of een persoon de voorkeur heeft om via email gecontacteerd te worden.',
owner=self)
self._heeftFaxVoorkeur = OTLAttribuut(field=BooleanField,
naam='heeftFaxVoorkeur',
label='heeft fax voorkeur',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/implementatieelement#DtcNatuurlijkPersoon.heeftFaxVoorkeur',
definition='Aanduiding of een persoon een voorkeur heeft om via fax gegevens te ontvangen.',
owner=self)
self._telefoonnnummer = OTLAttribuut(field=StringField,
naam='telefoonnnummer',
label='telefoonnnummer',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/implementatieelement#DtcNatuurlijkPersoon.telefoonnnummer',
kardinaliteit_max='*',
definition='Het telefoonnummer.',
owner=self)
self._voornaam = OTLAttribuut(field=StringField,
naam='voornaam',
label='voornaam',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/implementatieelement#DtcNatuurlijkPersoon.voornaam',
definition='De voornaam.',
owner=self)
@property
def achternaam(self):
"""De achternaam."""
return self._achternaam.get_waarde()
@achternaam.setter
def achternaam(self, value):
self._achternaam.set_waarde(value, owner=self._parent)
@property
def adres(self):
"""Het adres."""
return self._adres.get_waarde()
@adres.setter
def adres(self, value):
self._adres.set_waarde(value, owner=self._parent)
@property
def emailadres(self):
"""Het emailadres."""
return self._emailadres.get_waarde()
@emailadres.setter
def emailadres(self, value):
self._emailadres.set_waarde(value, owner=self._parent)
@property
def fax(self):
"""De faxnummer."""
return self._fax.get_waarde()
@fax.setter
def fax(self, value):
self._fax.set_waarde(value, owner=self._parent)
@property
def heeftEmailVoorkeur(self):
"""Aanduiding of een persoon de voorkeur heeft om via email gecontacteerd te worden."""
return self._heeftEmailVoorkeur.get_waarde()
@heeftEmailVoorkeur.setter
def heeftEmailVoorkeur(self, value):
self._heeftEmailVoorkeur.set_waarde(value, owner=self._parent)
@property
def heeftFaxVoorkeur(self):
"""Aanduiding of een persoon een voorkeur heeft om via fax gegevens te ontvangen."""
return self._heeftFaxVoorkeur.get_waarde()
@heeftFaxVoorkeur.setter
def heeftFaxVoorkeur(self, value):
self._heeftFaxVoorkeur.set_waarde(value, owner=self._parent)
@property
def telefoonnnummer(self):
"""Het telefoonnummer."""
return self._telefoonnnummer.get_waarde()
@telefoonnnummer.setter
def telefoonnnummer(self, value):
self._telefoonnnummer.set_waarde(value, owner=self._parent)
@property
def voornaam(self):
"""De voornaam."""
return self._voornaam.get_waarde()
@voornaam.setter
def voornaam(self, value):
self._voornaam.set_waarde(value, owner=self._parent)
# Generated with OTLComplexDatatypeCreator. To modify: extend, do not edit
class DtcNatuurlijkPersoon(ComplexField, AttributeInfo):
"""Complex datatype dat een natuurlijk persoon beschrijft."""
naam = 'DtcNatuurlijkPersoon'
label = 'Natuurlijk persoon'
objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/implementatieelement#DtcNatuurlijkPersoon'
definition = 'Complex datatype dat een natuurlijk persoon beschrijft.'
waardeObject = DtcNatuurlijkPersoonWaarden
def __str__(self):
return ComplexField.__str__(self)
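# Hedged usage sketch (not part of the generated file): the waardeObject can
# be instantiated directly and populated through the generated properties;
# the sample values below are hypothetical.
def _demo_natuurlijk_persoon():
    persoon = DtcNatuurlijkPersoonWaarden()
    persoon.voornaam = 'Jan'
    persoon.achternaam = 'Peeters'
    return persoon.voornaam, persoon.achternaam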
|
import sys
import pytest
import yaml
from prefect.run_configs import KubernetesRun
def test_no_args():
config = KubernetesRun()
assert config.job_template_path is None
assert config.job_template is None
assert config.image is None
assert config.env is None
assert config.cpu_limit is None
assert config.cpu_request is None
assert config.memory_limit is None
assert config.memory_request is None
assert config.service_account_name is None
assert config.image_pull_secrets is None
assert config.labels == set()
assert config.image_pull_policy is None
def test_labels():
config = KubernetesRun(labels=["a", "b"])
assert config.labels == {"a", "b"}
def test_cant_specify_both_job_template_and_job_template_path():
with pytest.raises(ValueError, match="Cannot provide both"):
KubernetesRun(job_template={}, job_template_path="/some/path")
def test_remote_job_template_path():
config = KubernetesRun(job_template_path="s3://bucket/example.yaml")
assert config.job_template_path == "s3://bucket/example.yaml"
assert config.job_template is None
@pytest.mark.parametrize("scheme", ["local", "file", None])
def test_local_job_template_path(tmpdir, scheme):
job_template = {
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {"labels": {"example": "foo"}},
}
path = str(tmpdir.join("test.yaml"))
if scheme is None:
job_template_path = path
else:
if sys.platform == "win32":
pytest.skip("Schemes are not supported on win32")
job_template_path = f"{scheme}://" + path
with open(path, "w") as f:
yaml.safe_dump(job_template, f)
config = KubernetesRun(job_template_path=job_template_path)
assert config.job_template_path is None
assert config.job_template == job_template
@pytest.mark.parametrize("kind", [dict, str])
def test_job_template(kind):
job_template = {
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {"labels": {"example": "foo"}},
}
arg = job_template if kind is dict else yaml.safe_dump(job_template)
config = KubernetesRun(job_template=arg)
assert config.job_template_path is None
assert config.job_template == job_template
def test_cpu_limit_and_request_acceptable_types():
config = KubernetesRun()
assert config.cpu_limit is None
assert config.cpu_request is None
config = KubernetesRun(cpu_limit="200m", cpu_request="100m")
assert config.cpu_limit == "200m"
assert config.cpu_request == "100m"
config = KubernetesRun(cpu_limit=0.5, cpu_request=0.1)
assert config.cpu_limit == "0.5"
assert config.cpu_request == "0.1"
def test_service_account_name_and_image_pull_secrets():
config = KubernetesRun(
service_account_name="my-account", image_pull_secrets=("a", "b", "c")
)
assert config.service_account_name == "my-account"
assert config.image_pull_secrets == ["a", "b", "c"]
# Ensure falsey-lists aren't converted to `None`.
config = KubernetesRun(image_pull_secrets=[])
assert config.image_pull_secrets == []
@pytest.mark.parametrize("image_pull_policy", ["Always", "IfNotPresent", "Never"])
def test_image_pull_policy_valid_value(image_pull_policy):
config = KubernetesRun(image_pull_policy=image_pull_policy)
assert config.image_pull_policy == image_pull_policy
def test_image_pull_policy_invalid_value():
with pytest.raises(ValueError):
KubernetesRun(image_pull_policy="WrongPolicy")
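# Hedged addition mirroring the cpu test above; assumes memory values are
# stored as given when passed as Kubernetes quantity strings.
def test_memory_limit_and_request_acceptable_types():
    config = KubernetesRun(memory_limit="512Mi", memory_request="128Mi")
    assert config.memory_limit == "512Mi"
    assert config.memory_request == "128Mi"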
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Binding']
class Binding(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_name: Optional[pulumi.Input[str]] = None,
binding_name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[pulumi.InputType['BindingResourcePropertiesArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Binding resource payload
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] app_name: The name of the App resource.
:param pulumi.Input[str] binding_name: The name of the Binding resource.
:param pulumi.Input[pulumi.InputType['BindingResourcePropertiesArgs']] properties: Properties of the Binding resource
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param pulumi.Input[str] service_name: The name of the Service resource.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if app_name is None and not opts.urn:
raise TypeError("Missing required property 'app_name'")
__props__['app_name'] = app_name
__props__['binding_name'] = binding_name
__props__['properties'] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__['service_name'] = service_name
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:appplatform/v20201101preview:Binding"), pulumi.Alias(type_="azure-native:appplatform:Binding"), pulumi.Alias(type_="azure-nextgen:appplatform:Binding"), pulumi.Alias(type_="azure-native:appplatform/latest:Binding"), pulumi.Alias(type_="azure-nextgen:appplatform/latest:Binding"), pulumi.Alias(type_="azure-native:appplatform/v20190501preview:Binding"), pulumi.Alias(type_="azure-nextgen:appplatform/v20190501preview:Binding"), pulumi.Alias(type_="azure-native:appplatform/v20200701:Binding"), pulumi.Alias(type_="azure-nextgen:appplatform/v20200701:Binding")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Binding, __self__).__init__(
'azure-native:appplatform/v20201101preview:Binding',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Binding':
"""
Get an existing Binding resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["name"] = None
__props__["properties"] = None
__props__["type"] = None
return Binding(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.BindingResourcePropertiesResponse']:
"""
Properties of the Binding resource
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
# -*- coding: utf-8 -*-
import random, sys
from psychopy import core, event, gui, visual, logging
#window
win = visual.Window(size = (1200,800), color = 'black', units = 'pix')
win.setRecordFrameIntervals(True)
win._refreshThreshold = 1/85.0 + 0.004  # the monitor runs at 85Hz; allow 4 ms tolerance
#set the log module to report warnings to the std output window (default is errors only)
logging.console.setLevel(logging.WARNING)
#timer
RT = core.Clock()
# parameters
numTrials=25
frameRate = 85 # Hz
stimDuration = 5 # secs
blankDuration = 2.5 # secs
stimFrames = int(frameRate * stimDuration)
blankFrames = int(frameRate * blankDuration) # rounds down / floor. 212.5 becomes 212.
totalFrames = stimFrames + blankFrames
def experiment():
    trialList = [random.randint(0,10) for r in xrange(numTrials)]  # renamed to avoid shadowing the builtin `list`
    #trialList = [2,3,2,3,2,3,2,3,4,5,6,7,8,9,8]
    demoDisp = visual.TextStim(win, text = '', height = 100) #textstim object that will be displayed
    for x in trialList:
# PREPARE STIMULUS
demoDisp.setText(x)
# PRESENT STIMULUS
for frameN in range(stimFrames):
# Display stimulus
demoDisp.draw()
win.flip()
# Reset timer on onset
if frameN == 0:
RT.reset()
# PRESENT BLANK
for frameN in range(blankFrames):
# Blank screen with no response recording
win.flip()
# RECORD RESPONSES
keyList = event.getKeys(timeStamped=RT)
# keyList[N][Y], N is response number if multiple keys were pressed. Y=0 is key-name. Y=1 is time
if keyList:
print keyList[0][0], 'key was pressed after', str(keyList[0][1]), 'seconds'
else:
print 'no key was pressed'
experiment()
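# Hedged cleanup (a common PsychoPy idiom): close the window and exit cleanly
# once all trials have run.
win.close()
core.quit()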
|
import random
class Default(object):
@staticmethod
def default(**kwargs):
return kwargs.get('value', None)
    @staticmethod
    def random(**kwargs):
        arr = kwargs.get('array', None)
        if arr is None:
            raise Exception('default: array is None')
        if len(arr) == 0:
            raise Exception('default: array is empty')
        # equivalent to arr[random.randint(0, len(arr) - 1)]
        return random.choice(arr)
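# Hedged usage sketch for the helpers above.
if __name__ == '__main__':
    assert Default.default(value=3) == 3
    assert Default.default() is None
    assert Default.random(array=['a', 'b']) in ('a', 'b')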
|
from .default import DefaultAttackEval
from .invoke_limit_eval import InvokeLimitedAttackEval
|
#!/usr/bin/env python
#===============================================================================
# Copyright 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'daal4py'
copyright = '2021, Intel'
author = 'Intel'
# The short X.Y version
version = '2021'
# The full version, including alpha/beta/rc tags
release = '2021.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'contents'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
html_theme_options = {
"sidebarwidth": 30,
"nosidebar": False,
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_context = {
'css_files': [
'_static/style.css'
]
}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
html_sidebars = {
'**': [
'globaltoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html'
]
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'daal4pydoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'daal4py.tex', 'daal4py Documentation',
'Intel', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'daal4py', 'daal4py Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'daal4py', 'daal4py Documentation',
author, 'daal4py', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
|
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BilliecoinTestFramework
from test_framework.util import *
class SignRawTransactionsTest(BilliecoinTestFramework):
"""Tests transaction signing via RPC command "signrawtransaction"."""
def setup_chain(self):
print('Initializing test directory ' + self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self, split=False):
self.nodes = start_nodes(1, self.options.tmpdir)
self.is_network_split = False
def successful_signing_test(self):
"""Creates and signs a valid raw transaction with one input.
Expected results:
1) The transaction has a complete set of signatures
2) No script verification error occurred"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}
]
outputs = {'ycwedq2f3sz2Yf9JqZsBCQPxp18WU3Hp4J': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)
# 1) The transaction has a complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], True)
# 2) No script verification error occurred
assert 'errors' not in rawTxSigned
def script_verification_error_test(self):
"""Creates and signs a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
Expected results:
3) The transaction has no complete set of signatures
4) Two script verification errors occurred
5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
# Missing scriptPubKey
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
]
scripts = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
'scriptPubKey': 'badbadbadbad'}
]
outputs = {'ycwedq2f3sz2Yf9JqZsBCQPxp18WU3Hp4J': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)
# 3) The transaction has no complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], False)
# 4) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 5) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
def run_test(self):
self.successful_signing_test()
self.script_verification_error_test()
if __name__ == '__main__':
SignRawTransactionsTest().main()
|
import shutil
from pathlib import Path
from guessit import guessit
class MediaWatcherMover(object):
"""
Where the magic happens.
Either scan a given list of folders or lookup a specific file.
"""
def __init__(self, config, logger):
self.config = config
self.logger = logger
def scan(self):
"""
Recursively scan a list of folders.
"""
paths = [Path(directory) for directory in self.config.watch]
def recursive(r_path):
self.logger.info('Scanning {}'.format(r_path))
            for child in r_path.iterdir():
                if child.is_dir():
                    recursive(child)
                else:
                    self.lookup(child)
for path in paths:
recursive(path)
self.logger.info('Done scanning.')
def lookup(self, path):
"""
Dispatch the path based on the guess of its type
:param path: The path to look up
"""
if set(path.suffixes).isdisjoint({'.mkv', '.mp4', '.avi'}):
self.logger.info('File ignored: {}'.format(path))
return
guess = guessit(str(path))
if guess['type'] == 'movie':
self.lookup_movie(path, guess)
elif guess['type'] == 'episode':
if 'season' not in guess:
self.lookup_anime(path, guess)
else:
self.lookup_tv(path, guess)
def lookup_movie(self, path, guess):
self.logger.info('Looking up movie: {}'.format(path))
move_folder = Path(self.config.output_movies) / Path(guess['title'])
if not move_folder.exists():
move_folder.mkdir()
move_path = move_folder / Path('{}.{}'.format(guess['title'], guess['container']))
if move_path.exists():
raise FileExistsError('{} already exists, ignoring.'.format(move_path))
if not self.config.mock:
shutil.move(str(path), str(move_path))
self.logger.info('Moved movie from {} to {}'.format(path, move_path))
def lookup_tv(self, path, guess):
self.logger.info('Looking up tv episode: {}'.format(path))
move_folder = Path(self.config.output_tv) / Path(guess['title'])
if not move_folder.exists():
move_folder.mkdir()
move_folder /= Path('Season {:02d}'.format(guess['season']))
if not move_folder.exists():
move_folder.mkdir()
move_path = move_folder / Path(
'{} s{:02d}e{:02d}.{}'.format(guess['title'], guess['season'], guess['episode'], guess['container']))
if move_path.exists():
raise FileExistsError('{} already exists, ignoring.'.format(move_path))
if not self.config.mock:
shutil.move(str(path), str(move_path))
self.logger.info('Moved tv episode from {} to {}'.format(path, move_path))
def lookup_anime(self, path, guess):
self.logger.info('Looking up anime episode: {}'.format(path))
move_folder = Path(self.config.output_anime) / Path(guess['title'])
if not move_folder.exists():
move_folder.mkdir()
move_path = move_folder / Path(
'{} - {:02d}.{}'.format(guess['title'], guess['episode'], guess['container']))
if move_path.exists():
raise FileExistsError('{} already exists, ignoring.'.format(move_path))
if not self.config.mock:
shutil.move(str(path), str(move_path))
self.logger.info('Moved anime episode from {} to {}'.format(path, move_path))
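# Hedged usage sketch (not part of the original module): `config` only needs
# the attributes read above (watch, output_movies, output_tv, output_anime,
# mock); SimpleNamespace stands in for the real config object.
if __name__ == '__main__':
    import logging
    import tempfile
    from types import SimpleNamespace
    logging.basicConfig(level=logging.INFO)
    with tempfile.TemporaryDirectory() as tmp:
        cfg = SimpleNamespace(watch=[tmp], output_movies=tmp, output_tv=tmp,
                              output_anime=tmp, mock=True)
        MediaWatcherMover(cfg, logging.getLogger(__name__)).scan()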
|
from common.core import AbstractPlugin
from common.core import classReplacements
import timeit
URLUtils = classReplacements.get_class('URLUtils')
class TimeMePlugin(AbstractPlugin):
def should_run(self):
return isinstance(self.item_options.get('timeme'), dict)
def check(self):
timeme = self.item_options['timeme']
        requests = timeme.get('requests', 5)
limit_max = timeme.get('limit_max')
limit_avg = timeme.get('limit_avg')
from requests import Session
s = Session()
request = URLUtils.prepare_request(self.url, self.global_options, self.item_options)
times = timeit.repeat(stmt=lambda:s.send(request, timeout=30, allow_redirects=True),
repeat=requests, number=1)
request_max = max(times)
        request_avg = sum(times) / len(times)
if limit_max and request_max > limit_max:
self.fail("Maximum request time greater than limit: {0} > {1}".format(request_max, limit_max))
if limit_avg and request_avg > limit_avg:
self.fail("Average request time greater than limit: {0} > {1}".format(request_avg, limit_avg))
return self.is_ok()
|
from mkdocs.config import base, config_options, Config
if __name__ == '__main__':
config_scheme = (
('doxygen-source', config_options.Type(str, default='')),
('api-path', config_options.Type(str, default='api')),
('target', config_options.Type(str, default='mkdocs')),
('full-doc', config_options.Type(bool, default=False)),
('hints', config_options.Type(bool, default=False)),
('debug', config_options.Type(bool, default=False)),
('ignore-errors', config_options.Type(bool, default=False)),
('link-prefix', config_options.Type(str, default='')),
)
config_ok = {'doxygen-source': 'src-esp', 'api-path': 'api', 'target': 'mkdocs', 'full-doc': True, 'hints': True, 'debug': False, 'ignore-errors': False}
config_false = {'doxygen-source': False, 'api-path': 'api', 'target': 'mkdocs', 'full-doc': 'asd', 'hints': True, 'debug': False, 'ignore-errors': False}
cfg = Config(config_scheme, '')
cfg.load_dict(config_false)
# cfg.load_dict(config_ok)
print(cfg.validate())
|
#!/usr/bin/env python
# Copyright 2016 Vijayaditya Peddinti.
# 2016 Vimal Manohar
# Apache 2.0.
""" This script is similar to steps/nnet3/train_dnn.py but trains a
raw neural network instead of an acoustic model.
"""
from __future__ import print_function
import argparse
import logging
import pprint
import os
import sys
import traceback
sys.path.insert(0, 'steps')
import libs.nnet3.train.common as common_train_lib
import libs.common as common_lib
import libs.nnet3.train.frame_level_objf as train_lib
import libs.nnet3.report.log_parse as nnet3_log_parse
logger = logging.getLogger('libs')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s [%(pathname)s:%(lineno)s - "
"%(funcName)s - %(levelname)s ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info('Starting raw DNN trainer (train_raw_dnn.py)')
def get_args():
""" Get args from stdin.
The common options are defined in the object
libs.nnet3.train.common.CommonParser.parser.
See steps/libs/nnet3/train/common.py
"""
parser = argparse.ArgumentParser(
description="""Trains a feed forward raw DNN (without transition model)
using frame-level objectives like cross-entropy and mean-squared-error.
DNNs include simple DNNs, TDNNs and CNNs.""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve',
parents=[common_train_lib.CommonParser(include_chunk_context=False).parser])
# egs extraction options
parser.add_argument("--egs.frames-per-eg", type=int, dest='frames_per_eg',
default=8,
help="Number of output labels per example")
parser.add_argument("--image.augmentation-opts", type=str,
dest='image_augmentation_opts',
default=None,
help="Image augmentation options")
# trainer options
parser.add_argument("--trainer.prior-subset-size", type=int,
dest='prior_subset_size', default=20000,
help="Number of samples for computing priors")
parser.add_argument("--trainer.num-jobs-compute-prior", type=int,
dest='num_jobs_compute_prior', default=10,
help="The prior computation jobs are single "
"threaded and run on the CPU")
# Parameters for the optimization
parser.add_argument("--trainer.optimization.minibatch-size",
type=str, dest='minibatch_size', default='512',
help="""Size of the minibatch used in SGD training
(argument to nnet3-merge-egs); may be a more general
rule as accepted by the --minibatch-size option of
nnet3-merge-egs; run that program without args to see
the format.""")
parser.add_argument("--compute-average-posteriors",
type=str, action=common_lib.StrToBoolAction,
choices=["true", "false"], default=False,
help="""If true, then the average output of the
network is computed and dumped as post.final.vec""")
# General options
parser.add_argument("--nj", type=int, default=4,
help="Number of parallel jobs")
parser.add_argument("--use-dense-targets", type=str,
action=common_lib.StrToBoolAction,
default=True, choices=["true", "false"],
help="Train neural network using dense targets")
parser.add_argument("--feat-dir", type=str, required=False,
help="Directory with features used for training "
"the neural network.")
parser.add_argument("--targets-scp", type=str, required=False,
help="Targets for training neural network.")
parser.add_argument("--dir", type=str, required=True,
help="Directory to store the models and "
"all other files.")
print(' '.join(sys.argv))
print(sys.argv)
args = parser.parse_args()
[args, run_opts] = process_args(args)
return [args, run_opts]
def process_args(args):
""" Process the options got from get_args()
"""
if args.frames_per_eg < 1:
raise Exception("--egs.frames-per-eg should have a minimum value of 1")
if not common_train_lib.validate_minibatch_size_str(args.minibatch_size):
raise Exception("--trainer.optimization.minibatch-size has an invalid value")
if (not os.path.exists(args.dir)
or not os.path.exists(args.dir+"/configs")):
raise Exception("This scripts expects {0} to exist and have a configs "
"directory which is the output of "
"make_configs.py script")
# set the options corresponding to args.use_gpu
run_opts = common_train_lib.RunOpts()
if args.use_gpu:
if not common_lib.check_if_cuda_compiled():
logger.warning(
"""You are running with one thread but you have not compiled
for CUDA. You may be running a setup optimized for GPUs.
If you have GPUs and have nvcc installed, go to src/ and do
./configure; make""")
run_opts.train_queue_opt = "--gpu 1"
run_opts.parallel_train_opts = ""
run_opts.combine_queue_opt = "--gpu 1"
run_opts.prior_gpu_opt = "--use-gpu=yes"
run_opts.prior_queue_opt = "--gpu 1"
else:
logger.warning("Without using a GPU this will be very slow. "
"nnet3 does not yet support multiple threads.")
run_opts.train_queue_opt = ""
run_opts.parallel_train_opts = "--use-gpu=no"
run_opts.combine_queue_opt = ""
run_opts.prior_gpu_opt = "--use-gpu=no"
run_opts.prior_queue_opt = ""
run_opts.command = args.command
run_opts.egs_command = (args.egs_command
if args.egs_command is not None else
args.command)
run_opts.num_jobs_compute_prior = args.num_jobs_compute_prior
return [args, run_opts]
def train(args, run_opts):
""" The main function for training.
Args:
args: a Namespace object with the required parameters
obtained from the function process_args()
run_opts: RunOpts object obtained from the process_args()
"""
arg_string = pprint.pformat(vars(args))
logger.info("Arguments for the experiment\n{0}".format(arg_string))
# Set some variables.
# note, feat_dim gets set to 0 if args.feat_dir is unset (None).
feat_dim = common_lib.get_feat_dim(args.feat_dir)
ivector_dim = common_lib.get_ivector_dim(args.online_ivector_dir)
ivector_id = common_lib.get_ivector_extractor_id(args.online_ivector_dir)
config_dir = '{0}/configs'.format(args.dir)
var_file = '{0}/vars'.format(config_dir)
variables = common_train_lib.parse_generic_config_vars_file(var_file)
# Set some variables.
try:
model_left_context = variables['model_left_context']
model_right_context = variables['model_right_context']
except KeyError as e:
raise Exception("KeyError {0}: Variables need to be defined in "
"{1}".format(str(e), '{0}/configs'.format(args.dir)))
left_context = model_left_context
right_context = model_right_context
# Initialize as "raw" nnet, prior to training the LDA-like preconditioning
# matrix. This first config just does any initial splicing that we do;
# we do this as it's a convenient way to get the stats for the 'lda-like'
# transform.
if (args.stage <= -5) and os.path.exists(args.dir+"/configs/init.config"):
logger.info("Initializing the network for computing the LDA stats")
common_lib.execute_command(
"""{command} {dir}/log/nnet_init.log \
nnet3-init --srand=-2 {dir}/configs/init.config \
{dir}/init.raw""".format(command=run_opts.command,
dir=args.dir))
default_egs_dir = '{0}/egs'.format(args.dir)
if (args.stage <= -4) and args.egs_dir is None:
if args.targets_scp is None or args.feat_dir is None:
raise Exception("If you don't supply the --egs-dir option, the "
"--targets-scp and --feat-dir options are required.")
logger.info("Generating egs")
if args.use_dense_targets:
target_type = "dense"
try:
num_targets = int(variables['num_targets'])
if (common_lib.get_feat_dim_from_scp(args.targets_scp)
!= num_targets):
raise Exception("Mismatch between num-targets provided to "
"script vs configs")
except KeyError as e:
num_targets = -1
else:
target_type = "sparse"
try:
num_targets = int(variables['num_targets'])
except KeyError as e:
raise Exception("KeyError {0}: Variables need to be defined "
"in {1}".format(
str(e), '{0}/configs'.format(args.dir)))
train_lib.raw_model.generate_egs_using_targets(
data=args.feat_dir, targets_scp=args.targets_scp,
egs_dir=default_egs_dir,
left_context=left_context, right_context=right_context,
run_opts=run_opts,
frames_per_eg_str=str(args.frames_per_eg),
srand=args.srand,
egs_opts=args.egs_opts,
cmvn_opts=args.cmvn_opts,
online_ivector_dir=args.online_ivector_dir,
samples_per_iter=args.samples_per_iter,
transform_dir=args.transform_dir,
stage=args.egs_stage,
target_type=target_type,
num_targets=num_targets)
if args.egs_dir is None:
egs_dir = default_egs_dir
else:
egs_dir = args.egs_dir
[egs_left_context, egs_right_context,
frames_per_eg_str, num_archives] = (
common_train_lib.verify_egs_dir(egs_dir, feat_dim,
ivector_dim, ivector_id,
left_context, right_context))
assert str(args.frames_per_eg) == frames_per_eg_str
if args.num_jobs_final > num_archives:
raise Exception('num_jobs_final cannot exceed the number of archives '
'in the egs directory')
# copy the properties of the egs to dir for
# use during decoding
common_train_lib.copy_egs_properties_to_exp_dir(egs_dir, args.dir)
if args.stage <= -3 and os.path.exists(args.dir+"/configs/init.config"):
logger.info('Computing the preconditioning matrix for input features')
train_lib.common.compute_preconditioning_matrix(
args.dir, egs_dir, num_archives, run_opts,
max_lda_jobs=args.max_lda_jobs,
rand_prune=args.rand_prune)
if args.stage <= -1:
logger.info("Preparing the initial network.")
common_train_lib.prepare_initial_network(args.dir, run_opts)
# set num_iters so that as close as possible, we process the data
# $num_epochs times, i.e. $num_iters*$avg_num_jobs) ==
# $num_epochs*$num_archives, where
# avg_num_jobs=(num_jobs_initial+num_jobs_final)/2.
num_archives_expanded = num_archives * args.frames_per_eg
num_archives_to_process = int(args.num_epochs * num_archives_expanded)
num_archives_processed = 0
    # use integer division so that range(num_iters) also works under Python 3
    num_iters = ((num_archives_to_process * 2)
                 // (args.num_jobs_initial + args.num_jobs_final))
# If do_final_combination is True, compute the set of models_to_combine.
# Otherwise, models_to_combine will be none.
if args.do_final_combination:
models_to_combine = common_train_lib.get_model_combine_iters(
num_iters, args.num_epochs,
num_archives_expanded, args.max_models_combine,
args.num_jobs_final)
else:
models_to_combine = None
if os.path.exists('{0}/valid_diagnostic.scp'.format(args.egs_dir)):
if os.path.exists('{0}/valid_diagnostic.egs'.format(args.egs_dir)):
            raise Exception('both {0}/valid_diagnostic.egs and '
                            '{0}/valid_diagnostic.scp exist. '
                            'This script expects only one of them to exist.'
                            .format(args.egs_dir))
use_multitask_egs = True
else:
if not os.path.exists('{0}/valid_diagnostic.egs'.format(args.egs_dir)):
            raise Exception('neither {0}/valid_diagnostic.egs nor '
                            '{0}/valid_diagnostic.scp exists. '
                            'This script expects one of them to exist.'
                            .format(args.egs_dir))
use_multitask_egs = False
logger.info("Training will run for {0} epochs = "
"{1} iterations".format(args.num_epochs, num_iters))
for iter in range(num_iters):
if (args.exit_stage is not None) and (iter == args.exit_stage):
logger.info("Exiting early due to --exit-stage {0}".format(iter))
return
current_num_jobs = int(0.5 + args.num_jobs_initial
+ (args.num_jobs_final - args.num_jobs_initial)
* float(iter) / num_iters)
if args.stage <= iter:
lrate = common_train_lib.get_learning_rate(iter, current_num_jobs,
num_iters,
num_archives_processed,
num_archives_to_process,
args.initial_effective_lrate,
args.final_effective_lrate)
shrinkage_value = 1.0 - (args.proportional_shrink * lrate)
if shrinkage_value <= 0.5:
raise Exception("proportional-shrink={0} is too large, it gives "
"shrink-value={1}".format(args.proportional_shrink,
shrinkage_value))
train_lib.common.train_one_iteration(
dir=args.dir,
iter=iter,
srand=args.srand,
egs_dir=egs_dir,
num_jobs=current_num_jobs,
num_archives_processed=num_archives_processed,
num_archives=num_archives,
learning_rate=lrate,
dropout_edit_string=common_train_lib.get_dropout_edit_string(
args.dropout_schedule,
float(num_archives_processed) / num_archives_to_process,
iter),
minibatch_size_str=args.minibatch_size,
frames_per_eg=args.frames_per_eg,
momentum=args.momentum,
max_param_change=args.max_param_change,
shrinkage_value=shrinkage_value,
shuffle_buffer_size=args.shuffle_buffer_size,
run_opts=run_opts,
get_raw_nnet_from_am=False,
image_augmentation_opts=args.image_augmentation_opts,
use_multitask_egs=use_multitask_egs,
backstitch_training_scale=args.backstitch_training_scale,
backstitch_training_interval=args.backstitch_training_interval)
if args.cleanup:
# do a clean up everything but the last 2 models, under certain
# conditions
common_train_lib.remove_model(
args.dir, iter-2, num_iters, models_to_combine,
args.preserve_model_interval,
get_raw_nnet_from_am=False)
if args.email is not None:
reporting_iter_interval = num_iters * args.reporting_interval
if iter % reporting_iter_interval == 0:
# lets do some reporting
[report, times, data] = (
nnet3_log_parse.generate_acc_logprob_report(args.dir))
message = report
subject = ("Update : Expt {dir} : "
"Iter {iter}".format(dir=args.dir, iter=iter))
common_lib.send_mail(message, subject, args.email)
num_archives_processed = num_archives_processed + current_num_jobs
if args.stage <= num_iters:
if args.do_final_combination:
logger.info("Doing final combination to produce final.raw")
train_lib.common.combine_models(
dir=args.dir, num_iters=num_iters,
models_to_combine=models_to_combine, egs_dir=egs_dir,
minibatch_size_str=args.minibatch_size, run_opts=run_opts,
get_raw_nnet_from_am=False,
sum_to_one_penalty=args.combine_sum_to_one_penalty,
use_multitask_egs=use_multitask_egs)
else:
common_lib.force_symlink("{0}.raw".format(num_iters),
"{0}/final.raw".format(args.dir))
if args.compute_average_posteriors and args.stage <= num_iters + 1:
logger.info("Getting average posterior for output-node 'output'.")
train_lib.common.compute_average_posterior(
dir=args.dir, iter='final', egs_dir=egs_dir,
num_archives=num_archives,
prior_subset_size=args.prior_subset_size, run_opts=run_opts,
get_raw_nnet_from_am=False)
if args.cleanup:
logger.info("Cleaning up the experiment directory "
"{0}".format(args.dir))
remove_egs = args.remove_egs
if args.egs_dir is not None:
# this egs_dir was not created by this experiment so we will not
# delete it
remove_egs = False
common_train_lib.clean_nnet_dir(
nnet_dir=args.dir, num_iters=num_iters, egs_dir=egs_dir,
preserve_model_interval=args.preserve_model_interval,
remove_egs=remove_egs,
get_raw_nnet_from_am=False)
# do some reporting
outputs_list = common_train_lib.get_outputs_list("{0}/final.raw".format(
args.dir), get_raw_nnet_from_am=False)
if 'output' in outputs_list:
[report, times, data] = nnet3_log_parse.generate_acc_logprob_report(
args.dir)
if args.email is not None:
common_lib.send_mail(report, "Update : Expt {0} : "
"complete".format(args.dir),
args.email)
with open("{dir}/accuracy.{output_name}.report".format(dir=args.dir,
output_name="output"),
"w") as f:
f.write(report)
common_lib.execute_command("steps/info/nnet3_dir_info.pl "
"{0}".format(args.dir))
def main():
[args, run_opts] = get_args()
try:
train(args, run_opts)
common_lib.wait_for_background_commands()
except BaseException as e:
# look for BaseException so we catch KeyboardInterrupt, which is
# what we get when a background thread dies.
if args.email is not None:
message = ("Training session for experiment {dir} "
"died due to an error.".format(dir=args.dir))
common_lib.send_mail(message, message, args.email)
if not isinstance(e, KeyboardInterrupt):
traceback.print_exc()
sys.exit(1)
if __name__ == "__main__":
main()
|
"""
WSGI config for nitjcompiler project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nitjcompiler.settings")
application = get_wsgi_application()
|
"""
After fine-tuning, all of our model weights, up until the last conv. layer,
are contained within a single layer model object, making it difficult for Keras
to load in these layer weights by name for any subsequent fine-tuning. Here,
the model is loaded, the interior model is extracted,
and its weights are saved.
Usage: python extracted_embedded_model path_to_full_model
"""
# from keras.models import model_from_json
from tensorflow.keras.models import load_model
import dannce.engine.ops as ops
import dannce.engine.nets as nets
import dannce.engine.losses as losses
import sys
if __name__ == "__main__":
mdl = sys.argv[1]
newmdl = mdl.split(".hdf5")[0] + "_coremodel.hdf5"
model = load_model(
mdl,
custom_objects={
"ops": ops,
"slice_input": nets.slice_input,
"mask_nan_keep_loss": losses.mask_nan_keep_loss,
"euclidean_distance_3D": losses.euclidean_distance_3D,
"centered_euclidean_distance_3D": losses.centered_euclidean_distance_3D,
},
)
model.layers[1].save_weights(newmdl)
print("Extracted and wrote new model to: " + newmdl)
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="metarace",
version="2.0.1",
author="Nathan Fraser",
author_email="ndf@metarace.com.au",
url="https://github.com/ndf-zz/metarace",
description="Cycle race abstractions",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
'Topic :: Other/Nonlisted Topic',
],
python_requires='>=3.0',
zip_safe=True,
    install_requires=[
        # NOTE: assumed PyPI distribution names; the bare module names
        # 'serial', 'rsvg', 'cairo', 'pango' and 'pangocairo' are not
        # installable packages. pyserial provides `serial`, pycairo provides
        # `cairo`, and PyGObject provides the Pango/Rsvg introspection bindings.
        'pyserial', 'pycairo', 'PyGObject', 'xlwt',
    ],
)
|
from . import empty
from .utils import (identifiers,
positive_integers,
unique_objects)
|
import sklearn.metrics as sk_metrics
import numpy as np
from tqdm import tqdm
from .average_meter import AverageMeter
from .base_evaluator import BaseEvaluator
from .sksurv.metrics import concordance_index_censored
from sklearn.linear_model import LinearRegression
class RegressionEvaluator(BaseEvaluator):
"""Class for evaluating regression models during train and test"""
def __init__(self, data_loaders, logger, device=None):
"""
Args:
data_loaders: DataLoader Class, iterable and returns (input target)
logger: logs values
device: Device on which to evaluate the model
"""
        # TODO: iter_per_eval is not so relevant; find a way to safely deprecate it
super().__init__(data_loaders, logger)
self.device = device
def _eval_split(self, model, data_loader, split, dataset_name, device,
results_dir='', report_probabilities=False):
"""Evaluate a model for a single data split
Args:
model: Model to evaluate
data_loader: DataLoader to sample from
split: Split to evaluate ("train", "val", or "test")
dataset_name: Name of dataset to be evaluated
device: Device on which to evaluate the model
Returns:
metrics, curves: Dictionaries with evaluation metrics and curves respectively
"""
records = {'targets': [], 'preds': []}
num_examples = len(data_loader.dataset)
# sample from the data loader and record model outputs
num_evaluated = 0
with tqdm(total=num_examples, unit=' ' + split + ' ' + dataset_name) as progress_bar:
for data in data_loader:
if num_evaluated >= num_examples:
break
inputs, targets = data
batch_preds = model.predict(inputs)
# record
records['preds'].append(batch_preds)
records['targets'].append(targets)
# expects list or numpy array
progress_bar.update(len(targets))
num_evaluated += len(targets)
# Map to summary dictionaries
        metrics, curves = self._get_summary_dicts(split, dataset_name, **records)
return metrics, curves
    def _get_summary_dicts(self, split, dataset_name,
                           preds, targets):
"""Get summary dictionaries given dictionary of records kept during evaluation
        Args:
            split: Split being evaluated
            dataset_name: Name of dataset to be evaluated
            preds: The predicted values from the model
            targets: The ground truth as a numpy array
Returns:
metrics: Dictionary of metrics for the current model.
curves: Dictionary of curves for the current model
"""
# use all available metrics
eval_metrics = self._get_eval_functions().keys()
metrics, curves = {}, {}
preds = np.concatenate(preds)
targets = np.concatenate(targets)
data_subset = f'{dataset_name}-{split}'
# assume single task
for metric in eval_metrics:
self._update_metric(metrics, metric, targets, preds, data_subset)
return metrics, curves
def _update_metric(self, dict_to_update, eval_metric, task_targets, task_preds, data_subset):
"""Calls eval fn and updates the metric dict with appropriate name and value"""
fn_dict_preds = self._get_eval_functions()
if eval_metric in fn_dict_preds:
inputs = task_preds
eval_fn = fn_dict_preds[eval_metric]
try:
dict_to_update.update({
data_subset + '_' + eval_metric: eval_fn(task_targets, inputs)
})
except Exception as e:
                print(f'Could not update metric {eval_metric} for {data_subset}')
print(e)
@staticmethod
def _get_eval_functions():
"""Return dictionary of eval functions"""
def cindex(targets, preds):
"""Use sksurv implementation of c-index with event-indicator all ones"""
return concordance_index_censored(np.ones(targets.shape[0], dtype=bool), targets, -preds)
def calibration(targets, preds):
"""Regress targets with preds for calibration"""
reg = LinearRegression().fit(preds.reshape(-1,1), targets)
return reg.coef_[0], reg.intercept_
fn_dict_preds = {
'MSE': sk_metrics.mean_squared_error,
'MAE': sk_metrics.mean_absolute_error,
'R2': sk_metrics.r2_score,
'C-Index': cindex,
'Calibration_slope_intercept': calibration
}
return fn_dict_preds
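# Hedged sketch (not part of the original module): the metric functions are
# plain callables over numpy arrays, so they can be exercised in isolation.
def _demo_eval_functions():
    fns = RegressionEvaluator._get_eval_functions()
    y_true = np.array([1.0, 2.0, 3.0, 4.0])
    y_pred = np.array([1.1, 1.9, 3.2, 3.8])
    return {'MSE': fns['MSE'](y_true, y_pred),
            'Calibration_slope_intercept': fns['Calibration_slope_intercept'](y_true, y_pred)}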
|
# -*- coding: utf-8 -*-
#
from helpers import Phash
def plot():
from matplotlib import pyplot as plt
import numpy as np
fig, ax = plt.subplots()
with plt.style.context(("ggplot")):
t = np.linspace(0, 2 * np.pi, 101)
s = np.sin(t)
ax.plot(t, s, "k-")
ax.fill_between(t, s + 0.1, s - 0.1, facecolor="k", alpha=0.2)
ax.set_xlim(t[0], t[-1])
ax.set_xlabel("t")
ax.set_ylabel("sin(t)")
ax.set_title("Simple plot")
ax.grid(True)
return fig
def test():
phash = Phash(plot())
assert phash.phash == "af2cd59221727725", phash.get_details()
|
# -*- coding: utf-8 -*-
import io
import os
import click
from click.testing import CliRunner
from pytest import fixture, mark
from storyscript.App import App
from storyscript.Cli import Cli
from storyscript.Project import Project
from storyscript.Version import version
from storyscript.exceptions.CompilerError import CompilerError
from storyscript.exceptions.StoryError import StoryError
@fixture
def runner():
return CliRunner()
@fixture
def echo(patch):
patch.object(click, 'echo')
@fixture
def app(patch):
patch.many(App, ['compile', 'parse'])
return App
def test_cli(runner, echo):
runner.invoke(Cli.main, [])
    # NOTE(vesuvium): I couldn't find a way to get the context in testing
assert click.echo.call_count == 1
def test_cli_alias_parse(runner, app):
runner.invoke(Cli.main, ['p'])
assert app.parse.call_count == 1
def test_cli_alias_compile(runner, app):
runner.invoke(Cli.main, ['c'])
assert app.compile.call_count == 1
def test_cli_alias_lex(runner, app, patch):
patch.object(App, 'lex')
runner.invoke(Cli.main, 'l')
assert app.lex.call_count == 1
def test_cli_alias_grammar(runner, app, patch):
patch.object(App, 'grammar')
runner.invoke(Cli.main, 'g')
assert app.grammar.call_count == 1
def test_cli_alias_new(patch, runner):
patch.object(Project, 'new')
runner.invoke(Cli.main, ['n', 'project'])
Project.new.assert_called_with('project')
def test_cli_alias_help(runner, echo):
runner.invoke(Cli.main, 'h')
click.echo.assert_called_once()
def test_cli_alias_version(runner, echo):
runner.invoke(Cli.main, 'v')
click.echo.assert_called_with(version)
def test_cli_alias_version_flag(runner, echo):
runner.invoke(Cli.main, '-v')
message = 'StoryScript {} - http://storyscript.org'.format(version)
click.echo.assert_called_with(message)
def test_cli_version_flag(runner, echo):
"""
Ensures --version outputs the version
"""
runner.invoke(Cli.main, ['--version'])
message = 'StoryScript {} - http://storyscript.org'.format(version)
click.echo.assert_called_with(message)
def test_cli_compile_with_ignore_option(runner, app):
"""
Ensures that ignore option works when compiling
"""
runner.invoke(Cli.compile, ['path/fake.story',
'--ignore', 'path/sub_dir/my_fake.story'])
App.compile.assert_called_with('path/fake.story', ebnf=None,
ignored_path='path/sub_dir/my_fake.story',
concise=False, first=False, features={})
def test_cli_parse_with_ignore_option(runner, app):
"""
Ensures that ignore option works when parsing
"""
runner.invoke(Cli.parse, ['path/fake.story', '--ignore',
'path/sub_dir/my_fake.story'])
App.parse.assert_called_with('path/fake.story', ebnf=None,
ignored_path='path/sub_dir/my_fake.story',
lower=False, features={})
def test_cli_parse(runner, echo, app, tree):
"""
Ensures the parse command produces the trees for given stories.
"""
App.parse.return_value = {'path': tree}
runner.invoke(Cli.parse, [])
App.parse.assert_called_with(os.getcwd(), ebnf=None,
ignored_path=None, lower=False, features={})
click.echo.assert_called_with(tree.pretty())
def test_cli_parse_raw(runner, echo, app, tree):
"""
Ensures the parse command supports raw trees
"""
App.parse.return_value = {'path': tree}
runner.invoke(Cli.parse, ['--raw'])
click.echo.assert_called_with(tree)
def test_cli_parse_path(runner, echo, app):
"""
Ensures the parse command supports specifying a path.
"""
runner.invoke(Cli.parse, ['/path'])
App.parse.assert_called_with('/path', ebnf=None,
ignored_path=None, lower=False, features={})
def test_cli_parse_ebnf(runner, echo, app):
"""
Ensures the parse command supports specifying an ebnf file.
"""
runner.invoke(Cli.parse, ['--ebnf', 'test.ebnf'])
App.parse.assert_called_with(os.getcwd(), ebnf='test.ebnf',
ignored_path=None, lower=False, features={})
def test_cli_parse_lower(runner, echo, app):
"""
Ensures the parse command supports lowering
"""
runner.invoke(Cli.parse, ['--lower'])
App.parse.assert_called_with(os.getcwd(), ebnf=None,
ignored_path=None, lower=True, features={})
def test_cli_parse_features(runner, echo, app):
"""
Ensures the parse command accepts features
"""
runner.invoke(Cli.parse, ['--preview=globals'])
App.parse.assert_called_with(os.getcwd(), ebnf=None,
ignored_path=None, lower=False,
features={'globals': True})
def test_cli_parse_features_positive(runner, echo, app):
"""
Ensures the parse command accepts positive features
"""
runner.invoke(Cli.parse, ['--preview=+globals'])
App.parse.assert_called_with(os.getcwd(), ebnf=None,
ignored_path=None, lower=False,
features={'globals': True})
def test_cli_parse_features_negative(runner, echo, app):
"""
Ensures the parse command accepts negative features
"""
runner.invoke(Cli.parse, ['--preview=-globals'])
App.parse.assert_called_with(os.getcwd(), ebnf=None,
ignored_path=None, lower=False,
features={'globals': False})
def test_cli_parse_features_chain(runner, echo, app):
"""
Ensures the parse command accepts feature chains
"""
runner.invoke(Cli.parse, ['--preview=globals', '--preview=-globals'])
App.parse.assert_called_with(os.getcwd(), ebnf=None,
ignored_path=None, lower=False,
features={'globals': False})
def test_cli_parse_features_unknown(runner, echo, app):
"""
Ensures the parse command reacts to unknown features
"""
e = runner.invoke(Cli.parse, ['--preview=unknown'])
App.parse.assert_not_called()
assert e.exit_code == 1
click.echo.assert_called_with(
'E0078: Invalid preview flag. '
'`unknown` is not a valid preview feature.'
)
def test_cli_parse_debug(runner, echo, app):
"""
    Ensures the parse command raises errors with debug=True
"""
runner.invoke(Cli.parse, ['--debug'])
ce = CompilerError(None)
app.parse.side_effect = StoryError(ce, None)
e = runner.invoke(Cli.parse, ['--debug', '/a/non/existent/file'])
assert e.exit_code == 1
assert isinstance(e.exception, CompilerError)
assert e.exception.message() == 'Unknown compiler error'
def test_cli_parse_ice(runner, echo, app):
"""
Ensures the parse command prints unknown errors
"""
app.parse.side_effect = Exception('ICE')
e = runner.invoke(Cli.parse, ['/a/non/existent/file'])
assert e.exit_code == 1
click.echo.assert_called_with((
'E0001: Internal error occured: ICE\n'
'Please report at https://github.com/storyscript/storyscript/issues'))
def test_cli_parse_debug_ice(runner, echo, app):
"""
    Ensures the parse command raises unknown errors with debug=True
"""
app.parse.side_effect = Exception('ICE')
e = runner.invoke(Cli.parse, ['--debug', '/a/non/existent/file'])
assert e.exit_code == 1
assert isinstance(e.exception, Exception)
assert str(e.exception) == 'ICE'
def test_cli_parse_not_found(runner, echo, app, patch):
"""
Ensures the parse command catches errors
"""
patch.object(StoryError, 'message')
ce = CompilerError(None)
app.parse.side_effect = StoryError(ce, None)
e = runner.invoke(Cli.parse, ['/a/non/existent/file'])
assert e.exit_code == 1
click.echo.assert_called_with(StoryError.message())
def test_cli_compile(patch, runner, echo, app):
"""
Ensures the compile command compiles a story.
"""
patch.object(click, 'style')
runner.invoke(Cli.compile, [])
App.compile.assert_called_with(os.getcwd(), ebnf=None,
ignored_path=None, concise=False,
first=False, features={})
click.style.assert_called_with('Script syntax passed!', fg='green')
click.echo.assert_called_with(click.style())
def test_cli_compile_path(patch, runner, app):
"""
Ensures the compile command supports specifying a path
"""
runner.invoke(Cli.compile, ['/path'])
App.compile.assert_called_with('/path', ebnf=None,
ignored_path=None, concise=False,
first=False, features={})
def test_cli_compile_output_file(patch, runner, app):
"""
Ensures the compile command supports specifying an output file.
"""
patch.object(io, 'open')
runner.invoke(Cli.compile, ['/path', 'hello.story', '-j'])
io.open.assert_called_with('hello.story', 'w')
io.open().__enter__().write.assert_called_with(App.compile())
@mark.parametrize('option', ['--silent', '-s'])
def test_cli_compile_silent(runner, echo, app, option):
"""
Ensures --silent makes everything quiet
"""
result = runner.invoke(Cli.compile, [option])
App.compile.assert_called_with(os.getcwd(), ebnf=None,
ignored_path=None, concise=False,
first=False, features={})
assert result.output == ''
assert click.echo.call_count == 0
@mark.parametrize('option', ['--concise', '-c'])
def test_cli_compile_concise(runner, echo, app, option):
"""
Ensures --concise makes everything concise
"""
runner.invoke(Cli.compile, [option])
App.compile.assert_called_with(os.getcwd(), ebnf=None,
ignored_path=None, concise=True,
first=False, features={})
@mark.parametrize('option', ['--first', '-f'])
def test_cli_compile_first(runner, echo, app, option):
"""
Ensures --first only yields the first story
"""
runner.invoke(Cli.compile, [option])
App.compile.assert_called_with(os.getcwd(), ebnf=None,
ignored_path=None, concise=False,
first=True, features={})
def test_cli_compile_debug(runner, echo, app):
runner.invoke(Cli.compile, ['--debug'])
App.compile.assert_called_with(os.getcwd(), ebnf=None,
ignored_path=None, concise=False,
first=False, features={})
def test_cli_compile_features(runner, echo, app):
runner.invoke(Cli.compile, ['--preview=globals'])
App.compile.assert_called_with(os.getcwd(), ebnf=None,
ignored_path=None, concise=False,
first=False, features={'globals': True})
@mark.parametrize('option', ['--json', '-j'])
def test_cli_compile_json(runner, echo, app, option):
"""
Ensures --json outputs json
"""
runner.invoke(Cli.compile, [option])
App.compile.assert_called_with(os.getcwd(), ebnf=None,
ignored_path=None, concise=False,
first=False, features={})
click.echo.assert_called_with(App.compile())
def test_cli_compile_ebnf(runner, echo, app):
runner.invoke(Cli.compile, ['--ebnf', 'test.ebnf'])
App.compile.assert_called_with(os.getcwd(), ebnf='test.ebnf',
ignored_path=None, concise=False,
first=False, features={})
def test_cli_compile_ice(runner, echo, app):
"""
Ensures the compile command prints unknown errors
"""
app.compile.side_effect = Exception('ICE')
e = runner.invoke(Cli.compile, ['/a/non/existent/file'])
assert e.exit_code == 1
click.echo.assert_called_with((
'E0001: Internal error occured: ICE\n'
'Please report at https://github.com/storyscript/storyscript/issues'))
def test_cli_compile_debug_ice(runner, echo, app):
"""
    Ensures the compile command raises unknown errors with debug=True
"""
app.compile.side_effect = Exception('ICE')
e = runner.invoke(Cli.compile, ['--debug', '/a/non/existent/file'])
assert e.exit_code == 1
assert isinstance(e.exception, Exception)
assert str(e.exception) == 'ICE'
def test_cli_compile_not_found(patch, runner, echo, app):
"""
Ensures the compile command catches errors
"""
ce = CompilerError(None)
app.compile.side_effect = StoryError(ce, None)
e = runner.invoke(Cli.compile, ['/a/non/existent/file'])
assert e.exit_code == 1
click.echo.assert_called_with(f'E0001: {StoryError._internal_error(ce)}')
def test_cli_compile_not_found_debug(runner, echo, app):
"""
Ensures the compile command raises errors with debug=True
"""
ce = CompilerError(None)
app.compile.side_effect = StoryError(ce, None)
e = runner.invoke(Cli.compile, ['--debug', '/a/non/existent/file'])
assert e.exit_code == 1
assert isinstance(e.exception, CompilerError)
assert e.exception.message() == 'Unknown compiler error'
def test_cli_lex(patch, magic, runner, app, echo):
"""
Ensures the lex command outputs lexer tokens
"""
token = magic(type='token', value='value')
patch.object(App, 'lex', return_value={'one.story': [token]})
runner.invoke(Cli.lex, [])
App.lex.assert_called_with(os.getcwd(), ebnf=None, features={})
click.echo.assert_called_with('0 token value')
assert click.echo.call_count == 2
def test_cli_lex_path(patch, magic, runner, app):
"""
    Ensures the lex command supports specifying a path
"""
patch.object(App, 'lex', return_value={'one.story': [magic()]})
runner.invoke(Cli.lex, ['/path'])
App.lex.assert_called_with('/path', ebnf=None, features={})
def test_cli_lex_ebnf(patch, runner):
"""
Ensures the lex command allows specifying an ebnf file.
"""
patch.object(App, 'lex')
runner.invoke(Cli.lex, ['--ebnf', 'my.ebnf'])
App.lex.assert_called_with(os.getcwd(), ebnf='my.ebnf', features={})
def test_cli_lex_features(patch, runner):
"""
Ensures the lex command allows specifying features
"""
patch.object(App, 'lex')
runner.invoke(Cli.lex, ['--preview=globals'])
App.lex.assert_called_with(os.getcwd(), ebnf=None,
features={'globals': True})
def test_cli_lex_ice(patch, runner, echo, app):
"""
Ensures the lex command prints unknown errors
"""
patch.object(App, 'lex', side_effect=Exception('ICE'))
e = runner.invoke(Cli.lex, ['/a/non/existent/file'])
assert e.exit_code == 1
click.echo.assert_called_with((
'E0001: Internal error occured: ICE\n'
'Please report at https://github.com/storyscript/storyscript/issues'))
def test_cli_lex_debug_ice(patch, runner, echo, app):
"""
    Ensures the lex command raises unknown errors with debug=True
"""
patch.object(App, 'lex', side_effect=Exception('ICE'))
e = runner.invoke(Cli.lex, ['--debug', '/a/non/existent/file'])
assert e.exit_code == 1
assert isinstance(e.exception, Exception)
assert str(e.exception) == 'ICE'
def test_cli_lex_not_found(patch, runner, echo, app):
"""
Ensures the lex command catches errors
"""
patch.object(StoryError, 'message')
ce = CompilerError(None)
patch.object(App, 'lex')
App.lex.side_effect = StoryError(ce, None)
e = runner.invoke(Cli.lex, ['/a/non/existent/file'])
assert e.exit_code == 1
click.echo.assert_called_with(StoryError.message())
def test_cli_lex_not_found_debug(patch, runner, echo, app):
"""
Ensures the lex command raises errors with debug=True
"""
ce = CompilerError(None)
patch.object(App, 'lex')
App.lex.side_effect = StoryError(ce, None)
e = runner.invoke(Cli.lex, ['--debug', '/a/non/existent/file'])
assert e.exit_code == 1
assert isinstance(e.exception, CompilerError)
assert e.exception.message() == 'Unknown compiler error'
def test_cli_grammar(patch, runner, app, echo):
patch.object(App, 'grammar')
runner.invoke(Cli.grammar, [])
assert app.grammar.call_count == 1
click.echo.assert_called_with(app.grammar())
def test_cli_new(patch, runner):
"""
Ensures Cli.new uses Project.new
"""
patch.object(Project, 'new')
runner.invoke(Cli.new, 'project')
Project.new.assert_called_with('project')
def test_cli_help(patch, runner, echo):
runner.invoke(Cli.help, [])
    # NOTE(vesuvium): another click quirk. context.parent.get_help seems to
    # interfere with mock, registering no call on click.echo
assert click.echo.call_count == 0
def test_cli_version(patch, runner, echo):
runner.invoke(Cli.version, [])
click.echo.assert_called_with(version)
|
#: W601
if a.has_key("b"):
print a
#: W602
raise DummyError, "Message"
#: W602
raise ValueError, "hello %s %s" % (1, 2)
#: Okay
raise type_, val, tb
raise Exception, Exception("f"), t
#: W603
if x <> 0:
x = 0
#: W604
val = `1 + 2`
#: W605
regex = '\.png$'
#: W605
regex = '''
\.png$
'''
#: Okay
regex = r'\.png$'
regex = '\\.png$'
regex = r'''
\.png$
'''
regex = r'''
\\.png$
'''
s = '\\'
#: W606
async = 42
#: W606
await = 42
#: W606
def async():
pass
#: W606
def await():
pass
#: W606
class async:
pass
#: W606
class await:
pass
#: Okay
async def read_data(db):
data = await db.fetch('SELECT ...')
#: Okay
if await fut:
pass
if (await fut):
pass
if await fut + 1:
pass
if (await fut) + 1:
pass
pair = await fut, 'spam'
pair = (await fut), 'spam'
with await fut, open():
pass
with (await fut), open():
pass
await foo()['spam'].baz()()
return await coro()
return (await coro())
res = await coro() ** 2
res = (await coro()) ** 2
func(a1=await coro(), a2=0)
func(a1=(await coro()), a2=0)
await foo() + await bar()
(await foo()) + (await bar())
-await foo()
-(await foo())
|
from __future__ import unicode_literals
from django_evolution.mutations import MoveToDjangoMigrations
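# MoveToDjangoMigrations marks the point where this app stops using
# django_evolution's own evolution history and hands schema tracking
# over to Django's built-in migrations framework.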
MUTATIONS = [
MoveToDjangoMigrations(),
]
|
"""base code"""
import os
import subprocess
from pyngrok import ngrok
try:
COLAB_ENV = True
from google.colab import drive # type:ignore
except ImportError:
COLAB_ENV = False
PIPE = subprocess.PIPE
EXTENSIONS = [
"ms-python.python",
"jithurjacob.nbpreviewer",
"njpwerner.autodocstring",
"ms-python.vscode-pylance",
"ms-vscode-remote.remote-wsl",
"ms-python.anaconda-extension-pack",
"donjayamanne.githistory",
"bee.git-temporal-vscode",
"kiteco.kite",
"vscode-icons-team.vscode-icons",
]
# "julialang.language-julia"
class ColabCode:
"""[sets up code server on an ngrok link]"""
def __init__(
self,
port=10000,
password=None,
mount_drive=False,
add_extensions=None,
prompt="powerline-plain",
get_zsh=False,
):
self.port = port
self.password = password
self._mount = mount_drive
self._prompt = prompt
self._zsh = get_zsh
self.extensions = EXTENSIONS
        if add_extensions:
            # Validate every entry, not just the first one.
            if isinstance(add_extensions, list) and all(
                isinstance(ext, str) for ext in add_extensions
            ):
                self.extensions += add_extensions
            else:
                raise TypeError(
                    "You need to pass a list of string(s) e.g. ['ms-python.python']"
                )
self._install_code()
self._install_extensions()
# install code-server, then extensions
# creates the User folder, then transfer settings
self._settings()
self._start_server()
self._run_code()
def _settings(self):
"""install ohmybash and set up code_server settings.json file
Plus, set up powerline bash prompt
https://github.com/ohmybash/oh-my-bash
https://github.com/cdr/code-server/issues/1680#issue-620677320
"""
subprocess.run(
[
"wget",
"https://raw.githubusercontent.com/ohmybash/oh-my-bash/master/tools/install.sh",
"-O",
"install_ohmybash.sh",
],
stdout=PIPE,
check=True,
)
subprocess.run(["sh", "install_ohmybash.sh"], stdout=PIPE, check=True)
if self._zsh:
subprocess.run(["sh", "./code_server/get_zsh.sh"], stdout=PIPE, check=True)
# set bash theme as 'powerline-plain'
# for undu's theme : `source ~/.powerline.bash` works
if self._prompt in [
"powerline-plain",
"powerline",
"agnoster",
"powerline-undu",
]:
subprocess.run(
["sh", "./code_server/sed.sh", f"{self._prompt}"],
stdout=PIPE,
check=True,
)
# either `shell=False` or `cp x y` instead of list
# https://stackoverflow.com/a/17880895/13070032
for src, dest in {
"settings.json": "~/.local/share/code-server/User/settings.json",
"coder.json": "~/.local/share/code-server/coder.json",
".undu-powerline.bash": "~/.powerline.bash",
}.items():
subprocess.call(
f"cp ./code_server/{src} {dest}",
stdout=PIPE,
shell=True,
)
# to enable `python -m venv envname`
# also add nano [vim, tmux (default py2!), ... if needed]
subprocess.call(
"apt-get update && apt-get install python3-venv nano",
stdout=PIPE,
shell=True,
)
def _install_code(self):
subprocess.run(
["wget", "https://code-server.dev/install.sh"],
stdout=PIPE,
check=True,
)
subprocess.run(["sh", "install.sh"], stdout=PIPE, check=True)
def _install_extensions(self):
"""set check as False - otherwise non existing extension will give error"""
for ext in self.extensions:
subprocess.run(
["code-server", "--install-extension", f"{ext}"], check=False
)
def _start_server(self):
active_tunnels = ngrok.get_tunnels()
for tunnel in active_tunnels:
public_url = tunnel.public_url
ngrok.disconnect(public_url)
url = ngrok.connect(port=self.port, options={"bind_tls": True})
print(f"Code Server can be accessed on: {url}")
def _run_code(self):
os.system(f"fuser -n tcp -k {self.port}")
_tele = "--disable-telemetry"
if self._mount and COLAB_ENV:
drive.mount("/content/drive")
if self.password:
code_cmd = (
f"PASSWORD={self.password} code-server --port {self.port} {_tele}"
)
else:
code_cmd = f"code-server --port {self.port} --auth none {_tele}"
with subprocess.Popen(
[code_cmd],
shell=True,
stdout=PIPE,
bufsize=1,
universal_newlines=True,
) as proc:
for line in proc.stdout:
print(line, end="")
|
# Assigned ports: 11995-11999
# WHATSAT query to the Hands server
import asyncio
async def main():
reader, writer = await asyncio.open_connection('127.0.0.1', 11995)
#writer.write("IAMAT kiwi.cs.ucla.edu +34.068930-118.445127 1520023934.918963997".encode())
writer.write("WHATSAT kiwi.cs.ucla.edu 10 5".encode())
writer.write_eof()
data = await reader.read()
print('Received: {}'.format(data.decode()))
writer.close()
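# Minimal stand-in server sketch (hypothetical, for local testing only):
# it echoes the raw request back so the client above has bytes to read.
# The real server is assumed to listen on 127.0.0.1:11995.
async def stub_server():
    async def handle(reader, writer):
        request = await reader.read()  # client signals end of request via write_eof()
        writer.write(request)          # echo the request back verbatim
        await writer.drain()
        writer.close()
    server = await asyncio.start_server(handle, '127.0.0.1', 11995)
    async with server:
        await server.serve_forever()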
if __name__ == '__main__':
asyncio.run(main())
|
import math
from .core import get_catboost_bin_module, CatBoost, CatBoostError
from .utils import _import_matplotlib
_catboost = get_catboost_bin_module()
FeatureExplanation = _catboost.FeatureExplanation
def _check_model(model):
if not isinstance(model, CatBoost):
raise CatBoostError("Model should be CatBoost")
def to_polynom(model):
_check_model(model)
return _catboost.to_polynom(model._object)
def to_polynom_string(model):
_check_model(model)
return _catboost.to_polynom_string(model._object)
def explain_features(model):
_check_model(model)
return _catboost.explain_features(model._object)
def calc_features_strength(model):
explanations = explain_features(model)
features_strength = [expl.calc_strength() for expl in explanations]
return features_strength
def plot_pdp(arg, size_per_plot=(5, 5), plots_per_row=None):
with _import_matplotlib() as _plt:
plt = _plt
if isinstance(arg, CatBoost):
arg = explain_features(arg)
if isinstance(arg, _catboost.FeatureExplanation):
arg = [arg]
assert len(arg) > 0
assert isinstance(arg, list)
for element in arg:
assert isinstance(element, _catboost.FeatureExplanation)
figs = []
for feature_explanation in arg:
dimension = feature_explanation.dimension()
if not plots_per_row:
plots_per_row = min(5, dimension)
rows = int(math.ceil(dimension / plots_per_row))
fig, axes = plt.subplots(rows, plots_per_row)
fig.suptitle("Feature #{}".format(feature_explanation.feature))
if rows == 1:
axes = [axes]
if plots_per_row == 1:
axes = [[row_axes] for row_axes in axes]
fig.set_size_inches(size_per_plot[0] * plots_per_row, size_per_plot[1] * rows)
for dim in range(dimension):
ax = axes[dim // plots_per_row][dim % plots_per_row]
ax.set_title("Dimension={}".format(dim))
ax.set_xlabel("feature value")
ax.set_ylabel("model value")
borders, values = feature_explanation.calc_pdp(dim)
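            # Build a step plot: hold expected_bias until the first border,
            # then step to each successive PDP value at its border.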
xs = []
ys = []
if feature_explanation.type == "Float":
if len(borders) == 0:
xs.append(-0.1)
xs.append(0.1)
ys.append(feature_explanation.expected_bias[dim])
ys.append(feature_explanation.expected_bias[dim])
ax.plot(xs, ys)
else:
offset = max(0.1, (borders[0] + borders[-1]) / 2)
xs.append(borders[0] - offset)
ys.append(feature_explanation.expected_bias[dim])
for border, value in zip(borders, values):
xs.append(border)
ys.append(ys[-1])
xs.append(border)
ys.append(value)
xs.append(borders[-1] + offset)
ys.append(ys[-1])
ax.plot(xs, ys)
else:
xs = ['bias'] + list(map(str, borders))
                ys = [feature_explanation.expected_bias[dim]] + values
ax.bar(xs, ys)
figs.append(fig)
return figs
def plot_features_strength(model, height_per_feature=0.5, width_per_plot=5, plots_per_row=None):
with _import_matplotlib() as _plt:
plt = _plt
strengths = calc_features_strength(model)
dimension = len(strengths[0])
features = len(strengths)
if not plots_per_row:
plots_per_row = min(5, dimension)
rows = int(math.ceil(dimension / plots_per_row))
fig, axes = plt.subplots(rows, plots_per_row)
if rows == 1:
axes = [axes]
if plots_per_row == 1:
axes = [[row_axes] for row_axes in axes]
fig.suptitle("Features Strength")
fig.set_size_inches(width_per_plot * plots_per_row, height_per_feature * features * rows)
    for dim in range(dimension):
        # Use a separate name here: rebinding `strengths` inside the loop
        # would clobber the per-feature list and break every dimension
        # after the first.
        dim_strengths = sorted((s[dim], i) for i, s in enumerate(strengths))
        labels = ["Feature #{}".format(f) for _, f in dim_strengths]
        values = [s for s, _ in dim_strengths]
        ax = axes[dim // plots_per_row][dim % plots_per_row]
        colors = [(1, 0, 0) if s > 0 else (0, 0, 1) for s in values]
        ax.set_title("Dimension={}".format(dim))
        ax.barh(range(len(values)), values, align='center', color=colors)
        ax.set_yticks(range(len(values)))
        ax.set_yticklabels(labels)
# ax.invert_yaxis() # labels read top-to-bottom
ax.set_xlabel('Prediction value change')
return fig
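# Hypothetical convenience wrapper, shown only to illustrate how the two
# plotting entry points above combine for a fitted CatBoost model.
def plot_model_explanations(model, **pdp_kwargs):
    """Return (pdp_figures, strength_figure) for a fitted CatBoost model."""
    return plot_pdp(model, **pdp_kwargs), plot_features_strength(model)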
|
# -----------------------------------------------------
# test_eep.py: Unit tests for eep.py.
# -----------------------------------------------------
# Make sure we can import i2p
import sys; sys.path += ['../../']
import traceback
from i2p import eep
def verify_html(s):
"""Raise an error if s does not end with </html>"""
assert s.strip().lower()[-7:] == '</html>'
def eepget_test():
try:
verify_html(eep.urlget('http://duck.i2p/index.html'))
verify_html(eep.urlget('http://duck.i2p/'))
verify_html(eep.urlget('http://duck.i2p'))
verify_html(eep.urlget('duck.i2p/'))
verify_html(eep.urlget('duck.i2p'))
except Exception, e:
print 'Unit test failed for eepget'
print "Note that urllib2.urlopen uses IE's proxy settings " + \
"in Windows."
print "This may cause " + \
"urllib2.urlopen('http://www.google.com/') to fail."
traceback.print_exc(); sys.exit()
print 'eepget: OK'
def test():
eepget_test()
if __name__ == '__main__':
print 'Testing:'
test()
|