418699
import torch
import torch.nn as nn


# Discriminator Model
class Dis28x28(nn.Module):
    def __init__(self):
        super(Dis28x28, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(1, 20, kernel_size=5, stride=1, padding=0),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(20, 50, kernel_size=5, stride=1, padding=0),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(50, 500, kernel_size=4, stride=1, padding=0),
            nn.PReLU(),
            nn.Conv2d(500, 2, kernel_size=1, stride=1, padding=0),
        )

    def forward(self, x):
        out = self.model(x)
        return out.squeeze()


# Generator Model
class Gen28x28(nn.Module):
    def __init__(self, latent_dims):
        super(Gen28x28, self).__init__()
        self.model = nn.Sequential(
            nn.ConvTranspose2d(latent_dims, 1024, kernel_size=4, stride=1),
            nn.BatchNorm2d(1024, affine=False),
            nn.PReLU(),
            nn.ConvTranspose2d(1024, 512, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(512, affine=False),
            nn.PReLU(),
            nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(256, affine=False),
            nn.PReLU(),
            nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(128, affine=False),
            nn.PReLU(),
            nn.ConvTranspose2d(128, 1, kernel_size=6, stride=1, padding=1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        x = x.view(x.size(0), x.size(1), 1, 1)
        out = self.model(x)
        return out
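
# A minimal smoke-test sketch for the models above (assuming a 100-dim latent;
# the batch size of 8 is arbitrary):
gen = Gen28x28(latent_dims=100)
dis = Dis28x28()
z = torch.randn(8, 100)
fake = gen(z)       # -> (8, 1, 28, 28)
logits = dis(fake)  # -> (8, 2)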
418706
import threading
from typing import Callable

import paramiko
import requests
from decorator import decorator
from sshtunnel import SSHTunnelForwarder

from logger import logger
from perfrunner.helpers.misc import uhex
from perfrunner.helpers.rest import RestHelper
from perfrunner.settings import ClusterSpec, TestConfig


@decorator
def with_profiles(method: Callable, *args, **kwargs):
    test = args[0]
    test.profiler.schedule()
    return method(*args, **kwargs)


class Timer(threading.Timer):

    def __init__(self, interval, function, num_runs=1, args=None, kwargs=None):
        super().__init__(interval, function, args, kwargs)
        self.num_runs = num_runs
        self.daemon = True

    def run(self):
        super().run()
        self.repeat()

    def repeat(self):
        self.num_runs -= 1
        if self.num_runs:
            self.finished.clear()
            self.run()
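
# A minimal usage sketch for the repeating Timer above (hypothetical values):
#   Timer(interval=1.0, function=lambda: logger.info('tick'), num_runs=3).start()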
class Profiler:

    DEBUG_PORTS = {
        'fts': 8094,
        'index': 9102,
        'goxdcr': 9998,
        'kv': 9998,  # will be deprecated in future
        'n1ql': 8093,
        'eventing': 8096,
        'projector': 9999,
    }

    ENDPOINTS = {
        'cpu': 'http://127.0.0.1:{}/debug/pprof/profile',
        'heap': 'http://127.0.0.1:{}/debug/pprof/heap',
        'goroutine': 'http://127.0.0.1:{}/debug/pprof/goroutine?debug=2',
    }

    def __init__(self, cluster_spec: ClusterSpec, test_config: TestConfig):
        self.test_config = test_config
        self.rest = RestHelper(cluster_spec, test_config)
        self.master_node = next(cluster_spec.masters)
        self.ssh_username, self.ssh_password = cluster_spec.ssh_credentials
        self.cluster_spec = cluster_spec
        self.profiling_settings = test_config.profiling_settings
        self.linux_perf_path = '/opt/couchbase/var/lib/couchbase/logs/'

    def new_tunnel(self, host: str, port: int) -> SSHTunnelForwarder:
        return SSHTunnelForwarder(
            ssh_address_or_host=host,
            ssh_username=self.ssh_username,
            ssh_password=self.ssh_password,
            remote_bind_address=('127.0.0.1', port),
        )

    def save(self, host: str, service: str, profile: str, content: bytes):
        fname = '{}_{}_{}_{}.pprof'.format(host, service, profile, uhex()[:6])
        logger.info('Collected {}'.format(fname))
        with open(fname, 'wb') as fh:
            fh.write(content)

    def linux_perf_profile(self, host: str, fname: str, path: str):
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        try:
            client.connect(hostname=host, username=self.ssh_username,
                           password=self.ssh_password)
        except Exception:
            logger.info('Cannot connect to "{}" via SSH'.format(host))
            exit()
        logger.info('Capturing Linux profile using perf record')
        cmd = 'perf record -a -F {} -g --call-graph {} ' \
              '-p $(pgrep memcached) -o {}{} ' \
              '-- sleep {}'.format(self.profiling_settings.linux_perf_frequency,
                                   self.profiling_settings.linux_perf_callgraph,
                                   path,
                                   fname,
                                   self.profiling_settings.linux_perf_profile_duration)
        stdin, stdout, stderr = client.exec_command(cmd)
        exit_status = stdout.channel.recv_exit_status()
        if exit_status == 0:
            logger.info('perf record: Linux perf profile capture completed')
        else:
            logger.info('perf record failed, exit_status: {}'.format(exit_status))
        client.close()

    def profile(self, host: str, service: str, profile: str):
        logger.info('Collecting {} profile on {}'.format(profile, host))
        endpoint = self.ENDPOINTS[profile]
        port = self.DEBUG_PORTS[service]
        if self.profiling_settings.linux_perf_profile_flag:
            logger.info('Collecting {} profile on {} using Linux perf '
                        'record'.format(profile, host))
            fname = 'linux_{}_{}_{}_perf.data'.format(host, profile, uhex()[:4])
            self.linux_perf_profile(host=host, fname=fname, path=self.linux_perf_path)
        else:
            logger.info('Collecting {} profile on {}'.format(profile, host))
            with self.new_tunnel(host, port) as tunnel:
                url = endpoint.format(tunnel.local_bind_port)
                response = requests.get(url=url, auth=self.rest.auth)
                self.save(host, service, profile, response.content)

    def timer(self, **kwargs):
        timer = Timer(
            function=self.profile,
            interval=self.test_config.profiling_settings.interval,
            num_runs=self.test_config.profiling_settings.num_profiles,
            kwargs=kwargs,
        )
        timer.start()

    def schedule(self):
        for service in self.test_config.profiling_settings.services:
            logger.info('Scheduling profiling of the "{}" service'.format(service))
            if service == 'projector':
                active_nodes_by_role = self.rest.get_active_nodes_by_role(
                    self.master_node, role='kv')
            else:
                active_nodes_by_role = self.rest.get_active_nodes_by_role(
                    self.master_node, role=service)
            for server in active_nodes_by_role:
                for profile in self.test_config.profiling_settings.profiles:
                    self.timer(host=server, service=service, profile=profile)
418737
class Reporter:
    def __init__(self, checker):
        pass

    def doReport(self):
        pass

    def appendMsg(self):
        pass

    def export(self):
        pass
418741
from django.urls import path

from .views import (
    CreateEventView,
    EventDetailView,
    QrEventListView,
    RegisterTicketsView,
    ScanTicketView,
    UpdateTicketsView,
    render_ticket,
)

urlpatterns = [
    path("", QrEventListView.as_view(), name="qr-event-list"),
    path("detail/<int:pk>/", EventDetailView.as_view(), name="qr-event-detail"),
    path("create-qrevent/", CreateEventView.as_view(), name="create-qr-event"),
    path(
        "register/<int:qr_event_id>/<str:qr_ticket_id>/",
        RegisterTicketsView.as_view(),
        name="register_tickets",
    ),
    path(
        "render/<int:qr_event_id>/<str:qr_ticket_id>/",
        render_ticket,
        name="render",
    ),
    path("scan/", ScanTicketView.as_view(), name="scan"),
    path("scan/<str:ticket_id>", UpdateTicketsView.as_view(), name="register"),
]
418762
from localstack.services.cloudformation.service_models import GenericBaseModel
from localstack.utils.aws import aws_stack
from localstack.utils.common import select_attributes


class Route53RecordSet(GenericBaseModel):
    @staticmethod
    def cloudformation_type():
        return "AWS::Route53::RecordSet"

    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.props.get("Name")  # Ref attribute is the domain name itself

    def fetch_state(self, stack_name, resources):
        route53 = aws_stack.connect_to_service("route53")
        props = self.props
        result = route53.list_resource_record_sets(HostedZoneId=props["HostedZoneId"])[
            "ResourceRecordSets"
        ]
        result = [r for r in result if r["Name"] == props["Name"] and r["Type"] == props["Type"]]
        return (result or [None])[0]

    @staticmethod
    def get_deploy_templates():
        def param_change_batch(params, **kwargs):
            attr_names = [
                "Name",
                "Type",
                "SetIdentifier",
                "Weight",
                "Region",
                "GeoLocation",
                "Failover",
                "MultiValueAnswer",
                "TTL",
                "ResourceRecords",
                "AliasTarget",
                "HealthCheckId",
            ]
            attrs = select_attributes(params, attr_names)
            alias_target = attrs.get("AliasTarget", {})
            alias_target["EvaluateTargetHealth"] = alias_target.get("EvaluateTargetHealth", False)
            return {
                "Comment": params.get("Comment", ""),
                "Changes": [{"Action": "CREATE", "ResourceRecordSet": attrs}],
            }

        return {
            "create": {
                "function": "change_resource_record_sets",
                "parameters": {
                    "HostedZoneId": "HostedZoneId",
                    "ChangeBatch": param_change_batch,
                },
            }
        }
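
# A sketch of what param_change_batch above produces for a plain A record
# (hypothetical values; only the listed attribute names are kept):
#   param_change_batch({"Name": "example.com.", "Type": "A", "TTL": "300",
#                       "ResourceRecords": [{"Value": "192.0.2.1"}]})
#   -> {"Comment": "",
#       "Changes": [{"Action": "CREATE",
#                    "ResourceRecordSet": {"Name": "example.com.", "Type": "A",
#                                          "TTL": "300",
#                                          "ResourceRecords": [{"Value": "192.0.2.1"}]}}]}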
418816
from typing import Any

from django_filters import rest_framework as filters
from rest_framework import status
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView
from rest_framework.permissions import AllowAny, IsAuthenticatedOrReadOnly
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.reverse import reverse

from ...models import JOB_INDEXES, Job, User
from ..permissions import IsAuthorOrReadOnly
from ..serializers import ContactSerializer, JobSerializer, UserSerializer

INDEXED_FILTERS = {field: ["contains", "exact"] for field in JOB_INDEXES}


class JobsViewList(ListCreateAPIView):
    serializer_class = JobSerializer
    queryset = Job.objects.all()
    permission_classes = [IsAuthenticatedOrReadOnly]
    filter_backends = (filters.DjangoFilterBackend,)
    # REVIEW: Which fields can be used to filter queries.
    # Here we use the same as the model indexes and can be extended
    filterset_fields = INDEXED_FILTERS

    def perform_create(self, serializer):
        serializer.save(user=self.request.user)


class JobsViewDetails(RetrieveUpdateDestroyAPIView):
    serializer_class = JobSerializer
    permission_classes = [IsAuthorOrReadOnly]
    queryset = Job.objects.all()
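
# Example filter request against the list endpoint (the field name is
# hypothetical; any key of JOB_INDEXES works, with `__contains` or `__exact`):
#   GET /jobs/?title__contains=engineer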
418845
from __future__ import absolute_import

__all__ = ['ProviderNotRegistered']


class ProviderNotRegistered(Exception):
    pass


class IdentityNotValid(Exception):
    pass
418846
import os

from helpers import parameters as params

configReader = params.GetConfigReader()
name = 'bamgineer'


def GetBamgineerMem(mem_type):
    """returns the specified value from the configuration file"""
    mem_string = 'bamgineer_mem_' + mem_type
    return configReader.get('CLUSTER', mem_string)


def GetExons():
    exons_path = configReader.get('REFERENCE', 'exons_path')
    return exons_path


def GetRef():
    reference_path = configReader.get('REFERENCE', 'reference_path')
    return reference_path


def GetVCF():
    vcf_path = configReader.get('REFERENCE', 'vcf_path')
    return vcf_path


def GetResultsPath():
    results_path = configReader.get('RESULTS', 'results_path')
    return results_path


def GetJavaPath():
    # If the config file specifies a path, use it; otherwise search the system
    # PATH for the tool, and give up if it still cannot be found.
    try:
        java_path = configReader.get('SOFTWARE', 'java_path')
        return java_path
    except Exception:
        inpath = tool_loaded('java')
        if inpath:
            print('User has not defined Java but the tool is in system path: ' + inpath)
            java_path = inpath
            return java_path
        else:
            print("Couldn't find Java in the path. Exiting the program!")
            return


def GetBeaglePath():
    beagle_path = configReader.get('SOFTWARE', 'beagle_path')
    return beagle_path


def GetSamtoolsPath():
    try:
        samtools_path = configReader.get('SOFTWARE', 'samtools_path')
        return samtools_path
    except Exception:
        inpath = tool_loaded('samtools')
        if inpath:
            print('User has not defined Samtools but the tool is in system path: ' + inpath)
            samtools_path = inpath
            return samtools_path
        else:
            print("Couldn't find Samtools in the path. Exiting the program!")
            return


def GetBedtoolsPath():
    try:
        bedtools_path = configReader.get('SOFTWARE', 'bedtools_path')
        return bedtools_path
    except Exception:
        inpath = tool_loaded('bedtools')
        if inpath:
            print('User has not defined Bedtools but the tool is in system path: ' + inpath)
            bedtools_path = inpath
            return bedtools_path
        else:
            print("Couldn't find Bedtools in the path. Exiting the program!")
            return


def GetVCFtoolsPath():
    try:
        vcftools_path = configReader.get('SOFTWARE', 'vcftools_path')
        return vcftools_path
    except Exception:
        inpath = tool_loaded('vcftools')
        if inpath:
            print('User has not defined VCFtools but the tool is in system path: ' + inpath)
            vcftools_path = inpath
            return vcftools_path
        else:
            print("Couldn't find VCFtools in the path. Exiting the program!")
            return


def GetSambambaPath():
    try:
        sambamba_path = configReader.get('SOFTWARE', 'sambamba_path')
        return sambamba_path
    except Exception:
        inpath = tool_loaded('sambamba')
        if inpath:
            print('User has not defined Sambamba but the tool is in system path: ' + inpath)
            sambamba_path = inpath
            return sambamba_path
        else:
            print("Couldn't find Sambamba in the path. Exiting the program!")
            return


def tool_loaded(program):
    """Locate `program` either as an absolute path or on the system PATH."""
    def is_exe(fpath):
        return os.path.exists(fpath) and os.access(fpath, os.X_OK)

    def ext_candidates(fpath):
        yield fpath
        for ext in os.environ.get("PATHEXT", "").split(os.pathsep):
            yield fpath + ext

    fpath, fname = os.path.split(program)
    if fpath:
        if is_exe(program):
            return program
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            exe_file = os.path.join(path, program)
            for candidate in ext_candidates(exe_file):
                if is_exe(candidate):
                    return candidate
    return False
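
# Example: resolve an executable from the system PATH (result depends on the host):
#   samtools = tool_loaded('samtools')  # absolute path string, or False if not found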
418862
import datetime
import os
import urllib2
import urllib
import numpy
import json
from django.shortcuts import render
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template import loader
from pages.models import Page, SearchTerm
from pgrank.pgrank import pgrank
import nltk.classify.util, nltk.metrics
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import movie_reviews
from nltk.corpus import stopwords
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
from nltk.probability import FreqDist, ConditionalFreqDist
import collections
import logging

# Bing API key
API_KEY = "yourkeyhere"

# PARAMETERS
test_mode = False
num_reviews = 20
num_bestwords = 20000
stopwords = set(stopwords.words('english'))  # note: shadows the imported corpus module
method_selfeatures = 'best_words_features'


def about(request):
    return render(request, 'movie_reviews/about.html',
                  {'request': request})


def bing_api(query):
    keyBing = API_KEY  # get Bing key from: https://datamarket.azure.com/account/keys
    credentialBing = 'Basic ' + (':%s' % keyBing).encode('base64')[:-1]  # the "-1" removes the trailing "\n" which encode adds
    searchString = '%27X' + query.replace(" ", '+') + 'movie+review%27'
    top = 50  # maximum allowed by Bing
    reviews_urls = []
    if num_reviews < top:
        offset = 0
        url = 'https://api.datamarket.azure.com/Bing/Search/Web?' + \
              'Query=%s&$top=%d&$skip=%d&$format=json' % (searchString, num_reviews, offset)
        request = urllib2.Request(url)
        request.add_header('Authorization', credentialBing)
        requestOpener = urllib2.build_opener()
        response = requestOpener.open(request)
        results = json.load(response)
        reviews_urls = [d['Url'] for d in results['d']['results']]
    else:
        nqueries = int(float(num_reviews) / top) + 1
        for i in xrange(nqueries):
            offset = top * i
            if i == nqueries - 1:
                top = num_reviews - offset
                url = 'https://api.datamarket.azure.com/Bing/Search/Web?' + \
                      'Query=%s&$top=%d&$skip=%d&$format=json' % (searchString, top, offset)
                request = urllib2.Request(url)
                request.add_header('Authorization', credentialBing)
                requestOpener = urllib2.build_opener()
                response = requestOpener.open(request)
            else:
                top = 50
                url = 'https://api.datamarket.azure.com/Bing/Search/Web?' + \
                      'Query=%s&$top=%d&$skip=%d&$format=json' % (searchString, top, offset)
                request = urllib2.Request(url)
                request.add_header('Authorization', credentialBing)
                requestOpener = urllib2.build_opener()
                response = requestOpener.open(request)
            results = json.load(response)
            reviews_urls += [d['Url'] for d in results['d']['results']]
    print 'REVIEWS NUMBER:', len(reviews_urls)
    return reviews_urls


def parse_bing_results():
    file_data = open(os.path.dirname(__file__) + '/bing_the_martian_results.json', 'r')
    bing_json = json.load(file_data)
    print len(bing_json['d']['results'])
    reviews_urls = [d['Url'] for d in bing_json['d']['results']]
    print reviews_urls
    return reviews_urls


def analyzer(request):
    context = {}
    if request.method == 'POST':
        post_data = request.POST
        query = post_data.get('query', None)
        if query:
            return redirect('%s?%s' % (reverse('webmining_server.views.analyzer'),
                                       urllib.urlencode({'q': query})))
    elif request.method == 'GET':
        get_data = request.GET
        query = get_data.get('q')
        if not query:
            return render_to_response(
                'movie_reviews/home.html', RequestContext(request, context))

    context['query'] = query
    stripped_query = query.strip().lower()
    urls = []
    if test_mode:
        urls = parse_bing_results()
    else:
        urls = bing_api(stripped_query)
    if len(urls) == 0:
        return render_to_response(
            'movie_reviews/noreviewsfound.html', RequestContext(request, context))
    print 'urls:', str(urls[:num_reviews])

    if not SearchTerm.objects.filter(term=stripped_query).exists():
        s = SearchTerm(term=stripped_query)
        s.save()
        try:
            # scrape
            cmd = 'cd ../scrapy_spider & scrapy crawl scrapy_spider_reviews -a url_list=%s -a search_key=%s' % ('\"' + str(','.join(urls[:num_reviews]).encode('utf-8')) + '\"', '\"' + str(stripped_query) + '\"')
            print 'cmd:', cmd
            os.system(cmd)
        except Exception:
            print 'error!'
            s.delete()
    else:
        # collect the pages already scraped
        s = SearchTerm.objects.get(term=stripped_query)

    # calc num pages
    pages = s.pages.all().filter(review=True)
    if len(pages) == 0:
        s.delete()
        return render_to_response(
            'movie_reviews/noreviewsfound.html', RequestContext(request, context))
    s.num_reviews = len(pages)
    s.save()
    context['searchterm_id'] = int(s.id)

    # train classifier with nltk
    def train_clf(method):
        negidxs = movie_reviews.fileids('neg')
        posidxs = movie_reviews.fileids('pos')
        if method == 'stopword_filtered_words_features':
            negfeatures = [(stopword_filtered_words_features(movie_reviews.words(fileids=[file])), 'neg') for file in negidxs]
            posfeatures = [(stopword_filtered_words_features(movie_reviews.words(fileids=[file])), 'pos') for file in posidxs]
        elif method == 'best_words_features':
            negfeatures = [(best_words_features(movie_reviews.words(fileids=[file])), 'neg') for file in negidxs]
            posfeatures = [(best_words_features(movie_reviews.words(fileids=[file])), 'pos') for file in posidxs]
        elif method == 'best_bigrams_words_features':
            negfeatures = [(best_bigrams_words_features(movie_reviews.words(fileids=[file])), 'neg') for file in negidxs]
            posfeatures = [(best_bigrams_words_features(movie_reviews.words(fileids=[file])), 'pos') for file in posidxs]
        trainfeatures = negfeatures + posfeatures
        clf = NaiveBayesClassifier.train(trainfeatures)
        return clf

    def stopword_filtered_words_features(words):
        return dict([(word, True) for word in words if word not in stopwords])

    # Eliminate low-information features
    def GetHighInformationWordsChi(num_bestwords):
        word_fd = FreqDist()
        label_word_fd = ConditionalFreqDist()
        for word in movie_reviews.words(categories=['pos']):
            word_fd[word.lower()] += 1
            label_word_fd['pos'][word.lower()] += 1
        for word in movie_reviews.words(categories=['neg']):
            word_fd[word.lower()] += 1
            label_word_fd['neg'][word.lower()] += 1
        pos_word_count = label_word_fd['pos'].N()
        neg_word_count = label_word_fd['neg'].N()
        total_word_count = pos_word_count + neg_word_count
        word_scores = {}
        for word, freq in word_fd.iteritems():
            pos_score = BigramAssocMeasures.chi_sq(label_word_fd['pos'][word],
                                                   (freq, pos_word_count), total_word_count)
            neg_score = BigramAssocMeasures.chi_sq(label_word_fd['neg'][word],
                                                   (freq, neg_word_count), total_word_count)
            word_scores[word] = pos_score + neg_score
        best = sorted(word_scores.iteritems(), key=lambda (w, s): s, reverse=True)[:num_bestwords]
        bestwords = set([w for w, s in best])
        return bestwords

    bestwords = cache.get('bestwords')
    if bestwords is None:
        bestwords = GetHighInformationWordsChi(num_bestwords)
        cache.set('bestwords', bestwords)  # cache so later requests skip the recount

    def best_words_features(words):
        return dict([(word, True) for word in words if word in bestwords])

    def best_bigrams_words_features(words, measure=BigramAssocMeasures.chi_sq, nbigrams=200):
        bigram_finder = BigramCollocationFinder.from_words(words)
        bigrams = bigram_finder.nbest(measure, nbigrams)
        d = dict([(bigram, True) for bigram in bigrams])
        d.update(best_words_features(words))
        return d

    clf = cache.get('clf')
    if clf is None:
        clf = train_clf(method_selfeatures)
        cache.set('clf', clf)  # cache so later requests skip retraining

    cntpos = 0
    cntneg = 0
    for p in pages:
        words = p.content.split(" ")
        feats = best_words_features(words)  # bigram_word_features(words) / stopword_filtered_word_feats(words)
        # print feats
        str_sent = clf.classify(feats)
        if str_sent == 'pos':
            p.sentiment = 1
            cntpos += 1
        else:
            p.sentiment = -1
            cntneg += 1
        p.save()
    context['reviews_classified'] = len(pages)
    context['positive_count'] = cntpos
    context['negative_count'] = cntneg
    context['classified_information'] = True
    return render_to_response(
        'movie_reviews/home.html', RequestContext(request, context))


def pgrank_view(request, pk):
    context = {}
    get_data = request.GET
    scrape = get_data.get('scrape', 'False')
    s = SearchTerm.objects.get(id=pk)
    if scrape == 'True':
        pages = s.pages.all().filter(review=True)
        urls = []
        for u in pages:
            urls.append(u.url)
        # crawl
        cmd = 'cd ../scrapy_spider & scrapy crawl scrapy_spider_recursive -a url_list=%s -a search_id=%s' % ('\"' + str(','.join(urls[:]).encode('utf-8')) + '\"', '\"' + str(pk) + '\"')
        print 'cmd:', cmd
        os.system(cmd)
        links = s.links.all()
        if len(links) == 0:
            context['no_links'] = True
            return render_to_response(
                'movie_reviews/pg-rank.html', RequestContext(request, context))
    # calc pgranks
    pgrank(pk)
    # load pgranks in descending order of pagerank
    pages_ordered = s.pages.all().filter(review=True).order_by('-new_rank')
    context['pages'] = pages_ordered
    return render_to_response(
        'movie_reviews/pg-rank.html', RequestContext(request, context))
418863
import os

import fleep  # only used by the commented-out extension lookup below
import magic  # python-magic
import requests
from bs4 import BeautifulSoup


def down(name, tag):
    a = tag.select('a')
    if a:
        url = a[0].get('href')
        down_res = requests.get(url=url)
        mime = magic.from_buffer(down_res.content[0:2048], mime=True)
        # for element in fleep.data:
        #     if element['mime'] == mime:
        #         print('{} - {}'.format(mime, element['extension']))
        #         break
        extension = 'mp3'
        if mime == 'audio/ogg':
            extension = 'oga'
        with open('{}.{}'.format(name, extension), 'wb') as f:
            f.write(down_res.content)


def main():
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                      '(KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'}
    url = 'https://github.com/shimohq/chinese-programmer-wrong-pronunciation/blob/master/README.md'
    resp = requests.get(url=url, headers=headers)
    if resp.ok:
        soup = BeautifulSoup(resp.text, 'html.parser')
        readme = soup.select('#readme')[0]
        need_table = readme.select('table')[0]
        rows = need_table.select('tr')
        for r in rows[1:]:
            columns = r.select('td')
            down(os.path.join(r'C:\Users\Administrator\Desktop\发音\英音', columns[0].text), columns[1])
            down(os.path.join(r'C:\Users\Administrator\Desktop\发音\美音', columns[0].text), columns[2])


if __name__ == '__main__':
    main()
418870
import os

from utils.opencv_face_detector import detect_face


def gen_labels(images_path, label_file_path):
    label_file_path_format2 = label_file_path + '.format2'
    if not os.path.exists(label_file_path):
        label_file = open(label_file_path, 'w')
        label_file_format2 = open(label_file_path_format2, 'w')
        train_images = os.listdir(images_path)
        train_count = 0
        face_count = 0
        for i, train_image in enumerate(train_images):
            if i % 10 == 0:
                print(i, len(train_images))
            face_coordinates = detect_face(os.path.join(images_path, train_image))
            if len(face_coordinates) == 0:
                continue
            label_file.write(os.path.join(images_path, train_image))
            label_file_format2.write(os.path.join(images_path, train_image) + '\n')
            label_file_format2.write(str(len(face_coordinates)) + '\n')
            image_face_bboxes = ""
            for face_coordinate in face_coordinates:
                face_count += 1
                (x, y, w, h) = face_coordinate
                image_face_bboxes += " {} {} {} {}".format(x, y, w, h)
                label_file_format2.write("{} {} {} {}\n".format(x, y, w, h))
            label_file.write("{}\n".format(image_face_bboxes))
            train_count += 1
        label_file.close()
        label_file_format2.close()
        print("{} faces detected from {} images".format(face_count, train_count))
418886
import sys
import argparse
import hashlib
import time
prefix = ["cam", "video", "x", "a", "www", "ftp", "ssl", "tftp", "www1",
"www2", "noc", "smtp", "pop", "ssl", "secure", "images", "th",
"img", "download", "mail", "remote", "blog", "webmail", "server",
"ns1", "vpn", "m", "shop", "mail2", "test", "ww1", "support", "dev",
"web", "bbs", "email", "cloud", "gw", "admin", "news"]
verbs = ["be", "have", "do", "say", "go", "get", "make", "know", "think",
"take", "see", "come", "want", "look", "use", "find", "give",
"tell", "work", "call", "try", "ask", "need", "feel", "become",
"leave", "put", "mean", "keep", "let", "begin", "seem", "help",
"talk", "turn", "start", "show", "hear", "play", "run", "move",
"like", "live", "believe", "hold", "bring", "happen", "write",
"provide", "sit", "stand", "lose", "pay", "meet", "include",
"continue", "set", "learn", "change", "lead", "understand",
"watch", "follow", "stop", "create", "speak", "read", "allow",
"add", "spend", "grow", "open", "walk", "win", "offer", "remember",
"love", "consider", "appear", "buy", "wait", "serve", "die",
"send", "expect", "build", "stay", "fall", "cut", "reach",
"kill", "remain", "suggest", "raise", "pass", "sell", "require",
"report", "decide", "pull" ]
adjs = ["able", "acceptable", "according", "accurate", "action", "active",
"actual", "additional", "administrative", "adult", "afraid",
"after", "afternoon", "agent", "aggressive", "ago", "airline",
"alive", "all", "alone", "alternative", "amazing", "angry",
"animal", "annual", "another", "anxious", "any", "apart",
"appropriate", "asleep", "automatic", "available", "aware", "away",
"background", "basic", "beautiful", "beginning", "best", "better",
"big", "bitter", "boring", "born", "both", "brave", "brief",
"bright", "brilliant", "broad", "brown", "budget", "business",
"busy", "calm", "capable", "capital", "car", "careful", "certain",
"chance", "character", "cheap", "chemical", "chicken", "choice",
"civil", "classic", "clean", "clear", "close", "cold",
"comfortable", "commercial", "common", "competitive", "complete",
"complex", "comprehensive", "confident", "connect", "conscious",
"consistent", "constant", "content", "cool", "corner", "correct",
"crazy", "creative", "critical", "cultural", "curious", "current",
"cute", "dangerous", "dark", "daughter", "day", "dead", "dear",
"decent", "deep", "dependent", "designer", "desperate", "different",
"difficult", "direct", "dirty", "distinct", "double", "downtown",
"dramatic", "dress", "drunk", "dry", "due", "each", "east",
"eastern", "easy", "economy", "educational", "effective",
"efficient", "either", "electrical", "electronic", "embarrassed",
"emergency", "emotional", "empty", "enough", "entire",
"environmental", "equal", "equivalent", "even", "evening", "every",
"exact", "excellent", "exciting", "existing", "expensive", "expert",
"express", "extension", "external", "extra", "extreme", "fair",
"false", "familiar", "famous", "far", "fast", "fat", "federal",
"feeling", "female", "few", "final", "financial", "fine", "firm",
"first", "fit", "flat", "foreign", "formal", "former", "forward",
"free", "frequent", "fresh", "friendly", "front", "full", "fun",
"funny", "future", "game", "general", "glad", "glass", "global",
"gold", "good", "grand", "great", "green", "gross", "guilty", "happy",
"hard", "head", "healthy", "heavy", "helpful", "high", "his",
"historical", "holiday", "home", "honest", "horror", "hot", "hour",
"house", "huge", "human", "hungry", "ideal", "ill", "illegal",
"immediate", "important", "impossible", "impressive", "incident",
"independent", "individual", "inevitable", "informal", "initial",
"inner", "inside", "intelligent", "interesting", "internal",
"international", "joint", "junior", "just", "key", "kind", "kitchen",
"known", "large", "last", "late", "latter", "leading", "least",
"leather", "left", "legal", "less", "level", "life", "little", "live",
"living", "local", "logical", "lonely", "long", "loose", "lost",
"loud", "low", "lower", "lucky", "mad", "main", "major", "male",
"many", "massive", "master", "material", "maximum", "mean", "medical",
"medium", "mental", "middle", "minimum", "minor", "minute", "mission",
"mobile", "money", "more", "most", "mother", "motor", "mountain",
"much", "narrow", "nasty", "national", "native", "natural", "nearby",
"neat", "necessary", "negative", "neither", "nervous", "new", "next",
"nice", "normal", "north", "novel", "numerous", "objective",
"obvious", "odd", "official", "ok", "old", "only", "open", "opening",
"opposite", "ordinary", "original", "other", "otherwise", "outside",
"over", "overall", "own", "parking", "particular", "party", "past",
"patient", "perfect", "period", "personal", "physical", "plane",
"plastic", "pleasant", "plenty", "plus", "political", "poor",
"popular", "positive", "possible", "potential", "powerful",
"practical", "pregnant", "present", "pretend", "pretty", "previous",
"primary", "prior", "private", "prize", "professional", "proof",
"proper", "proud", "psychological", "public", "pure", "purple",
"quick", "quiet", "rare", "raw", "ready", "real", "realistic",
"reasonable", "recent", "red", "regular", "relative", "relevant",
"remarkable", "remote", "representative", "resident", "responsible",
"rich", "right", "rough", "round", "routine", "royal", "sad", "safe",
"salt", "same", "savings", "scared", "sea", "secret", "secure",
"select", "senior", "sensitive", "separate", "serious", "several",
"severe", "sexual", "sharp", "short", "shot", "sick", "signal",
"significant", "silly", "silver", "similar", "simple", "single",
"slight", "slow", "small", "smart", "smooth", "soft", "solid", "some",
"sorry", "south", "southern", "spare", "special", "specialist",
"specific", "spiritual", "square", "standard", "status", "still",
"stock", "straight", "strange", "street", "strict", "strong", "stupid",
"subject", "substantial", "successful", "such", "sudden", "sufficient",
"suitable", "super", "sure", "suspicious", "sweet", "swimming", "tall",
"technical", "temporary", "terrible", "that", "then", "these", "thick",
"thin", "think", "this", "tight", "time", "tiny", "top", "total", "tough",
"traditional", "training", "trick", "true", "typical", "ugly", "unable",
"unfair", "unhappy", "unique", "united", "unlikely", "unusual",
"upper", "upset", "upstairs", "used", "useful", "usual", "valuable",
"various", "vast", "vegetable", "visible", "visual", "warm", "waste",
"weak", "weekly", "weird", "west", "western", "what", "which", "white",
"whole", "wide", "wild", "willing", "wine", "winter", "wise",
"wonderful", "wooden", "work", "working", "worth", "wrong",
"yellow", "young"]
nouns = ["a", "ability", "abroad", "abuse", "access", "accident", "account",
"act", "action", "active", "activity", "actor", "ad", "addition",
"address", "administration", "adult", "advance", "advantage",
"advertising", "advice", "affair", "affect", "afternoon", "age",
"agency", "agent", "agreement", "air", "airline", "airport",
"alarm", "alcohol", "alternative", "ambition", "amount",
"analysis", "analyst", "anger", "angle", "animal", "annual",
"answer", "anxiety", "anybody", "anything", "anywhere",
"apartment", "appeal", "appearance", "apple", "application",
"appointment", "area", "argument", "arm", "army", "arrival",
"art", "article", "aside", "ask", "aspect", "assignment", "assist",
"assistance", "assistant", "associate", "association", "assumption",
"atmosphere", "attack", "attempt", "attention", "attitude",
"audience", "author", "average", "award", "awareness", "baby",
"back", "background", "bad", "bag", "bake", "balance", "ball",
"band", "bank", "bar", "base", "baseball", "basis", "basket", "bat",
"bath", "bathroom", "battle", "beach", "bear", "beat", "beautiful",
"bed", "bedroom", "beer", "beginning", "being", "bell", "belt",
"bench", "bend", "benefit", "bet", "beyond", "bicycle", "bid",
"big", "bike", "bill", "bird", "birth", "birthday", "bit", "bite",
"bitter", "black", "blame", "blank", "blind", "block", "blood",
"blow", "blue", "board", "boat", "body", "bone", "bonus", "book",
"boot", "border", "boss", "bother", "bottle", "bottom", "bowl",
"box", "boy", "boyfriend", "brain", "branch", "brave", "bread",
"break", "breakfast", "breast", "breath", "brick", "bridge", "brief",
"brilliant", "broad", "brother", "brown", "brush", "buddy", "budget",
"bug", "building", "bunch", "burn", "bus", "business", "button",
"buy", "buyer", "cabinet", "cable", "cake", "calendar", "call",
"calm", "camera", "camp", "campaign", "can", "cancel", "cancer",
"candidate", "candle", "candy", "cap", "capital", "car", "card",
"care", "career", "carpet", "carry", "case", "cash", "cat", "catch",
"category", "cause", "celebration", "cell", "chain", "chair",
"challenge", "champion", "championship", "chance", "change",
"channel", "chapter", "character", "charge", "charity", "chart",
"check", "cheek", "chemical", "chemistry", "chest", "chicken",
"child", "childhood", "chip", "chocolate", "choice", "church",
"cigarette", "city", "claim", "class", "classic", "classroom",
"clerk", "click", "client", "climate", "clock", "closet", "clothes",
"cloud", "club", "clue", "coach", "coast", "coat", "code", "coffee",
"cold", "collar", "collection", "college", "combination", "combine",
"comfort", "comfortable", "command", "comment", "commercial",
"commission", "committee", "common", "communication", "community",
"company", "comparison", "competition", "complaint", "complex",
"computer", "concentrate", "concept", "concern", "concert",
"conclusion", "condition", "conference", "confidence", "conflict",
"confusion", "connection", "consequence", "consideration", "consist",
"constant", "construction", "contact", "contest", "context",
"contract", "contribution", "control", "conversation", "convert",
"cook", "cookie", "copy", "corner", "cost", "count", "counter",
"country", "county", "couple", "courage", "course", "court", "cousin",
"cover", "cow", "crack", "craft", "crash", "crazy", "cream",
"creative", "credit", "crew", "criticism", "cross", "cry", "culture",
"cup", "currency", "current", "curve", "customer", "cut", "cycle",
"damage", "dance", "dare", "dark", "data", "database", "date",
"daughter", "day", "dead", "deal", "dealer", "dear", "death",
"debate", "debt", "decision", "deep", "definition", "degree",
"delay", "delivery", "demand", "department", "departure", "dependent",
"deposit", "depression", "depth", "description", "design",
"designer", "desire", "desk", "detail", "development", "device",
"devil", "diamond", "diet", "difference", "difficulty", "dig",
"dimension", "dinner", "direction", "director", "dirt", "disaster",
"discipline", "discount", "discussion", "disease", "dish", "disk",
"display", "distance", "distribution", "district", "divide",
"doctor", "document", "dog", "door", "dot", "double", "doubt",
"draft", "drag", "drama", "draw", "drawer", "drawing", "dream",
"dress", "drink", "drive", "driver", "drop", "drunk", "due", "dump",
"dust", "duty", "ear", "earth", "ease", "east", "eat", "economics",
"economy", "edge", "editor", "education", "effect", "effective",
"efficiency", "effort", "egg", "election", "elevator", "emergency",
"emotion", "emphasis", "employ", "employee", "employer", "employment",
"end", "energy", "engine", "engineer", "engineering", "entertainment",
"enthusiasm", "entrance", "entry", "environment", "equal", "equipment",
"equivalent", "error", "escape", "essay", "establishment", "estate",
"estimate", "evening", "event", "evidence", "exam", "examination",
"example", "exchange", "excitement", "excuse", "exercise", "exit",
"experience", "expert", "explanation", "expression", "extension",
"extent", "external", "extreme", "eye", "face", "fact", "factor",
"fail", "failure", "fall", "familiar", "family", "fan", "farm",
"farmer", "fat", "father", "fault", "fear", "feature", "fee", "feed",
"feedback", "feel", "feeling", "female", "few", "field", "fight",
"figure", "file", "fill", "film", "final", "finance", "finding",
"finger", "finish", "fire", "fish", "fishing", "fix", "flight",
"floor", "flow", "flower", "fly", "focus", "fold", "following",
"food", "foot", "football", "force", "forever", "form", "formal",
"fortune", "foundation", "frame", "freedom", "friend", "friendship",
"front", "fruit", "fuel", "fun", "function", "funeral", "funny",
"future", "gain", "game", "gap", "garage", "garbage", "garden",
"gas", "gate", "gather", "gear", "gene", "general", "gift", "girl",
"girlfriend", "give", "glad", "glass", "glove", "go", "goal",
"god", "gold", "golf", "good", "government", "grab", "grade",
"grand", "grandfather", "grandmother", "grass", "great", "green",
"grocery", "ground", "group", "growth", "guarantee", "guard",
"guess", "guest", "guidance", "guide", "guitar", "guy",
"habit", "hair", "half", "hall", "hand", "handle", "hang",
"harm", "hat", "hate", "head", "health", "hearing", "heart",
"heat", "heavy", "height", "hell", "hello", "help", "hide",
"high", "highlight", "highway", "hire", "historian", "history",
"hit", "hold", "hole", "holiday", "home", "homework", "honey",
"hook", "hope", "horror", "horse", "hospital", "host", "hotel",
"hour", "house", "housing", "human", "hunt", "hurry", "hurt",
"husband", "ice", "idea", "ideal", "if", "illegal", "image",
"imagination", "impact", "implement", "importance", "impress",
"impression", "improvement", "incident", "income", "increase",
"independence", "independent", "indication", "individual",
"industry", "inevitable", "inflation", "influence", "information",
"initial", "initiative", "injury", "insect", "inside",
"inspection", "inspector", "instance", "instruction",
"insurance", "intention", "interaction", "interest",
"internal", "international", "internet", "interview",
"introduction", "investment", "invite", "iron", "island",
"issue", "it", "item", "jacket", "job", "join", "joint", "joke",
"judge", "judgment", "juice", "jump", "junior", "jury", "keep",
"key", "kick", "kid", "kill", "kind", "king", "kiss", "kitchen",
"knee", "knife", "knowledge", "lab", "lack", "ladder", "lady",
"lake", "land", "landscape", "language", "laugh", "law", "lawyer",
"lay", "layer", "lead", "leader", "leadership", "leading",
"league", "leather", "leave", "lecture", "leg", "length",
"lesson", "let", "letter", "level", "library", "lie", "life",
"lift", "light", "limit", "line", "link", "lip", "list", "listen",
"literature", "living", "load", "loan", "local", "location",
"lock", "log", "long", "look", "loss", "love", "low", "luck",
"lunch", "machine", "magazine", "mail", "main", "maintenance",
"major", "make", "male", "mall", "man", "management", "manager",
"manner", "manufacturer", "many", "map", "march", "mark",
"market", "marketing", "marriage", "master", "match", "mate",
"material", "math", "matter", "maximum", "maybe", "meal",
"meaning", "measurement", "meat", "media", "medicine", "medium",
"meet", "meeting", "member", "membership", "memory", "mention",
"menu", "mess", "message", "metal", "method", "middle", "midnight",
"might", "milk", "mind", "mine", "minimum", "minor", "minute",
"mirror", "miss", "mission", "mistake", "mix", "mixture", "mobile",
"mode", "model", "mom", "moment", "money", "monitor", "month", "mood",
"morning", "mortgage", "most", "mother", "motor", "mountain",
"mouse", "mouth", "move", "movie", "mud", "muscle", "music",
"nail", "name", "nasty", "nation", "national", "native", "natural",
"nature", "neat", "necessary", "neck", "negative", "negotiation",
"nerve", "net", "network", "news", "newspaper", "night", "nobody",
"noise", "normal", "north", "nose", "note", "nothing", "notice",
"novel", "number", "nurse", "object", "objective", "obligation",
"occasion", "offer", "office", "officer", "official", "oil",
"opening", "operation", "opinion", "opportunity", "opposite",
"option", "orange", "order", "ordinary", "organization", "original",
"other", "outcome", "outside", "oven", "owner", "pace", "pack",
"package", "page", "pain", "paint", "painting", "pair", "panic",
"paper", "parent", "park", "parking", "part", "particular",
"partner", "party", "pass", "passage", "passenger", "passion",
"past", "path", "patience", "patient", "pattern", "pause", "pay",
"payment", "peace", "peak", "pen", "penalty", "pension", "people",
"percentage", "perception", "performance", "period", "permission",
"permit", "person", "personal", "personality", "perspective",
"phase", "philosophy", "phone", "photo", "phrase", "physical",
"physics", "piano", "pick", "picture", "pie", "piece", "pin",
"pipe", "pitch", "pizza", "place", "plan", "plane", "plant",
"plastic", "plate", "platform", "play", "player", "pleasure",
"plenty", "poem", "poet", "poetry", "point", "police", "policy",
"politics", "pollution", "pool", "pop", "population", "position",
"positive", "possession", "possibility", "possible", "post", "pot",
"potato", "potential", "pound", "power", "practice", "preference",
"preparation", "presence", "present", "presentation", "president",
"press", "pressure", "price", "pride", "priest", "primary",
"principle", "print", "prior", "priority", "private", "prize",
"problem", "procedure", "process", "produce", "product", "profession",
"professional", "professor", "profile", "profit", "program",
"progress", "project", "promise", "promotion", "prompt", "proof",
"property", "proposal", "protection", "psychology", "public", "pull",
"punch", "purchase", "purple", "purpose", "push", "put", "quality",
"quantity", "quarter", "queen", "question", "quiet", "quit", "quote",
"race", "radio", "rain", "raise", "range", "rate", "ratio",
"raw", "reach", "reaction", "read", "reading", "reality", "reason",
"reception", "recipe", "recognition", "recommendation", "record",
"recording", "recover", "red", "reference", "reflection",
"refrigerator", "refuse", "region", "register", "regret",
"regular", "relation", "relationship", "relative", "release",
"relief", "remote", "remove", "rent", "repair", "repeat",
"replacement", "reply", "report", "representative", "republic",
"reputation", "request", "requirement", "research", "reserve",
"resident", "resist", "resolution", "resolve", "resort", "resource",
"respect", "respond", "response", "responsibility", "rest",
"restaurant", "result", "return", "reveal", "revenue", "review",
"revolution", "reward", "rice", "rich", "ride", "ring", "rip",
"rise", "risk", "river", "road", "rock", "role", "roll", "roof",
"room", "rope", "rough", "round", "routine", "row", "royal",
"rub", "ruin", "rule", "run", "rush", "sad", "safe", "safety",
"sail", "salad", "salary", "sale", "salt", "sample", "sand",
"sandwich", "satisfaction", "save", "savings", "scale", "scene",
"schedule", "scheme", "school", "science", "score", "scratch",
"screen", "screw", "script", "sea", "search", "season", "seat",
"second", "secret", "secretary", "section", "sector", "security",
"selection", "self", "sell", "senior", "sense", "sensitive",
"sentence", "series", "serve", "service", "session", "set",
"setting", "sex", "shake", "shame", "shape", "share", "she",
"shelter", "shift", "shine", "ship", "shirt", "shock", "shoe",
"shoot", "shop", "shopping", "shot", "shoulder", "show", "shower",
"sick", "side", "sign", "signal", "signature", "significance",
"silly", "silver", "simple", "sing", "singer", "single", "sink",
"sir", "sister", "site", "situation", "size", "skill", "skin",
"skirt", "sky", "sleep", "slice", "slide", "slip", "smell",
"smile", "smoke", "snow", "society", "sock", "soft", "software",
"soil", "solid", "solution", "somewhere", "son", "song", "sort",
"sound", "soup", "source", "south", "space", "spare", "speaker",
"special", "specialist", "specific", "speech", "speed", "spell",
"spend", "spirit", "spiritual", "spite", "split", "sport",
"spot", "spray", "spread", "spring", "square", "stable",
"staff", "stage", "stand", "standard", "star", "start",
"state", "statement", "station", "status", "stay", "steak",
"steal", "step", "stick", "still", "stock", "stomach", "stop",
"storage", "store", "storm", "story", "strain", "stranger",
"strategy", "street", "strength", "stress", "stretch", "strike",
"string", "strip", "stroke", "structure", "struggle", "student",
"studio", "study", "stuff", "stupid", "style", "subject",
"substance", "success", "suck", "sugar", "suggestion", "suit",
"summer", "sun", "supermarket", "support", "surgery", "surprise",
"surround", "survey", "suspect", "sweet", "swim", "swimming",
"swing", "switch", "sympathy", "system", "table", "tackle", "tale",
"talk", "tank", "tap", "target", "task", "taste", "tax", "tea",
"teach", "teacher", "teaching", "team", "tear", "technology",
"telephone", "television", "tell", "temperature", "temporary",
"tennis", "tension", "term", "test", "text", "thanks", "theme",
"theory", "thing", "thought", "throat", "ticket", "tie", "till",
"time", "tip", "title", "today", "toe", "tomorrow", "tone",
"tongue", "tonight", "tool", "tooth", "top", "topic", "total",
"touch", "tough", "tour", "tourist", "towel", "tower", "town",
"track", "trade", "tradition", "traffic", "train", "trainer",
"training", "transition", "transportation", "trash", "travel",
"treat", "tree", "trick", "trip", "trouble", "truck", "trust",
"truth", "try", "tune", "turn", "twist", "type", "uncle",
"understanding", "union", "unique", "unit", "university",
"upper", "upstairs", "use", "user", "usual", "vacation",
"valuable", "value", "variation", "variety", "vast", "vegetable",
"vehicle", "version", "video", "view", "village", "virus",
"visit", "visual", "voice", "volume", "wait", "wake", "walk",
"wall", "war", "warning", "wash", "watch", "water", "wave", "way",
"weakness", "wealth", "wear", "weather", "web", "wedding", "week",
"weekend", "weight", "weird", "welcome", "west", "western", "wheel",
"whereas", "while", "white", "whole", "wife", "will", "win",
"wind", "window", "wine", "wing", "winner", "winter", "wish",
"witness", "woman", "wonder", "wood", "word", "work", "worker",
"working", "world", "worry", "worth", "wrap", "writer", "writing",
"yard", "year", "yellow", "yesterday", "you", "young", "youth",
"zone"]
TLDs = ["art", "click", "club", "com", "fans", "futbol", "in", "info",
"link", "net", "nl", "observer", "one", "org", "pictures",
"realty", "rocks", "tel", "top", "xyz"]
class domain_generator:

    def __init__(self, ts):
        global prefix
        global verbs
        global adjs
        global nouns
        global TLDs
        self.prefix = prefix
        self.verbs = verbs
        self.adjs = adjs
        self.nouns = nouns
        self.tlds = TLDs
        self.seed = self.init_seed(ts)

    def init_seed(self, ts):
        timea = time.localtime(ts)
        seed_str = time.strftime("%b %Y 00:00", timea)
        s = hashlib.sha256()
        s.update(seed_str.encode())  # hashlib requires bytes on Python 3
        seed = int(s.hexdigest()[:8], 16)
        return seed

    def rand(self):
        # xorshift-style step, truncated to 32 bits
        x = self.seed ^ ((self.seed << 13) & 0xffffffff)
        y = x ^ (x >> 17)
        self.seed = (y ^ 32 * y) & 0xffffffff

    def generate_domain(self):
        # c2 format: [prefix.]verb[-]adj[-]noun.TLD
        domain = ''
        # prefix (only added for roughly one in five seeds)
        self.rand()
        if self.seed % 5 == 0:
            self.rand()
            domain += self.prefix[self.seed % len(self.prefix)]
            domain += '.'
        # verb
        self.rand()
        domain += self.verbs[self.seed % len(self.verbs)]
        self.rand()
        if self.seed % 10 <= 1:
            domain += '-'
        # adj
        self.rand()
        domain += self.adjs[self.seed % len(self.adjs)]
        self.rand()
        if self.seed % 10 <= 1:
            domain += '-'
        # noun
        self.rand()
        domain += self.nouns[self.seed % len(self.nouns)]
        # TLD
        self.rand()
        domain += '.' + self.tlds[self.seed % len(self.tlds)]
        return domain

    def generate_domains(self, nr):
        for d in range(nr):
            print(self.generate_domain())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--time', help="Seconds since January 1, 1970 UTC")
    parser.add_argument("-n", "--nr", help="nr of domains", type=int, default=1000)
    args = parser.parse_args()
    dg = domain_generator(int(args.time))
    dg.generate_domains(int(args.nr))
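
# Example invocation (the script name is hypothetical). The seed depends only on
# the month of the timestamp, so any timestamp in the same month yields the same list:
#   python dga.py -t 1577836800 -n 5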
418956
import csv
import tempfile

from django.conf import settings
from django.core.management import call_command
from django.test.utils import override_settings

from frontend import bq_schemas as schemas
from gcutils.bigquery import Client


def import_test_data_full(directory, data_factory, end_date, months=None):
    """
    Imports the data in `data_factory` into an SQLite file in `directory` while
    exercising the entire matrixstore_build pipeline. This includes uploading
    data to BigQuery and exporting it to Google Cloud Storage, so it can
    take several minutes to run.

    Returns the path of the newly created file.
    """
    upload_to_bigquery(data_factory)
    with override_settings(
        MATRIXSTORE_IMPORT_DIR=directory, MATRIXSTORE_BUILD_DIR=directory
    ):
        return call_command("matrixstore_build", end_date, months=months, quiet=True)


def upload_to_bigquery(data_factory):
    client = Client("hscic")
    assert_is_test_dataset(client)
    create_and_populate_bq_table(
        client, "presentation", schemas.PRESENTATION_SCHEMA, data_factory.presentations
    )
    create_and_populate_bq_table(
        client, "prescribing_v2", schemas.PRESCRIBING_SCHEMA, data_factory.prescribing
    )
    create_and_populate_bq_table(
        client,
        "practice_statistics_all_years",
        schemas.PRACTICE_STATISTICS_SCHEMA,
        data_factory.practice_statistics,
    )
    create_and_populate_bq_table(
        client, "bnf_map", schemas.BNF_MAP_SCHEMA, data_factory.bnf_map
    )


def assert_is_test_dataset(client):
    bq_nonce = getattr(settings, "BQ_NONCE", None)
    if not bq_nonce or str(bq_nonce) not in client.dataset_id:
        raise RuntimeError("BQ_NONCE must be set")


def create_and_populate_bq_table(client, name, schema, table_data):
    table = client.get_or_create_table(name, schema)
    if not table_data:
        return
    with tempfile.NamedTemporaryFile("wt") as f:
        writer = csv.writer(f)
        for item in table_data:
            writer.writerow(dict_to_row(item, schema))
        f.seek(0)
        table.insert_rows_from_csv(f.name, schema)


def dict_to_row(dictionary, schema):
    row = [dictionary[field.name] for field in schema]
    # `row` always has exactly one entry per schema field, so compare the
    # dictionary itself against the schema to catch unexpected extra keys
    if len(dictionary) != len(schema):
        extra = set(dictionary) - set([field.name for field in schema])
        raise ValueError(
            "Dictionary has keys which are not in BigQuery schema: {}".format(
                ", ".join(extra)
            )
        )
    return row
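
# Sketch of dict_to_row (assuming a hypothetical two-field schema named "a", "b"):
#   dict_to_row({"a": 1, "b": 2}, schema)           -> [1, 2]
#   dict_to_row({"a": 1, "b": 2, "c": 3}, schema)   -> ValueError (extra key "c")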
418958
from numpy import (
    allclose,
    isnan
)

from . import pwm


def test_create():
    m = pwm.FrequencyMatrix.from_rows(['A', 'C', 'G', 'T'], get_ctcf_rows())
    # Alphabet sort
    assert m.sorted_alphabet == ['A', 'C', 'G', 'T']
    # Character to index mapping
    assert m.char_to_index[ord('A')] == 0
    assert m.char_to_index[ord('C')] == 1
    assert m.char_to_index[ord('G')] == 2
    assert m.char_to_index[ord('T')] == 3
    assert m.char_to_index[ord('Q')] == -1
    # Values
    assert allclose(m.values[0], [2620, 2052, 3013, 2314])
    assert allclose(m.values[19], [3144, 3231, 3056, 567])


def test_scoring():
    m = pwm.FrequencyMatrix.from_rows(['A', 'C', 'G', 'T'], get_ctcf_rows())
    # Stormo method
    sm = m.to_stormo_scoring_matrix()
    # Forward matches
    assert allclose(sm.score_string("AATCACCACCTCCTGGCAGG")[0], -156.8261261)
    assert allclose(sm.score_string("TGCCTGCCTCTGTAGGCTCC")[0], -128.8106842)
    assert allclose(sm.score_string("GTTGCCAGTTGGGGGAAGCA")[0], 4.65049839)
    assert allclose(sm.score_string("GCAGACACCAGGTGGTTCAG")[0], 1.60168743)
    # Reverse matches
    rc = sm.reverse_complement()
    assert allclose(rc.score_string("AATCACCACCTCCTGGCAGG")[0], 0.014178276062)
    assert allclose(rc.score_string("TGCCTGCCTCTGTAGGCTCC")[0], 0.723828315735)
    assert allclose(rc.score_string("GTTGCCAGTTGGGGGAAGCA")[0], -126.99407196)
    assert allclose(rc.score_string("GCAGACACCAGGTGGTTCAG")[0], -86.9560623169)
    # Nothing valid
    assert isnan(sm.score_string_with_gaps("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")).all()
    # Too short
    assert isnan(sm.score_string("TTTT")).all()


def test_scoring_with_gaps():
    m = pwm.FrequencyMatrix.from_rows(['A', 'C', 'G', 'T'], get_ctcf_rows())
    # Stormo method
    sm = m.to_stormo_scoring_matrix()
    # Forward matches
    assert allclose(sm.score_string_with_gaps("GTTGCCAGT----TGGGGGAAGCATTT---AA")[0], 4.65049839)
    assert allclose(sm.score_string_with_gaps("GCAGA--CACCAGGTGG--TTCAG---")[0], 1.60168743)
    assert allclose(sm.score_string_with_gaps("----GTTGCCAGTTGGGGGAAGCA")[4], 4.65049839)
    assert allclose(sm.score_string_with_gaps("TTT--GTT--GCCA--GTTGGGG-G-A-A-G-C-A-")[5], 4.65049839)
    assert isnan(sm.score_string_with_gaps("TTT--GTT--GCCA--GTTGGGG-G-A-A-G-C-A-")[4])
    # Nothing valid
    assert isnan(sm.score_string_with_gaps("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")).all()
    assert isnan(sm.score_string_with_gaps("------------------------------------")).all()
    # Too short
    assert isnan(sm.score_string_with_gaps("TTTT")).all()


def get_ctcf_rows():
    """
    The CTCF primary site motif
    """
    return [
        [2620, 2052, 3013, 2314],
        [0, 3580, 1746, 4672],
        [2008, 1790, 4497, 1703],
        [3362, 0, 6637, 0],
        [0, 10000, 0, 0],
        [0, 10000, 0, 0],
        [7467, 0, 1310, 1222],
        [786, 4890, 4323, 0],
        [1179, 6288, 829, 1703],
        [10000, 0, 0, 0],
        [0, 0, 10000, 0],
        [4847, 0, 5152, 0],
        [0, 0, 6200, 3799],
        [0, 0, 10000, 0],
        [0, 0, 10000, 0],
        [1572, 7467, 0, 960],
        [3842, 0, 5545, 611],
        [0, 5895, 4104, 0],
        [1615, 4192, 1397, 2794],
        [3144, 3231, 3056, 567]
    ]
418994
## Method 1
for row in range(4):
    for col in range(7):
        if row - col == 0 or row + col == 6:
            print('*', end=' ')
        else:
            print(' ', end=' ')
    print()

## Method 2
i = 0
j = 6
for row in range(4):
    for col in range(7):
        if row == col:
            print('*', end=' ')
        elif row == i and col == j:
            print('*', end=' ')
            i = i + 1
            j = j - 1
        else:
            print(' ', end=' ')
    print()
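
# Both methods print the same 4x7 "V" of stars:
#   *           *
#     *       *
#       *   *
#         *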
419056
import numpy as np

__all__ = ['formatter']


def formatter(prop_keys, fmt='%.6e', base='', default='%.6e'):
    """
    Formatter function for a given set of properties.

    Parameters
    ----------
    prop_keys : array_like
        Keys of the properties to format.
    fmt : str or dict, optional
        If str: format string for all the columns. Defaults to '%.6e'.\n
        If dict: the keys of the dictionary must be present in the ``prop_keys`` list. The keys from ``prop_keys`` which
        were not specified in the ``fmt`` dict will use the default format ``default``.
    base : str, optional
        Preceding format(s) prepended to the final computed format string. Defaults to ''.
    default : str, optional
        Default format for non-specified keys if ``fmt`` is a dict.

    Returns
    -------
    fmt_string : str
        String with the computed set of formats, to be used in output prints or files.
    """
    prop_keys = np.asarray(prop_keys)
    n = len(prop_keys)
    nvec = range(n)
    fmt_string = base
    if isinstance(fmt, str):  # If a single format is provided
        print("Using format '%s' for all the properties" % fmt)
        for i in nvec:
            fmt_string += ' ' + fmt  # Same format for all properties
    elif isinstance(fmt, dict):  # If a dict of formats
        fmt_list = np.array([default] * n)
        for key in fmt:
            if key in prop_keys:
                fmt_list = np.where(np.array(key) == prop_keys, fmt[key], fmt_list)
            else:
                raise KeyError("The property '%s' provided in 'fmt' was not defined in the 'prop' object of physical properties." % key)
        print('Using formats {} for properties {}'.format(fmt_list, prop_keys))
        for f in fmt_list:
            fmt_string += ' ' + f
    else:
        raise TypeError("Invalid type %s for 'fmt'. Please provide a valid 'fmt' object: str or dict" % type(fmt))
    fmt_string += '\n'
    return fmt_string
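
# Example (hypothetical property keys): 'mass' gets its own format, 'radius'
# falls back to the default:
#   formatter(['mass', 'radius'], fmt={'mass': '%.3f'})
#   -> ' %.3f %.6e\n'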
419103
from threading import Thread
import time


class Observable(object):
    def __init__(self):
        self._observers = set()

    def add_observer(self, observer):
        self._observers.add(observer)

    def remove_observer(self, observer):
        self._observers.remove(observer)

    def notify_observers(self, event):
        for observer in self._observers:
            observer.update(self, event)


class Observer(object):
    def update(self, observable, event):
        raise NotImplementedError('This method is abstract!')


class MyObservable(Thread, Observable):
    def __init__(self, *args, **kargs):
        Thread.__init__(self, *args, **kargs)
        Observable.__init__(self)  # Observable takes no extra arguments
        self._finish = False

    def run(self):
        while not self._finish:
            self.fire_event()
            time.sleep(0.1)

    def fire_event(self):
        self.notify_observers("What happened?")

    def stop(self):
        self._finish = True


class MyObserver(Observer):
    def update(self, observable, event):
        print("Something happened")


def main():
    myobservable = MyObservable()
    myobserver = MyObserver()
    myobservable.add_observer(myobserver)
    myobservable.start()
    time.sleep(2)
    myobservable.stop()
    print("Done!")


if __name__ == '__main__':
    main()
419134
from mockseries.interaction.additive_interaction import AdditiveInteraction
from mockseries.interaction.multiplicative_interaction import MultiplicativeInteraction
ADDITIVE = AdditiveInteraction()
MULTIPLICATIVE = MultiplicativeInteraction()
419140
import time

import numpy as np
import scipy.sparse as sparse
from scipy.sparse import vstack, hstack
from scipy.sparse.linalg import inv
from sklearn.utils.extmath import randomized_svd

from utils.progress import WorkSplitter, inhour


def pop(matrix_train, **unused):
    """
    Popularity baseline: score every item by how often it appears in the
    training matrix, ignoring user-specific signals.

    :param matrix_train: user-item matrix with shape m*n
    :return: rank-one factors (RQ, Y) whose product is the popularity score matrix
    """
    progress = WorkSplitter()
    m, n = matrix_train.shape
    item_popularity = np.array(np.sum(matrix_train, axis=0)).flatten()
    RQ = np.ones((m, 1))
    Y = item_popularity.reshape((1, n))
    return RQ, Y, None
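
# The dense score matrix is recovered as RQ.dot(Y) (shape m*n): every user row
# carries the same item-popularity scores, so the ranking is identical for all users.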
419144
import winbrewtest
import winbrew.execute


class MockArgs(object):
    force = False


class InstallPlanTest(winbrewtest.TestCase):
    def test_dependencies(self):
        formula = winbrew.Formula.formula_by_name('sfml')()
        plan = winbrew.execute.InstallPlan([formula], MockArgs())
        plan = [p.name for p in plan]
        assert plan == ['cmake', 'sfml']

    def test_multiple(self):
        formula = [winbrew.Formula.formula_by_name(n)() for n in ('openssl', 'sfml')]
        plan = winbrew.execute.InstallPlan(formula, MockArgs())
        plan = [p.name for p in plan]
        assert plan == ['perl', 'openssl', 'cmake', 'sfml']
|
419190
|
import logging
from flask import (
g, request, abort, render_template, url_for, redirect, flash)
from flask_login import login_required
from piecrust.page import Page
from piecrust.sources.interfaces import IInteractiveSource
from piecrust.uriutil import split_uri
from ..blueprint import foodtruck_bp
from ..views import with_menu_context
logger = logging.getLogger(__name__)
@foodtruck_bp.route('/write/<source_name>', methods=['GET', 'POST'])
@login_required
def write_page(source_name):
pcapp = g.site.piecrust_app
source = pcapp.getSource(source_name)
if source is None:
abort(400)
if not isinstance(source, IInteractiveSource):
abort(400)
if request.method == 'POST':
if 'do_save' in request.form:
return _submit_page_form(pcapp, source)
abort(400)
return _write_page_form(source)
def _write_page_form(source):
data = {}
data['is_new_page'] = True
data['source_name'] = source.name
data['url_postback'] = url_for('.write_page', source_name=source.name)
data['fields'] = []
for f in source.getInteractiveFields():
data['fields'].append({
'name': f.name,
'display_name': f.name,
'type': f.field_type,
'value': f.default_value})
tpl_names = []
pcapp = g.site.piecrust_app
for ext in pcapp.getCommandExtensions('prepare'):
try:
tpl_names += list(ext.getTemplateNames(pcapp))
except AttributeError:
pass # For extensions that don't define `getTemplateNames`.
data['content_templates'] = tpl_names
with_menu_context(data)
return render_template('create_page.html', **data)
def _submit_page_form(pcapp, source):
metadata = {}
for f in source.getInteractiveFields():
metadata[f.name] = f.default_value
for fk, fv in request.form.items():
if fk.startswith('meta-'):
metadata[fk[5:]] = fv
tpl_name = request.form['content-template']
logger.debug("Creating content with template '%s' and metadata: %s" %
(tpl_name, str(metadata)))
from piecrust.commands.builtin.scaffolding import build_content
content_item = build_content(source, metadata, tpl_name)
flash("'%s' was created." % content_item.spec)
page = Page(source, content_item)
uri = page.getUri()
logger.debug("Redirecting to: %s" % uri)
_, rel_url = split_uri(page.app, uri)
return redirect(url_for('.edit_page', url=rel_url))
|
419198
|
from __future__ import unicode_literals
from pyramid.settings import asbool
from pyramid.request import Request
from pyramid.decorator import reify
from pyramid.events import NewResponse
from pyramid.events import NewRequest
from pyramid.events import subscriber
from billy.models.model_factory import ModelFactory
from billy.api.utils import get_processor_factory
class APIRequest(Request):
@reify
def session(self):
"""Session object for database operations
"""
settings = self.registry.settings
return settings['session']
@reify
def model_factory(self):
"""The factory for creating data models
"""
settings = self.registry.settings
model_factory_func = settings.get('model_factory_func')
if model_factory_func is not None:
return model_factory_func()
processor_factory = get_processor_factory(settings)
return ModelFactory(
session=self.session,
processor_factory=processor_factory,
settings=settings,
)
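    # Note: @reify (unlike @property) computes the value once and caches it on
    # the request instance, so `session` and `model_factory` are each built at
    # most once per request.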
@subscriber(NewResponse)
def clean_balanced_processor_key(event):
"""This ensures we won't leave the API key of balanced to the same thread
(as there is a thread local object in Balanced API), in case of using it
later by accident, or for security reason.
"""
import balanced
balanced.configure(None)
@subscriber(NewRequest)
def clean_db_session(event):
"""Clean up DB session when the request processing is finished
"""
def clean_up(request):
request.session.remove()
settings = event.request.registry.settings
db_session_cleanup = asbool(settings.get('db_session_cleanup', True))
if db_session_cleanup:
event.request.add_finished_callback(clean_up)
|
419199
|
from django.core.management.base import BaseCommand
from django.conf import settings
from django.utils.translation import ugettext as _, ugettext_lazy
from odk_viewer.models import DataDictionary
from utils.model_tools import queryset_iterator
class Command(BaseCommand):
help = ugettext_lazy("Insert UUID into XML of all existing XForms")
def handle(self, *args, **kwargs):
print (_('%(nb)d XForms to update')
% {'nb': DataDictionary.objects.count()})
for i, dd in enumerate(queryset_iterator(DataDictionary.objects.all())):
if dd.xls:
dd._set_uuid_in_xml()
super(DataDictionary, dd).save()
if (i + 1) % 10 == 0:
                print(_('Updated %(nb)d XForms...') % {'nb': i + 1})
|
419232
|
import argparse
import math
import os
import torch
import pyro
import pandas as pd
import mlflow
import mlflow.pytorch
from mlflow.tracking import MlflowClient
from experiment_tools.output_utils import get_mlflow_meta
from experiment_tools.pyro_tools import auto_seed
def evaluate_policy(
experiment_id,
run_id=None,
seed=-1,
n_rollout=1000, # number of rollouts
device="cuda",
):
pyro.clear_param_store()
seed = auto_seed(seed)
if not os.path.exists("mlflow_outputs"):
os.makedirs("mlflow_outputs")
    if run_id:
        experiment_run_ids = [run_id]
        # a single explicit run has no metadata list; evaluate it from its model
        from_source = [False]
    else:
        filter_string = ""
        meta = get_mlflow_meta(experiment_id=experiment_id, filter_string=filter_string)
        meta = [m for m in meta if "eval_seed" not in m.data.params.keys()]
        experiment_run_ids = [run.info.run_id for run in meta]
        from_source = ["from_source" in m.data.params.keys() for m in meta]
print(experiment_run_ids)
for i, run_id in enumerate(experiment_run_ids):
if from_source[i]:
            # calculate the average metric from the logged history
client = MlflowClient()
metric = client.get_metric_history(run_id, "information_gain")
igs = [m.value for m in metric]
n_rollout = len(igs)
num_experiments = int(client.get_run(run_id).data.params["num_experiments"])
information_gain = torch.tensor(igs)
else:
model_location = f"mlruns/{experiment_id}/{run_id}/artifacts/model"
deathprocess = mlflow.pytorch.load_model(
model_location, map_location=device
)
num_experiments = deathprocess.T
deathprocess.eval(n_trace=1, theta=torch.tensor(1.5, device=device))
grid_min, grid_max, grid_n = 0, 20, 1000
data = deathprocess.rollout(
n_rollout, torch.linspace(grid_min, grid_max, grid_n, device=device)
)
prior_log_prob = data.nodes["theta"]["log_prob"]
mesh_density = math.exp(-prior_log_prob.logsumexp(0)[0].item())
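            # Riemann-sum view: if the theta grid has cell width delta, then
            # sum_i p(theta_i) * delta ~= 1, so delta ~= exp(-logsumexp of the
            # prior log-probs); mesh_density is that estimated cell width.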
posterior_log_prob = sum(
node["log_prob"]
for node in data.nodes.values()
if node["type"] == "sample" and node.get("subtype") != "design_sample"
)
posterior_log_prob = (
posterior_log_prob
- posterior_log_prob.logsumexp(0)
- math.log(mesh_density)
)
posterior_entropy = (
mesh_density * posterior_log_prob.exp() * (-posterior_log_prob)
).sum(0)
prior_entropy = (
mesh_density * prior_log_prob.exp() * (-prior_log_prob)
).sum(0)
information_gain = prior_entropy - posterior_entropy
print(information_gain.mean(), information_gain.std() / math.sqrt(n_rollout))
res = pd.DataFrame(
{
"EIG_mean": information_gain.mean().item(),
"EIG_se": (information_gain.std() / math.sqrt(n_rollout)).item(),
},
index=[num_experiments],
)
res.to_csv("mlflow_outputs/dp_eval.csv")
with mlflow.start_run(run_id=run_id, experiment_id=experiment_id) as run:
mlflow.log_param("eval_seed", seed)
mlflow.log_artifact(
"mlflow_outputs/dp_eval.csv", artifact_path="evaluation"
)
mlflow.log_metric("eval_MI", information_gain.mean().item())
return
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Deep Adaptive Design: Death Process Evaluation."
)
parser.add_argument("--seed", default=-1, type=int)
parser.add_argument("--experiment-id", default="9", type=str)
parser.add_argument("--run-id", default=None, type=str)
parser.add_argument("--num-rollouts", default=10000, type=int)
parser.add_argument("--device", default="cuda", type=str)
args = parser.parse_args()
evaluate_policy(
seed=args.seed,
experiment_id=args.experiment_id,
run_id=args.run_id,
device=args.device,
n_rollout=args.num_rollouts,
)
|
419239
|
import numpy as np
import numpy.random as npr
import scipy as sc
from scipy.stats import multivariate_normal as mvn
from scipy.stats import invwishart
from sds.utils.stats import multivariate_normal_logpdf as lg_mvn
from sds.utils.general import linear_regression, one_hot
from sds.distributions.categorical import Categorical
from sds.distributions.gaussian import StackedGaussiansWithPrecision
from sds.distributions.gaussian import StackedGaussiansWithDiagonalPrecision
from sds.distributions.lingauss import StackedLinearGaussiansWithPrecision
from sds.distributions.gaussian import GaussianWithPrecision
from sds.distributions.gaussian import GaussianWithDiagonalPrecision
from sklearn.preprocessing import PolynomialFeatures
from functools import partial
import copy
class InitCategoricalState:
def __init__(self, nb_states, **kwargs):
self.nb_states = nb_states
self.pi = 1. / self.nb_states * np.ones(self.nb_states)
@property
def params(self):
return self.pi
@params.setter
def params(self, value):
self.pi = value
def permute(self, perm):
self.pi = self.pi[perm]
def initialize(self):
pass
def likeliest(self):
return np.argmax(self.pi)
def sample(self):
return npr.choice(self.nb_states, p=self.pi)
def log_init(self):
return np.log(self.pi)
def mstep(self, p, **kwargs):
eps = kwargs.get('eps', 1e-8)
pi = sum([_p[0, :] for _p in p]) + eps
self.pi = pi / sum(pi)
class InitGaussianObservation:
def __init__(self, nb_states, obs_dim, act_dim, nb_lags=1, **kwargs):
assert nb_lags > 0
self.nb_states = nb_states
self.obs_dim = obs_dim
self.act_dim = act_dim
self.nb_lags = nb_lags
# self.mu = npr.randn(self.nb_states, self.obs_dim)
# self._sigma_chol = 5. * npr.randn(self.nb_states, self.obs_dim, self.obs_dim)
self.mu = np.zeros((self.nb_states, self.obs_dim))
self._sigma_chol = np.zeros((self.nb_states, self.obs_dim, self.obs_dim))
for k in range(self.nb_states):
_sigma = invwishart.rvs(self.obs_dim + 1, np.eye(self.obs_dim))
self._sigma_chol[k] = np.linalg.cholesky(_sigma * np.eye(self.obs_dim))
self.mu[k] = mvn.rvs(mean=None, cov=1e2 * _sigma, size=(1, ))
@property
def sigma(self):
return np.matmul(self._sigma_chol, np.swapaxes(self._sigma_chol, -1, -2))
@sigma.setter
def sigma(self, value):
self._sigma_chol = np.linalg.cholesky(value + 1e-8 * np.eye(self.obs_dim))
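    # The covariance is stored through its Cholesky factor: the getter
    # rebuilds sigma as L @ L.T and the setter re-factorizes, adding a small
    # diagonal jitter for numerical stability.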
@property
def params(self):
return self.mu, self._sigma_chol
@params.setter
def params(self, value):
self.mu, self._sigma_chol = value
def permute(self, perm):
self.mu = self.mu[perm]
self._sigma_chol = self._sigma_chol[perm]
def initialize(self, x, **kwargs):
x0 = np.vstack([_x[:self.nb_lags] for _x in x])
self.mu = np.array([np.mean(x0, axis=0) for k in range(self.nb_states)])
self.sigma = np.array([np.cov(x0, rowvar=False) for k in range(self.nb_states)])
def mean(self, z):
return self.mu[z]
def sample(self, z):
x = mvn(mean=self.mean(z), cov=self.sigma[z]).rvs()
return np.atleast_1d(x)
def log_likelihood(self, x):
if isinstance(x, np.ndarray):
x0 = x[:self.nb_lags]
log_lik = np.zeros((x0.shape[0], self.nb_states))
for k in range(self.nb_states):
log_lik[:, k] = lg_mvn(x0, self.mean(k), self.sigma[k])
return log_lik
else:
return list(map(self.log_likelihood, x))
def mstep(self, p, x, **kwargs):
x0, p0 = [], []
for _x, _p in zip(x, p):
x0.append(_x[:self.nb_lags])
p0.append(_p[:self.nb_lags])
J = np.zeros((self.nb_states, self.obs_dim))
h = np.zeros((self.nb_states, self.obs_dim))
for _x, _p in zip(x0, p0):
J += np.sum(_p[:, :, None], axis=0)
h += np.sum(_p[:, :, None] * _x[:, None, :], axis=0)
self.mu = h / J
sqerr = np.zeros((self.nb_states, self.obs_dim, self.obs_dim))
norm = np.zeros((self.nb_states, ))
for _x, _p in zip(x0, p0):
resid = _x[:, None, :] - self.mu
sqerr += np.sum(_p[:, :, None, None] * resid[:, :, None, :]
* resid[:, :, :, None], axis=0)
norm += np.sum(_p, axis=0)
self.sigma = sqerr / norm[:, None, None]
def smooth(self, p, x):
if all(isinstance(i, np.ndarray) for i in [p, x]):
p0 = p[:self.nb_lags]
return p0.dot(self.mu)
else:
return list(map(self.smooth, p, x))
class InitGaussianControl:
def __init__(self, nb_states, obs_dim, act_dim,
nb_lags=1, degree=1, **kwargs):
assert nb_lags > 0
self.nb_states = nb_states
self.obs_dim = obs_dim
self.act_dim = act_dim
self.nb_lags = nb_lags
self.degree = degree
self.feat_dim = int(sc.special.comb(self.degree + self.obs_dim, self.degree)) - 1
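        # feat_dim counts the polynomial features of total degree <= `degree`
        # in obs_dim variables: C(degree + obs_dim, degree) monomials minus
        # the constant term, matching PolynomialFeatures(include_bias=False)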
self.basis = PolynomialFeatures(self.degree, include_bias=False)
# self.K = npr.randn(self.nb_states, self.act_dim, self.feat_dim)
# self.kff = npr.randn(self.nb_states, self.act_dim)
# self._sigma_chol = 5. * npr.randn(self.nb_states, self.act_dim, self.act_dim)
self.K = np.zeros((self.nb_states, self.act_dim, self.feat_dim))
self.kff = np.zeros((self.nb_states, self.act_dim))
self._sigma_chol = np.zeros((self.nb_states, self.act_dim, self.act_dim))
for k in range(self.nb_states):
_sigma = invwishart.rvs(self.act_dim + 1, np.eye(self.act_dim))
self._sigma_chol[k] = np.linalg.cholesky(_sigma * np.eye(self.act_dim))
self.K[k] = mvn.rvs(mean=None, cov=1e2 * _sigma, size=(self.feat_dim, )).T
self.kff[k] = mvn.rvs(mean=None, cov=1e2 * _sigma, size=(1, ))
@property
def sigma(self):
return np.matmul(self._sigma_chol, np.swapaxes(self._sigma_chol, -1, -2))
@sigma.setter
def sigma(self, value):
self._sigma_chol = np.linalg.cholesky(value + 1e-8 * np.eye(self.act_dim))
@property
def params(self):
return self.K, self.kff, self._sigma_chol
@params.setter
def params(self, value):
self.K, self.kff, self._sigma_chol = value
def permute(self, perm):
self.K = self.K[perm]
self.kff = self.kff[perm]
self._sigma_chol = self._sigma_chol[perm]
def initialize(self, x, u, **kwargs):
mu0 = kwargs.get('mu0', 0.)
sigma0 = kwargs.get('sigma0', 1e64)
psi0 = kwargs.get('psi0', 1.)
nu0 = kwargs.get('nu0', self.act_dim + 1)
x0 = np.vstack([_x[:self.nb_lags] for _x in x])
u0 = np.vstack([_u[:self.nb_lags] for _u in u])
f0 = self.featurize(x0)
K, kff, sigma = linear_regression(f0, u0, weights=None, fit_intercept=True,
mu0=mu0, sigma0=sigma0, psi0=psi0, nu0=nu0)
self.K = np.array([K for _ in range(self.nb_states)])
self.kff = np.array([kff for _ in range(self.nb_states)])
self.sigma = np.array([sigma for _ in range(self.nb_states)])
def featurize(self, x):
feat = self.basis.fit_transform(np.atleast_2d(x))
return np.squeeze(feat) if x.ndim == 1\
else np.reshape(feat, (x.shape[0], -1))
def mean(self, z, x):
feat = self.featurize(x)
u = np.einsum('kh,...h->...k', self.K[z], feat) + self.kff[z]
return np.atleast_1d(u)
def sample(self, z, x):
u = mvn(mean=self.mean(z, x), cov=self.sigma[z]).rvs()
return np.atleast_1d(u)
def log_likelihood(self, x, u):
if isinstance(x, np.ndarray):
x0 = x[:self.nb_lags]
u0 = u[:self.nb_lags]
log_lik = np.zeros((u0.shape[0], self.nb_states))
for k in range(self.nb_states):
log_lik[:, k] = lg_mvn(u0, self.mean(k, x0), self.sigma[k])
return log_lik
else:
return list(map(self.log_likelihood, x, u))
def mstep(self, p, x, u, **kwargs):
mu0 = kwargs.get('mu0', 0.)
sigma0 = kwargs.get('sigma0', 1e64)
psi0 = kwargs.get('psi0', 1.)
nu0 = kwargs.get('nu0', self.act_dim + 1)
x0, u0, p0 = [], [], []
for _x, _u, _p in zip(x, u, p):
x0.append(_x[:self.nb_lags])
u0.append(_u[:self.nb_lags])
p0.append(_p[:self.nb_lags])
f0 = list(map(self.featurize, x0))
_sigma = np.zeros((self.nb_states, self.act_dim, self.act_dim))
for k in range(self.nb_states):
coef, intercept, sigma = linear_regression(Xs=np.vstack(f0), ys=np.vstack(u0),
weights=np.vstack(p0)[:, k], fit_intercept=True,
mu0=mu0, sigma0=sigma0, psi0=psi0, nu0=nu0)
self.K[k] = coef
self.kff[k] = intercept
_sigma[k] = sigma
self.sigma = _sigma
def smooth(self, p, x, u):
if all(isinstance(i, np.ndarray) for i in [p, x, u]):
x0 = x[:self.nb_lags]
u0 = u[:self.nb_lags]
p0 = p[:self.nb_lags]
mu = np.zeros((len(u0), self.nb_states, self.act_dim))
for k in range(self.nb_states):
mu[:, k, :] = self.mean(k, x0)
            return np.einsum('nk,nkl->nl', p0, mu)
else:
return list(map(self.smooth, p, x, u))
class BayesianInitCategoricalState:
def __init__(self, nb_states, prior, likelihood=None):
self.nb_states = nb_states
# Dirichlet prior
self.prior = prior
# Dirichlet posterior
self.posterior = copy.deepcopy(prior)
# Categorical likelihood
if likelihood is not None:
self.likelihood = likelihood
else:
pi = self.prior.rvs()
self.likelihood = Categorical(dim=nb_states, pi=pi)
@property
def params(self):
return self.likelihood.pi
@params.setter
def params(self, value):
self.likelihood.pi = value
def permute(self, perm):
self.likelihood.pi = self.likelihood.pi[perm]
def initialize(self):
pass
def likeliest(self):
return np.argmax(self.likelihood.pi)
def sample(self):
return npr.choice(self.nb_states, p=self.likelihood.pi)
def log_init(self):
return np.log(self.likelihood.pi)
def mstep(self, p, **kwargs):
p0 = [_p[0, :] for _p in p]
stats = self.likelihood.weighted_statistics(None, p0)
self.posterior.nat_param = self.prior.nat_param + stats
try:
self.likelihood.params = self.posterior.mode()
except AssertionError:
self.likelihood.params = self.posterior.mean()
self.empirical_bayes(**kwargs)
def empirical_bayes(self, lr=1e-3):
grad = self.prior.log_likelihood_grad(self.likelihood.params)
self.prior.params = self.prior.params + lr * grad
class _BayesianInitGaussianObservationBase:
def __init__(self, nb_states, obs_dim, act_dim,
nb_lags, prior, likelihood=None):
assert nb_lags > 0
self.nb_states = nb_states
self.obs_dim = obs_dim
self.act_dim = act_dim
self.nb_lags = nb_lags
self.prior = prior
self.posterior = copy.deepcopy(prior)
self.likelihood = likelihood
@property
def params(self):
return self.likelihood.params
@params.setter
def params(self, values):
self.likelihood.params = values
def permute(self, perm):
raise NotImplementedError
def initialize(self, x, **kwargs):
kmeans = kwargs.get('kmeans', True)
x0 = [_x[:self.nb_lags] for _x in x]
t = list(map(len, x0))
if kmeans:
from sklearn.cluster import KMeans
km = KMeans(self.nb_states)
km.fit(np.vstack(x0))
z0 = np.split(km.labels_, np.cumsum(t)[:-1])
else:
z0 = list(map(partial(npr.choice, self.nb_states), t))
z0 = list(map(partial(one_hot, self.nb_states), z0))
stats = self.likelihood.weighted_statistics(x0, z0)
self.posterior.nat_param = self.prior.nat_param + stats
self.likelihood.params = self.posterior.rvs()
def mean(self, z):
x = self.likelihood.dists[z].mean()
return np.atleast_1d(x)
def sample(self, z):
x = self.likelihood.dists[z].rvs()
return np.atleast_1d(x)
def log_likelihood(self, x):
if isinstance(x, np.ndarray):
x0 = x[:self.nb_lags]
return self.likelihood.log_likelihood(x0)
else:
return list(map(self.log_likelihood, x))
def mstep(self, p, x, **kwargs):
x0, p0 = [], []
for _x, _p in zip(x, p):
x0.append(_x[:self.nb_lags])
p0.append(_p[:self.nb_lags])
stats = self.likelihood.weighted_statistics(x0, p0)
self.posterior.nat_param = self.prior.nat_param + stats
self.likelihood.params = self.posterior.mode()
self.empirical_bayes(**kwargs)
def empirical_bayes(self, lr=np.array([0., 0., 1e-3, 1e-3])):
raise NotImplementedError
def smooth(self, p, x):
if all(isinstance(i, np.ndarray) for i in [p, x]):
p0 = p[:self.nb_lags]
return p0.dot(self.likelihood.mus)
else:
return list(map(self.smooth, p, x))
class BayesianInitGaussianObservation(_BayesianInitGaussianObservationBase):
# mu = np.zeros((obs_dim,))
# kappa = 1e-64
# psi = 1e8 * np.eye(obs_dim) / (obs_dim + 1)
# nu = (obs_dim + 1) + obs_dim + 1
#
# from sds.distributions.composite import StackedNormalWishart
# prior = StackedNormalWishart(nb_states, obs_dim,
# mus=np.array([mu for _ in range(nb_states)]),
# kappas=np.array([kappa for _ in range(nb_states)]),
# psis=np.array([psi for _ in range(nb_states)]),
# nus=np.array([nu for _ in range(nb_states)]))
def __init__(self, nb_states, obs_dim, act_dim,
nb_lags, prior, likelihood=None):
super(BayesianInitGaussianObservation, self).__init__(nb_states, obs_dim, act_dim,
nb_lags, prior, likelihood)
# Gaussian likelihood
if likelihood is not None:
self.likelihood = likelihood
else:
mus, lmbdas = self.prior.rvs()
self.likelihood = StackedGaussiansWithPrecision(size=self.nb_states,
dim=self.obs_dim,
mus=mus, lmbdas=lmbdas)
def permute(self, perm):
self.likelihood.mus = self.likelihood.mus[perm]
self.likelihood.lmbdas = self.likelihood.lmbdas[perm]
def empirical_bayes(self, lr=np.array([0., 0., 1e-3, 1e-3])):
grad = self.prior.log_likelihood_grad(self.likelihood.params)
self.prior.params = [p + r * g for p, g, r in zip(self.prior.params, grad, lr)]
class BayesianInitDiagonalGaussianObservation(_BayesianInitGaussianObservationBase):
# mu = np.zeros((obs_dim,))
# kappa = 1e-64 * np.ones((obs_dim,))
# alpha = ((obs_dim + 1) + obs_dim + 1) / 2. * np.ones((obs_dim,))
# beta = 1. / (2. * 1e8 * np.ones((obs_dim,)) / (obs_dim + 1))
#
# from sds.distributions.composite import StackedNormalGamma
# prior = StackedNormalGamma(nb_states, obs_dim,
# mus=np.array([mu for _ in range(nb_states)]),
# kappas=np.array([kappa for _ in range(nb_states)]),
# alphas=np.array([alpha for _ in range(nb_states)]),
# betas=np.array([beta for _ in range(nb_states)]))
def __init__(self, nb_states, obs_dim, act_dim,
nb_lags, prior, likelihood=None):
super(BayesianInitDiagonalGaussianObservation, self).__init__(nb_states, obs_dim, act_dim,
nb_lags, prior, likelihood)
# Diagonal Gaussian likelihood
if likelihood is not None:
self.likelihood = likelihood
else:
mus, lmbdas_diag = self.prior.rvs()
self.likelihood = StackedGaussiansWithDiagonalPrecision(size=self.nb_states,
dim=self.obs_dim,
mus=mus, lmbdas_diag=lmbdas_diag)
def permute(self, perm):
self.likelihood.mus = self.likelihood.mus[perm]
self.likelihood.lmbdas_diag = self.likelihood.lmbdas_diag[perm]
def empirical_bayes(self, lr=np.array([0., 0., 1e-3, 1e-3])):
pass
class BayesianInitGaussianControl:
def __init__(self, nb_states, obs_dim, act_dim,
nb_lags, prior, degree=1, likelihood=None):
assert nb_lags > 0
self.nb_states = nb_states
self.obs_dim = obs_dim
self.act_dim = act_dim
self.nb_lags = nb_lags
self.degree = degree
self.feat_dim = int(sc.special.comb(self.degree + self.obs_dim, self.degree)) - 1
self.basis = PolynomialFeatures(self.degree, include_bias=False)
self.input_dim = self.feat_dim + 1
self.output_dim = self.act_dim
self.prior = prior
self.posterior = copy.deepcopy(prior)
# Linear-Gaussian likelihood
if likelihood is not None:
self.likelihood = likelihood
else:
As, lmbdas = self.prior.rvs()
self.likelihood = StackedLinearGaussiansWithPrecision(size=self.nb_states,
column_dim=self.input_dim,
row_dim=self.output_dim,
As=As, lmbdas=lmbdas, affine=True)
@property
def params(self):
return self.likelihood.params
@params.setter
def params(self, values):
self.likelihood.params = values
def permute(self, perm):
self.likelihood.As = self.likelihood.As[perm]
self.likelihood.lmbdas = self.likelihood.lmbdas[perm]
def initialize(self, x, u, **kwargs):
kmeans = kwargs.get('kmeans', False)
x0, u0 = [], []
for _x, _u in zip(x, u):
x0.append(_x[:self.nb_lags])
u0.append(_u[:self.nb_lags])
f0 = list(map(self.featurize, x0))
t = list(map(len, f0))
if kmeans:
from sklearn.cluster import KMeans
km = KMeans(self.nb_states)
km.fit(np.vstack(f0))
z0 = np.split(km.labels_, np.cumsum(t)[:-1])
else:
z0 = list(map(partial(npr.choice, self.nb_states), t))
z0 = list(map(partial(one_hot, self.nb_states), z0))
stats = self.likelihood.weighted_statistics(f0, u0, z0)
self.posterior.nat_param = self.prior.nat_param + stats
self.likelihood.params = self.posterior.rvs()
def featurize(self, x):
feat = self.basis.fit_transform(np.atleast_2d(x))
return np.squeeze(feat) if x.ndim == 1\
else np.reshape(feat, (x.shape[0], -1))
def mean(self, z, x):
feat = self.featurize(x)
u = self.likelihood.dists[z].mean(feat)
return np.atleast_1d(u)
def sample(self, z, x):
feat = self.featurize(x)
u = self.likelihood.dists[z].rvs(feat)
return np.atleast_1d(u)
def log_likelihood(self, x, u):
if isinstance(x, np.ndarray):
x0 = x[:self.nb_lags]
u0 = u[:self.nb_lags]
f0 = self.featurize(x0)
return self.likelihood.log_likelihood(f0, u0)
else:
return list(map(self.log_likelihood, x, u))
def mstep(self, p, x, u, **kwargs):
x0, u0, p0 = [], [], []
for _x, _u, _p in zip(x, u, p):
x0.append(_x[:self.nb_lags])
u0.append(_u[:self.nb_lags])
p0.append(_p[:self.nb_lags])
f0 = list(map(self.featurize, x0))
stats = self.likelihood.weighted_statistics(f0, u0, p0)
self.posterior.nat_param = self.prior.nat_param + stats
self.likelihood.params = self.posterior.mode()
def smooth(self, p, x, u):
if all(isinstance(i, np.ndarray) for i in [p, x, u]):
x0 = x[:self.nb_lags]
u0 = u[:self.nb_lags]
p0 = p[:self.nb_lags]
mu = np.zeros((len(u0), self.nb_states, self.obs_dim))
for k in range(self.nb_states):
mu[:, k, :] = self.mean(k, x0)
return np.einsum('nk,nkl->nl', p0, mu)
else:
return list(map(self.smooth, p, x, u))
class BayesianInitGaussianControlWithAutomaticRelevance:
def __init__(self, nb_states, obs_dim, act_dim,
nb_lags, prior, degree=1):
assert nb_lags > 0
self.nb_states = nb_states
self.obs_dim = obs_dim
self.act_dim = act_dim
self.nb_lags = nb_lags
self.degree = degree
self.feat_dim = int(sc.special.comb(self.degree + self.obs_dim, self.degree)) - 1
self.basis = PolynomialFeatures(self.degree, include_bias=False)
self.input_dim = self.feat_dim + 1
self.output_dim = self.act_dim
likelihood_precision_prior = prior['likelihood_precision_prior']
parameter_precision_prior = prior['parameter_precision_prior']
from sds.distributions.composite import StackedMultiOutputLinearGaussianWithAutomaticRelevance
self.object = StackedMultiOutputLinearGaussianWithAutomaticRelevance(self.nb_states,
self.input_dim,
self.output_dim,
likelihood_precision_prior,
parameter_precision_prior)
@property
def params(self):
return self.object.params
@params.setter
def params(self, values):
self.object.params = values
def permute(self, perm):
self.object.As = self.object.As[perm]
self.object.lmbdas = self.object.lmbdas[perm]
def initialize(self, x, u, **kwargs):
pass
def featurize(self, x):
feat = self.basis.fit_transform(np.atleast_2d(x))
return np.squeeze(feat) if x.ndim == 1\
else np.reshape(feat, (x.shape[0], -1))
def mean(self, z, x):
feat = self.featurize(x)
u = self.object.mean(z, feat)
return np.atleast_1d(u)
def sample(self, z, x):
feat = self.featurize(x)
u = self.object.rvs(z, feat)
return np.atleast_1d(u)
def log_likelihood(self, x, u):
if isinstance(x, np.ndarray) and isinstance(u, np.ndarray):
x0 = x[:self.nb_lags]
u0 = u[:self.nb_lags]
f0 = self.featurize(x0)
return self.object.log_likelihood(f0, u0)
        else:
            return list(map(self.log_likelihood, x, u))
def mstep(self, p, x, u, **kwargs):
x0, u0, p0 = [], [], []
for _x, _u, _p in zip(x, u, p):
x0.append(_x[:self.nb_lags])
u0.append(_u[:self.nb_lags])
p0.append(_p[:self.nb_lags])
f0 = list(map(self.featurize, x0))
f0, u0, p0 = list(map(np.vstack, (f0, u0, p0)))
self.object.em(f0, u0, p0, **kwargs)
def smooth(self, p, x, u):
if all(isinstance(i, np.ndarray) for i in [p, x, u]):
x0 = x[:self.nb_lags]
u0 = u[:self.nb_lags]
p0 = p[:self.nb_lags]
            mu = np.zeros((len(u0), self.nb_states, self.act_dim))
for k in range(self.nb_states):
mu[:, k, :] = self.mean(k, x0)
return np.einsum('nk,nkl->nl', p0, mu)
else:
return list(map(self.smooth, p, x, u))
class _BayesianInitGaussianLatentBase:
def __init__(self, ltn_dim, act_dim,
nb_lags, prior, likelihood=None):
assert nb_lags > 0
self.ltn_dim = ltn_dim
self.act_dim = act_dim
self.nb_lags = nb_lags
self.prior = prior
self.posterior = copy.deepcopy(prior)
self.likelihood = likelihood
@property
def params(self):
return self.likelihood.params
@params.setter
def params(self, values):
self.likelihood.params = values
def initialize(self, x, **kwargs):
pass
def mstep(self, stats, **kwargs):
self.posterior.nat_param = self.prior.nat_param + stats
self.likelihood.params = self.posterior.mode()
class SingleBayesianInitGaussianLatent(_BayesianInitGaussianLatentBase):
# mu = np.zeros((ltn_dim,))
# kappa = 1e-64
# psi = 1e8 * np.eye(ltn_dim) / (ltn_dim + 1)
# nu = (ltn_dim + 1) + ltn_dim + 1
#
# from sds.distributions.composite import NormalWishart
# prior = NormalWishart(ltn_dim,
# mu=mu, kappa=kappa,
# psi=psi, nu=nu)
def __init__(self, ltn_dim, act_dim,
nb_lags, prior, likelihood=None):
super(SingleBayesianInitGaussianLatent, self).__init__(ltn_dim, act_dim,
nb_lags, prior, likelihood)
# Gaussian likelihood
if likelihood is not None:
self.likelihood = likelihood
else:
mu, lmbda = self.prior.rvs()
self.likelihood = GaussianWithPrecision(dim=self.ltn_dim,
mu=mu, lmbda=lmbda)
class SingleBayesianInitDiagonalGaussianLatent(_BayesianInitGaussianLatentBase):
# mu = np.zeros((ltn_dim,))
# kappa = 1e-64 * np.ones((ltn_dim,))
# alpha = ((ltn_dim + 1) + ltn_dim + 1) / 2. * np.ones((ltn_dim,))
# beta = 1. / (2. * 1e8 * np.ones((ltn_dim,)) / (ltn_dim + 1))
#
# from sds.distributions.composite import NormalGamma
# prior = NormalGamma(ltn_dim,
# mu=mu, kappa=kappa,
# alphas=alpha, betas=beta)
def __init__(self, ltn_dim, act_dim,
nb_lags, prior, likelihood=None):
super(SingleBayesianInitDiagonalGaussianLatent, self).__init__(ltn_dim, act_dim,
nb_lags, prior, likelihood)
# Diagonal Gaussian likelihood
if likelihood is not None:
self.likelihood = likelihood
else:
mu, lmbda_diag = self.prior.rvs()
self.likelihood = GaussianWithDiagonalPrecision(dim=self.ltn_dim,
mu=mu, lmbda_diag=lmbda_diag)
class BayesianInitGaussianLatent(_BayesianInitGaussianLatentBase):
# mu = np.zeros((ltn_dim,))
# kappa = 1e-64
# psi = 1e8 * np.eye(ltn_dim) / (ltn_dim + 1)
# nu = (ltn_dim + 1) + ltn_dim + 1
#
# from sds.distributions.composite import StackedNormalWishart
# prior = StackedNormalWishart(nb_states, ltn_dim,
# mus=np.array([mu for _ in range(nb_states)]),
# kappas=np.array([kappa for _ in range(nb_states)]),
# psis=np.array([psi for _ in range(nb_states)]),
# nus=np.array([nu for _ in range(nb_states)]))
def __init__(self, nb_states, ltn_dim, act_dim,
nb_lags, prior, likelihood=None):
super(BayesianInitGaussianLatent, self).__init__(ltn_dim, act_dim,
nb_lags, prior, likelihood)
self.nb_states = nb_states
# Gaussian likelihood
if likelihood is not None:
self.likelihood = likelihood
else:
mus, lmbdas = self.prior.rvs()
self.likelihood = StackedGaussiansWithPrecision(size=self.nb_states,
dim=self.ltn_dim,
mus=mus, lmbdas=lmbdas)
def permute(self, perm):
pass
class BayesianInitDiagonalGaussianLatent(_BayesianInitGaussianLatentBase):
# mu = np.zeros((ltn_dim,))
# kappa = 1e-64 * np.ones((ltn_dim,))
# alpha = ((ltn_dim + 1) + ltn_dim + 1) / 2. * np.ones((ltn_dim,))
# beta = 1. / (2. * 1e8 * np.ones((ltn_dim,)) / (ltn_dim + 1))
#
# from sds.distributions.composite import StackedNormalGamma
# prior = StackedNormalGamma(nb_states, ltn_dim,
# mus=np.array([mu for _ in range(nb_states)]),
# kappas=np.array([kappa for _ in range(nb_states)]),
# alphas=np.array([alpha for _ in range(nb_states)]),
# betas=np.array([beta for _ in range(nb_states)]))
def __init__(self, nb_states, ltn_dim, act_dim,
nb_lags, prior, likelihood=None):
super(BayesianInitDiagonalGaussianLatent, self).__init__(ltn_dim, act_dim,
nb_lags, prior, likelihood)
self.nb_states = nb_states
# Diagonal Gaussian likelihood
if likelihood is not None:
self.likelihood = likelihood
else:
mus, lmbdas_diag = self.prior.rvs()
self.likelihood = StackedGaussiansWithDiagonalPrecision(size=self.nb_states,
dim=self.ltn_dim,
mus=mus, lmbdas_diag=lmbdas_diag)
def permute(self, perm):
pass
|
419260
|
import urllib2
import argparse
import xmlrpclib
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--resource-id', type=int, required=True)
parser.add_argument('-o', '--output', required=True)
return parser.parse_args()
def fetch(url, retries=4, timeout=5):
for i in xrange(retries):
try:
return urllib2.urlopen(url, timeout=timeout).read()
except Exception:
if i + 1 < retries:
continue
else:
raise
def fetch_resource(id_):
urls = xmlrpclib.ServerProxy("https://sandbox.yandex-team.ru/sandbox/xmlrpc").get_resource_http_links(id_)
for u in urls:
try:
return fetch(u)
except Exception:
continue
raise Exception('Cannot fetch resource {}'.format(id_))
if __name__ == '__main__':
args = parse_args()
with open(args.output, 'wb') as f:
f.write(fetch_resource(int(args.resource_id)))
|
419262
|
from turtle import Turtle
class White(Turtle):
def __init__(self):
super().__init__()
self.shape("square")
self.color("white")
        self.shapesize(stretch_len=30, stretch_wid=2)
        self.penup()
        self.goto(0, 280)
|
419266
|
from flask import Flask
import sys
app = Flask(__name__)
serverName = sys.argv[1]
@app.route('/')
def hello():
return serverName
if __name__ == '__main__':
    app.run(port=int(sys.argv[2]))
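# Usage sketch (assumed invocation): python <this_script>.py <server-name> <port>,
# e.g. `python app.py web1 5001`; GET / then returns the given server name.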
|
419272
|
import numpy as np
import random
from toolz import partition
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import transpile, assemble
from qiskit import BasicAer, Aer, execute
from qiskit.quantum_info import state_fidelity
from qiskit.visualization import *
from qiskit.quantum_info.operators import Operator
nqubits = 8
nshot = 1000
def normlaizeData(data):
    # create an array of pixel values scaled to [0, 1], then L2-normalize
    testdata = data
    arr_data = testdata.flatten() / max(testdata.flatten())
    encoding_data = np.array([np.round(x, 6) for x in arr_data])
    sum_const = np.sqrt(sum(encoding_data * encoding_data))
    encoding_norm = encoding_data / sum_const
    return encoding_norm
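# qc.initialize (used below) expects a unit-norm statevector of length
# 2**nqubits, which is why the pixels are L2-normalized above: the squared
# amplitudes must sum to 1 so measured counts map back to pixel intensities.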
def buildCicuit(encoding):
qr = QuantumRegister(nqubits)
cr = ClassicalRegister(nqubits)
qc = QuantumCircuit(qr, name='Initialization')
qc.initialize(encoding, range(nqubits))
my_inst = qc.to_instruction()
my_circuit = QuantumCircuit(qr,cr)
my_circuit.append(my_inst, range(nqubits))
my_circuit.measure(qr[:],cr[:])
return my_circuit
def runCircuit(circuit):
backend = Aer.get_backend('qasm_simulator')
job = execute(circuit, backend, shots=nshot)
result = job.result()
count =result.get_counts()
return count
def countBitstring(count):
f=[]
for i in range(2**nqubits):
if format(i, '#010b')[2:10] in count:
f.append(count[format(i, '#010b')[2:10]])
else:
f.append(0)
return np.array(list(f))
def imgize(listdata):
    out_img = np.array(list(partition(16, listdata)))
    return out_img / max(listdata)
def findCutOff(img, target):
    f = []
    for i in range(100):
        f.append(1 - sum(sum((abs(target - 1.0 * (img > i / 100))) / (2**nqubits))))
    mx = max(f)
    index = f.index(mx)
    rethres = 1.0 * (img > index / 100)
    return rethres
def accuracy(img,target):
acc=1-sum(sum((abs(target-img))))/(2**nqubits)
return acc
def AmpltudeEncoding(input,target):
encoding_norm=normlaizeData(input)
qc=buildCicuit(encoding_norm)
count = runCircuit(qc)
listcount=countBitstring(count)
img=imgize(listcount)
acc=accuracy(img,target)
return img,acc
#filter is in %
def countBitstringFilter(count,filter):
f=[]
thrs = nshot*filter
for i in range(2**nqubits):
if format(i, '#010b')[2:10] in count:
if count[format(i, '#010b')[2:10]] > thrs:
f.append(1)
else:
f.append(0)
else:
f.append(0)
return np.array(list(f))
def AmpltudeEncodingFilter(input,filter):
encoding_norm=normlaizeData(input)
qc=buildCicuit(encoding_norm)
count = runCircuit(qc)
listcount=countBitstringFilter(count,filter)
img=imgize(listcount)
return img
#filter is in %
def countBitstringFilter32(count, filter):
    nqubits = 10
    nshot = 10000
    f = []
thrs = nshot*filter
for i in range(2**nqubits):
if format(i, '#012b')[2:12] in count:
if count[format(i, '#012b')[2:12]] > thrs:
f.append(1)
else:
f.append(0)
else:
f.append(0)
return np.array(list(f))
def imgize32(listdata):
    out_img = np.array(list(partition(32, listdata)))
    return out_img / max(listdata)
def AmpltudeEncodingFilter32(input, filter):
    nqubits = 10
    nshot = 10000
encoding_norm=normlaizeData(input)
qc=buildCicuit32(encoding_norm)
count = runCircuit(qc)
listcount=countBitstringFilter32(count,filter)
img=imgize32(listcount)
return img
def buildCicuit32(encoding):
    nqubits = 10
    nshot = 10000
qr = QuantumRegister(nqubits)
cr = ClassicalRegister(nqubits)
qc = QuantumCircuit(qr, name='Initialization')
qc.initialize(encoding, range(nqubits))
my_inst = qc.to_instruction()
my_circuit = QuantumCircuit(qr,cr)
my_circuit.append(my_inst, range(nqubits))
my_circuit.measure(qr[:],cr[:])
return my_circuit
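# Usage sketch (assumes a 16x16 array `img16` with values in [0, 1]):
#   reconstructed, acc = AmpltudeEncoding(img16, target=img16)
# Reconstruction is shot-limited: nshot=1000 spread over 2**8 = 256
# amplitudes is coarse, so expect a noisy image and accuracy below 1.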
|
419283
|
import logging
import torch
from scipy import stats
from torch.nn.utils.rnn import pad_sequence
from typing import List
from torch.utils.data import SequentialSampler, DataLoader
logger = logging.getLogger(__name__)
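# `title_tokenization` (used in title_perplexity) and `refine_wikitext` (used
# in run_title_evaluation) are assumed to be provided elsewhere in the
# surrounding project; they are neither defined nor imported in this file.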
def title_perplexity(model, tokenizer, article, device="cuda"):
max_length = model.config.n_positions
article_tokens = tokenizer.tokenize(article.text)
title_tokens = tokenizer.tokenize(title_tokenization(article.title))
tokens = article_tokens[: (max_length - len(title_tokens) - 1)] + title_tokens
token_ids = [tokenizer.eos_token_id] + tokenizer.convert_tokens_to_ids(tokens)
with torch.no_grad():
tensor_input = torch.tensor([token_ids], device=device)
loss, logits, *_ = model(tensor_input, labels=tensor_input)
    # score only the actual title tokens at the end of the input
    title_offset = len(tokens) - len(title_tokens)
    lp = 0
    n = 0
    for i, token_id in enumerate(tensor_input[0][title_offset + 1:]):
        # logits at position k predict the token at position k + 1
        predicted_score = logits[0, title_offset + i]
        predicted_prob = torch.nn.functional.softmax(predicted_score, dim=0)
        lp += torch.log(predicted_prob[token_id])
        n += 1
    title_pp = -lp / n
    return title_pp.item()
def lm_eval(model, tokenizer, file_path, device="cuda", block_size=512, batch_size=1):
def collate(examples: List[torch.Tensor]):
if tokenizer._pad_token is None:
return pad_sequence(examples, batch_first=True)
return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)
block_size = block_size - (tokenizer.max_len - tokenizer.max_len_single_sentence)
eval_dataset = []
with open(file_path, encoding="utf-8") as f:
text = f.read()
tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
for i in range(0, len(tokenized_text) - block_size + 1, block_size): # Truncate in block of block_size
tokenized = tokenizer.build_inputs_with_special_tokens(tokenized_text[i : i + block_size])
tensorized = torch.tensor(tokenized, dtype=torch.long)
eval_dataset.append(tensorized)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=batch_size, collate_fn=collate)
    eval_loss = 0.0
    nb_eval_steps = 0
    for batch in eval_dataloader:
        inputs, labels = (batch, batch)
        inputs = inputs.to(device)
        labels = labels.to(device)
        with torch.no_grad():
            outputs = model(inputs, labels=labels)
            lm_loss = outputs[0]
            eval_loss += lm_loss.mean().item()
        nb_eval_steps += 1
    # average the loss over all evaluation batches before exponentiating
    eval_loss = eval_loss / max(nb_eval_steps, 1)
    perplexity = torch.exp(torch.tensor(eval_loss))
    return perplexity
def perplexity(model, tokenizer, sentences, device="cuda", **fwd_args):
    with torch.no_grad():
        token_ids = [
            torch.tensor([tokenizer.eos_token_id] + tokenizer.convert_tokens_to_ids(tokenizer.tokenize(sentence)))
            for sentence in sentences
        ]
        padded_tokens = pad_sequence(token_ids, batch_first=True)
        tensor_input = padded_tokens.to(device)
        loss, logits, *_ = model(tensor_input, labels=tensor_input, **fwd_args)
    # the model already returns the mean cross-entropy over all positions
    # (padded positions included), so report its negation, i.e. the average
    # token log-likelihood
    return -loss
def run_title_evaluation(model, tokenizer, path, limit=None):
title_pp = []
with open(path) as f:
for article in refine_wikitext(f, limit=limit):
title_pp.append(title_perplexity(model, tokenizer, article))
return stats.describe(title_pp)
|
419299
|
from typing import Union
from pathlib import Path
from typeguard import check_argument_types
import os
import glob
from datetime import datetime
import shutil
import logging
import numpy as np
import torch
from onnxruntime.quantization import quantize_dynamic, QuantType
from espnet2.bin.asr_inference import Speech2Text
from espnet2.text.sentencepiece_tokenizer import SentencepiecesTokenizer
from espnet_model_zoo.downloader import ModelDownloader
from .models import (
get_encoder,
get_decoder,
RNNDecoder,
PreDecoder,
CTC,
LanguageModel,
JointNetwork,
)
from .get_config import (
get_ngram_config,
get_beam_config,
get_token_config,
get_tokenizer_config,
get_weights_transducer,
get_trans_beam_config,
)
from espnet_onnx.utils.config import (
save_config,
update_model_path
)
class ModelExport:
def __init__(self, cache_dir: Union[Path, str] = None):
assert check_argument_types()
if cache_dir is None:
cache_dir = Path.home() / ".cache" / "espnet_onnx"
self.cache_dir = Path(cache_dir)
def export(
self,
model: Speech2Text,
tag_name: str = None,
quantize: bool = False,
verbose: bool = False,
):
assert check_argument_types()
if tag_name is None:
tag_name = datetime.now().strftime("%Y%m%d_%H%M%S")
base_dir = self.cache_dir / tag_name.replace(' ', '-')
export_dir = base_dir / 'full'
export_dir.mkdir(parents=True, exist_ok=True)
# copy model files
self._copy_files(model, base_dir, verbose)
model_config = self._create_config(model, export_dir)
# export encoder
enc_model = get_encoder(model.asr_model.encoder)
enc_out_size = enc_model.get_output_size()
self._export_encoder(enc_model, export_dir, verbose)
model_config.update(encoder=enc_model.get_model_config(
model.asr_model, export_dir))
# export decoder
dec_model = get_decoder(model.asr_model.decoder)
self._export_decoder(dec_model, enc_out_size, export_dir, verbose)
model_config.update(decoder=dec_model.get_model_config(export_dir))
# export joint_network if transducer decoder is used.
if model.asr_model.use_transducer_decoder:
joint_network = JointNetwork(
model.asr_model.joint_network,
model_config['beam_search']['search_type'],
)
self._export_joint_network(joint_network, export_dir, verbose)
model_config.update(joint_network=joint_network.get_model_config(export_dir))
# export ctc
ctc_model = CTC(model.asr_model.ctc.ctc_lo)
self._export_ctc(ctc_model, enc_out_size, export_dir, verbose)
model_config.update(ctc=ctc_model.get_model_config(export_dir))
# export lm
export_lm = False
if not model.asr_model.use_transducer_decoder:
if 'lm' in model.beam_search.full_scorers.keys():
export_lm = True
else:
if model.beam_search_transducer.use_lm:
export_lm = True
if export_lm:
lm_model = LanguageModel(model.beam_search.full_scorers['lm'])
self._export_lm(lm_model, export_dir, verbose)
model_config.update(lm=lm_model.get_model_config(export_dir))
else:
model_config.update(lm=dict(use_lm=False))
if quantize:
quantize_dir = base_dir / 'quantize'
quantize_dir.mkdir(exist_ok=True)
qt_config = self._quantize_model(export_dir, quantize_dir, verbose)
for m in qt_config.keys():
if 'predecoder' in m:
model_idx = int(m.split('_')[1])
model_config['decoder']['predecoder'][model_idx].update(
quantized_model_path=qt_config[m])
else:
model_config[m].update(quantized_model_path=qt_config[m])
config_name = base_dir / 'config.yaml'
save_config(model_config, config_name)
update_model_path(tag_name, base_dir)
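    # Usage sketch (hypothetical tag): ModelExport().export(speech2text_model,
    # tag_name='my-asr', quantize=True) writes ONNX files under
    # <cache_dir>/my-asr/full (and /quantize), plus a config.yaml alongside.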
def export_from_pretrained(self, tag_name: str, quantize: bool = False):
assert check_argument_types()
model = Speech2Text.from_pretrained(tag_name)
self.export(model, tag_name, quantize)
def export_from_zip(self, path: Union[Path, str], tag_name: str, quantize: bool = False):
assert check_argument_types()
cache_dir = Path(path).parent
d = ModelDownloader(cache_dir)
model_config = d.unpack_local_file(path)
model = Speech2Text(**model_config)
self.export(model, tag_name, quantize)
def _create_config(self, model, path):
ret = {}
if not model.asr_model.use_transducer_decoder:
if "ngram" in list(model.beam_search.full_scorers.keys()) \
+ list(model.beam_search.part_scorers.keys()):
ret.update(ngram=get_ngram_config(model))
else:
ret.update(ngram=dict(use_ngram=False))
ret.update(weights=model.beam_search.weights)
ret.update(beam_search=get_beam_config(
model.beam_search, model.minlenratio, model.maxlenratio))
else:
ret.update(weights=get_weights_transducer(
model.beam_search_transducer))
ret.update(beam_search=get_trans_beam_config(
model.beam_search_transducer
))
ret.update(transducer=dict(use_transducer_decoder=model.asr_model.use_transducer_decoder))
ret.update(token=get_token_config(model.asr_model))
ret.update(tokenizer=get_tokenizer_config(model.tokenizer, path))
return ret
def _export_model(self, model, file_name, verbose, enc_size=None):
if enc_size:
dummy_input = model.get_dummy_inputs(enc_size)
else:
dummy_input = model.get_dummy_inputs()
torch.onnx.export(
model,
dummy_input,
file_name,
verbose=verbose,
opset_version=11,
input_names=model.get_input_names(),
output_names=model.get_output_names(),
dynamic_axes=model.get_dynamic_axes()
)
def _export_encoder(self, model, path, verbose):
file_name = os.path.join(path, 'encoder.onnx')
if verbose:
logging.info(f'Encoder model is saved in {file_name}')
self._export_model(model, file_name, verbose)
def _export_decoder(self, model, enc_size, path, verbose):
file_name = os.path.join(path, 'decoder.onnx')
if verbose:
logging.info(f'Decoder model is saved in {file_name}')
self._export_model(model, file_name, verbose, enc_size)
# if decoder is RNNDecoder, then export predecoders
if isinstance(model, RNNDecoder):
self._export_predecoder(model, path, verbose)
def _export_predecoder(self, model, path, verbose):
if verbose:
logging.info(f'Pre-Decoder model is saved in {path}.' \
+ f'There should be {len(model.model.att_list)} files.')
for i, att in enumerate(model.model.att_list):
att_model = PreDecoder(att)
if att_model.require_onnx():
file_name = os.path.join(path, f'predecoder_{i}.onnx')
self._export_model(att_model, file_name, verbose)
def _export_ctc(self, model, enc_size, path, verbose):
file_name = os.path.join(path, 'ctc.onnx')
if verbose:
logging.info(f'CTC model is saved in {file_name}')
self._export_model(model, file_name, verbose, enc_size)
def _export_lm(self, model, path, verbose):
file_name = os.path.join(path, 'lm.onnx')
if verbose:
logging.info(f'LM model is saved in {file_name}')
self._export_model(model, file_name, verbose)
def _export_joint_network(self, model, path, verbose):
file_name = os.path.join(path, 'joint_network.onnx')
if verbose:
logging.info(f'JointNetwork model is saved in {file_name}')
self._export_model(model, file_name, verbose)
def _copy_files(self, model, path, verbose):
# copy stats file
if model.asr_model.normalize is not None \
and hasattr(model.asr_model.normalize, 'stats_file'):
stats_file = model.asr_model.normalize.stats_file
shutil.copy(stats_file, path)
if verbose:
logging.info(f'`stats_file` was copied into {path}.')
# copy bpemodel
if isinstance(model.tokenizer, SentencepiecesTokenizer):
bpemodel_file = model.tokenizer.model
shutil.copy(bpemodel_file, path)
if verbose:
logging.info(f'bpemodel was copied into {path}.')
# save position encoder parameters.
if hasattr(model.asr_model.encoder, 'pos_enc'):
np.save(
path / 'pe',
model.asr_model.encoder.pos_enc.pe.numpy()
)
if verbose:
logging.info(f'Matrix for position encoding was copied into {path}.')
def _quantize_model(self, model_from, model_to, verbose):
if verbose:
logging.info(f'Quantized model is saved in {model_to}.')
ret = {}
models = glob.glob(os.path.join(model_from, "*.onnx"))
for m in models:
basename = os.path.basename(m).split('.')[0]
export_file = os.path.join(model_to, basename + '_qt.onnx')
quantize_dynamic(
m,
export_file,
weight_type=QuantType.QUInt8
)
ret[basename] = export_file
os.remove(os.path.join(model_from, basename + '-opt.onnx'))
return ret
|
419348
|
import os
from io import open
try:
get_input = raw_input # fix for Python 2
except NameError:
get_input = input
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path # Python 2 backport
def __create_dir(path):
dir_name = os.path.dirname(path)
    if dir_name != "":
        Path(dir_name).mkdir(parents=True, exist_ok=True)
def save_to_file(code_blocks, verbose=False, force=False, output_dest=None):
for path, value in code_blocks.items():
path = os.path.expanduser(path)
if output_dest is not None:
path = output_dest + "/" + os.path.basename(path)
__create_dir(path)
if os.path.isfile(path) and not force:
overwrite = get_input("'{0}' already exists. Overwrite? (Y/n) ".format(path))
if overwrite != "" and overwrite.lower() != "y":
continue
        with open(path, "w", encoding="utf8") as f:
            f.write(value)
if verbose:
print("{0: <50} {1} lines".format(path, len(value.splitlines())))
|
419388
|
import datetime
import os
import re
import subprocess
import click
def abort(message, *args, **kwargs):
raise click.ClickException(message.format(*args, **kwargs))
def confirm(message, *args, **kwargs):
rv = click.prompt(message.format(*args, **kwargs), type=click.Choice(['y', 'n']))
if rv != 'y':
abort('aborted')
def run(*args, **kwargs):
return subprocess.run(*args, check=True, **kwargs)
def run_no_venv(*args, **kwargs):
env = os.environ.copy()
virtual_env = env.pop('VIRTUAL_ENV', None)
if virtual_env:
env['PATH'] = ':'.join(
p for p in env['PATH'].split(':') if not p.startswith(virtual_env)
)
return run(*args, env=env, **kwargs)
def tox():
run_no_venv('tox -p all', shell=True)
def build():
run('rm -rf dist/', shell=True)
run('python -m build', shell=True)
def path_sub(pattern, repl, path):
with open(path) as f:
text = f.read()
sub_count = 0
def update(match):
nonlocal sub_count
sub_count += 1
prefix, old, suffix = match.groups()
return prefix + repl + suffix
text = re.sub(pattern, update, text)
assert sub_count == 1
with open(path, 'w') as f:
f.write(text)
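# path_sub is deliberately strict: the assert above aborts the release when
# the pattern matches anything other than exactly one spot in the file,
# catching both stale patterns and accidental duplicates.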
INIT_VERSION_RE = r'(\n__version__ = )(.*?)(\n)'
def update_init_version(version):
path_sub(INIT_VERSION_RE, repr(version), 'src/reader/__init__.py')
def update_changelog_date(version, date):
title = 'Version {}'.format(version)
path_sub(
r'(\n{}\n{}\n\n)(Unreleased)(\n)'.format(re.escape(title), '-' * len(title)),
'Released ' + str(date.date()),
'CHANGES.rst',
)
def add_changelog_section(version, new_version):
title = 'Version {}'.format(version)
new_title = 'Version {}'.format(new_version)
path_sub(
r'()()(\n{}\n{}\n\n)'.format(re.escape(title), '-' * len(title)),
'\n{}\n{}\n\nUnreleased\n\n'.format(new_title, '-' * len(new_title)),
'CHANGES.rst',
)
def commit(message):
run(['git', 'commit', '-a', '-m', message])
def push():
run(['git', 'push'])
def check_uncommited():
p = run(
['git', 'status', '--untracked-files=no', '--porcelain'],
stdout=subprocess.PIPE,
universal_newlines=True,
)
if p.stdout.strip():
abort("uncommited changes\n\n{}\n", p.stdout.strip('\n'))
def check_unpushed():
p = run(
['git', 'log', '@{u}..', '--format=oneline'],
stdout=subprocess.PIPE,
universal_newlines=True,
)
if p.stdout.strip():
abort("unpushed changes\n\n{}\n", p.stdout.strip('\n'))
def upload_to_pypi():
run('twine upload dist/*', shell=True)
def add_and_push_tags(tags):
for tag in tags:
run(['git', 'tag', '--force', tag])
# https://stackoverflow.com/a/19300065
refs = [f'refs/tags/{tag}:refs/tags/{tag}' for tag in tags]
run(['git', 'push', '--force', 'origin'] + refs)
@click.command()
@click.argument('version')
@click.argument('new_version')
@click.option('--date', type=click.DateTime(), default=str(datetime.date.today()))
def main(version, new_version, date):
check_uncommited()
check_unpushed()
update_init_version(version)
update_changelog_date(version, date)
commit("Release {}.".format(version))
tox()
confirm("Push version {}?", version)
push()
confirm("Wait for GitHub Actions / Read the Docs builds to pass.")
confirm("Upload to PyPI?")
build()
upload_to_pypi()
version_x = version.partition('.')[0] + '.x'
tags = [version, version_x]
confirm(f"Add and push tags ({', '.join(tags)})?")
add_and_push_tags(tags)
confirm("Create release {} in GitHub.", version)
new_version_full = "{}.dev0".format(new_version)
update_init_version(new_version_full)
add_changelog_section(version, new_version)
commit("Bump version to {}.".format(new_version_full))
confirm("Push version {}?", new_version_full)
push()
# TODO: I just enabled branch or tag creation/deletion for the RtD webhook in GitHub, this might not be needed next time.
confirm(
f"Trigger Read the Docs build for {version_x} (doesn't happen automatically)."
)
if __name__ == '__main__':
main()
|
419427
|
from django.contrib.auth.models import AnonymousUser
from rest_framework import authentication
from rest_framework.authentication import TokenAuthentication
class AnonymousAuthentication(authentication.BaseAuthentication):
def authenticate(self, request):
"""
Authenticate the request for anyone!
"""
return AnonymousUser(), None
class AnonymousOrAuthenticatedAuthentication(authentication.BaseAuthentication):
def authenticate(self, request):
"""
Authenticate the request for anyone or if a valid token is provided, a user.
"""
        try:
            return TokenAuthentication().authenticate(request)
        except Exception:
            return AnonymousUser(), None
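# Usage sketch (hypothetical DRF view): allow anonymous access while still
# attaching a user when a valid token is sent:
#   class PublicView(APIView):
#       authentication_classes = [AnonymousOrAuthenticatedAuthentication]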
|
419434
|
from sqlite_utils import recipes
import json
import pytest
@pytest.fixture
def dates_db(fresh_db):
fresh_db["example"].insert_all(
[
{"id": 1, "dt": "5th October 2019 12:04"},
{"id": 2, "dt": "6th October 2019 00:05:06"},
{"id": 3, "dt": ""},
{"id": 4, "dt": None},
],
pk="id",
)
return fresh_db
def test_parsedate(dates_db):
dates_db["example"].convert("dt", recipes.parsedate)
assert list(dates_db["example"].rows) == [
{"id": 1, "dt": "2019-10-05"},
{"id": 2, "dt": "2019-10-06"},
{"id": 3, "dt": ""},
{"id": 4, "dt": None},
]
def test_parsedatetime(dates_db):
dates_db["example"].convert("dt", recipes.parsedatetime)
assert list(dates_db["example"].rows) == [
{"id": 1, "dt": "2019-10-05T12:04:00"},
{"id": 2, "dt": "2019-10-06T00:05:06"},
{"id": 3, "dt": ""},
{"id": 4, "dt": None},
]
@pytest.mark.parametrize(
"recipe,kwargs,expected",
(
("parsedate", {}, "2005-03-04"),
("parsedate", {"dayfirst": True}, "2005-04-03"),
("parsedatetime", {}, "2005-03-04T00:00:00"),
("parsedatetime", {"dayfirst": True}, "2005-04-03T00:00:00"),
),
)
def test_dayfirst_yearfirst(fresh_db, recipe, kwargs, expected):
fresh_db["example"].insert_all(
[
{"id": 1, "dt": "03/04/05"},
],
pk="id",
)
fresh_db["example"].convert(
"dt", lambda value: getattr(recipes, recipe)(value, **kwargs)
)
assert list(fresh_db["example"].rows) == [
{"id": 1, "dt": expected},
]
@pytest.mark.parametrize("delimiter", [None, ";", "-"])
def test_jsonsplit(fresh_db, delimiter):
fresh_db["example"].insert_all(
[
{"id": 1, "tags": (delimiter or ",").join(["foo", "bar"])},
{"id": 2, "tags": (delimiter or ",").join(["bar", "baz"])},
],
pk="id",
)
fn = recipes.jsonsplit
if delimiter is not None:
def fn(value):
return recipes.jsonsplit(value, delimiter=delimiter)
fresh_db["example"].convert("tags", fn)
assert list(fresh_db["example"].rows) == [
{"id": 1, "tags": '["foo", "bar"]'},
{"id": 2, "tags": '["bar", "baz"]'},
]
@pytest.mark.parametrize(
"type,expected",
(
(None, ["1", "2", "3"]),
(float, [1.0, 2.0, 3.0]),
(int, [1, 2, 3]),
),
)
def test_jsonsplit_type(fresh_db, type, expected):
fresh_db["example"].insert_all(
[
{"id": 1, "records": "1,2,3"},
],
pk="id",
)
fn = recipes.jsonsplit
if type is not None:
def fn(value):
return recipes.jsonsplit(value, type=type)
fresh_db["example"].convert("records", fn)
assert json.loads(fresh_db["example"].get(1)["records"]) == expected
|
419477
|
import os
from pyats.easypy import run
# To run the job:
# pyats run job $VIRTUAL_ENV/examples/connection/job/connection_example_job.py \
# --testbed-file <your tb file>
#
# Description: This example uses a sample testbed, connects to a device
# which name is passed from the job file,
# and executes some commands.
# All run() must be inside a main function
def main():
# Find the location of the script in relation to the job file
test_path = os.path.dirname(os.path.abspath(__file__))
testscript = os.path.join(test_path, 'connection_example_script.py')
# Execute the testscript
run(testscript=testscript)
|
419484
|
import argparse
import json
import math
import os
import pdb
import shutil
import time
from functools import partial
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from tensorboardX import SummaryWriter
import resnet
from relax.nas import MixedOptimizer, Supernet
from relax.xd import fixed, original
class RowColPermute(nn.Module):
def __init__(self, row, col):
super().__init__()
try:
from torch_butterfly.permutation import bitreversal_permutation
self.rowperm = torch.LongTensor(bitreversal_permutation(row))
self.colperm = torch.LongTensor(bitreversal_permutation(col))
print("Using bit-reversal permutation")
except ImportError:
self.rowperm = torch.randperm(row) if type(row) == int else row
self.colperm = torch.randperm(col) if type(col) == int else col
print("Using random permutation")
def forward(self, tensor):
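        # For a CHW image tensor: dim 1 (rows) is permuted first, then dim 2
        # (columns); dim 0 (channels) is left untouched.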
return tensor[:,self.rowperm][:,:,self.colperm]
model_names = sorted(name for name in resnet.__dict__
if name.islower() and not name.startswith("__")
and name.startswith("resnet")
and callable(resnet.__dict__[name]))
parser = argparse.ArgumentParser(description='Proper ResNets for CIFAR10 in pytorch')
parser.add_argument('--backbone', type=str, default='resnet20')
parser.add_argument('--data', default='cifar10', type=str)
parser.add_argument('--device', default=0, type=int)
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
metavar='N', help='mini-batch size (default: 128)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=50, type=int,
metavar='N', help='print frequency (default: 50)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--half', dest='half', action='store_true',
                    help='use half-precision (16-bit)')
parser.add_argument('--save-dir', dest='save_dir',
help='The directory used to save the trained models',
default='results', type=str)
parser.add_argument('--save-every', dest='save_every',
help='Saves checkpoints at every specified number of epochs',
type=int, default=10)
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--arch-lr', default=0.1, type=float)
parser.add_argument('--arch-adam', action='store_true')
parser.add_argument('--xd', action='store_true')
parser.add_argument('--fft', action='store_true')
parser.add_argument('--compact', action='store_true')
parser.add_argument('--einsum', action='store_true')
parser.add_argument('--kmatrix-depth', default=1, type=int)
parser.add_argument('--warmup-epochs', default=0, type=int)
parser.add_argument('--permute', action='store_true')
parser.add_argument('--get-permute', type=str, default='')
best_prec1 = 0
def main():
global args, best_prec1
args = parser.parse_args()
torch.manual_seed(args.seed)
model = resnet.__dict__[args.backbone](num_classes=int(args.data[5:]))
torch.cuda.set_device(args.device)
criterion = nn.CrossEntropyLoss().cuda()
writer = SummaryWriter(args.save_dir)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
if args.permute or args.get_permute:
if args.get_permute:
permute = torch.load(args.get_permute)['permute']
elif args.resume:
permute = torch.load(args.resume)['permute']
else:
permute = RowColPermute(32, 32)
train_transforms = [transforms.ToTensor(), permute, normalize]
val_transforms = [transforms.ToTensor(), permute, normalize]
else:
permute = None
train_transforms = [transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, 4), transforms.ToTensor(), normalize]
val_transforms = [transforms.ToTensor(), normalize]
cifar = datasets.CIFAR100 if args.data == 'cifar100' else datasets.CIFAR10
train_loader = torch.utils.data.DataLoader(
cifar(root='./data', train=True, transform=transforms.Compose(train_transforms), download=True),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
cifar(root='./data', train=False, transform=transforms.Compose(val_transforms)),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.half:
model.half()
criterion.half()
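    # --fft: wrap the backbone as a Supernet and replace its convolutions with
    # fixed XD operations up front (conv2xd takes a single sample, presumably
    # to trace layer shapes).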
if args.fft:
Supernet.create(model, in_place=True)
X, _ = next(iter(train_loader))
arch_kwargs = {'arch': fixed,
'compact': args.compact,
'einsum': args.einsum,
'verbose': not args.resume}
model.conv2xd(X[:1], **arch_kwargs)
if not args.xd:
args.arch_lr = 0.0
print('Model weight count:', sum(p.numel() for p in model.parameters()))
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
model.cuda()
# define optimizer
momentum = partial(torch.optim.SGD, momentum=args.momentum)
optimizer = momentum(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
def sched(epoch):
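        # LR multiplier: 1.0 for the first half of training, 0.1 until 75%,
        # 0.01 thereafter; the deepest backbones warm up with 0.1 at epoch 0.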
if epoch < 1 and args.backbone in ['resnet1202', 'resnet110']:
return 0.1
return 0.1 ** (epoch >= int(0.5 * args.epochs)) * 0.1 ** (epoch >= int(0.75 * args.epochs))
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=sched, last_epoch=-1)
    # Fast-forward optimizer and scheduler state to the resumed epoch.
    for epoch in range(args.start_epoch):
        optimizer.step()
        lr_scheduler.step()
if not args.evaluate:
with open(os.path.join(args.save_dir, 'args.json'), 'w') as f:
json.dump(vars(args), f, indent=4)
for epoch in range(args.start_epoch, args.epochs):
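        # Once warmup ends (or immediately when resuming past it), convert the
        # convolutions to searchable XD ops and rebuild optimizer + scheduler.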
if args.xd and (epoch == args.warmup_epochs or (args.resume and epoch == args.start_epoch and epoch >= args.warmup_epochs)):
model.cpu()
Supernet.create(model, in_place=True)
X, _ = next(iter(train_loader))
arch_kwargs = {'arch': original,
'compact': args.compact,
'einsum': args.einsum,
'depth': args.kmatrix_depth,
'verbose': not args.resume}
model.conv2xd(X[:1], **arch_kwargs)
print('Arch param count:', sum(p.numel() for p in model.arch_params()))
model.cuda()
arch_opt = torch.optim.Adam if args.arch_adam else momentum
optimizer = MixedOptimizer([momentum(model.model_weights(), lr=args.lr, weight_decay=args.weight_decay),
arch_opt(model.arch_params(), lr=args.arch_lr, weight_decay=0.0 if args.arch_adam else args.weight_decay)])
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=sched, last_epoch=epoch-1)
if args.resume and epoch == args.start_epoch:
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optim_state'])
if args.evaluate:
validate(val_loader, model, criterion)
return
writer.add_scalar('hyper/lr', optimizer.param_groups[0]['lr'], epoch)
if args.xd:
writer.add_scalar('hyper/arch', 0.0 if len(optimizer.param_groups) == 1 else optimizer.param_groups[1]['lr'], epoch)
# train for one epoch
print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']))
acc, loss = train(train_loader, model, criterion, optimizer, epoch)
writer.add_scalar('train/acc', acc, epoch)
writer.add_scalar('train/loss', loss, epoch)
lr_scheduler.step()
# evaluate on validation set
prec1, loss = validate(val_loader, model, criterion)
writer.add_scalar('valid/acc', prec1, epoch)
writer.add_scalar('valid/loss', loss, epoch)
# remember best prec@1 and save checkpoint
best_prec1 = max(prec1, best_prec1)
model.train()
if (epoch+1) % args.save_every == 0:
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optim_state': optimizer.state_dict(),
'best_prec1': best_prec1,
'permute': permute,
}, os.path.join(args.save_dir, 'checkpoint.th'))
save_checkpoint({
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'permute': permute,
}, os.path.join(args.save_dir, 'model.th'))
try:
model.save_arch(os.path.join(args.save_dir, 'arch.th'))
except AttributeError:
pass
writer.flush()
with open(os.path.join(args.save_dir, 'results.json'), 'w') as f:
json.dump({'final validation accuracy': prec1,
'best validation accuracy': best_prec1,
}, f, indent=4)
def train(train_loader, model, criterion, optimizer, epoch):
"""
Run one train epoch
"""
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to train mode
model.train()
optimizer.zero_grad()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda()
input_var = input.cuda()
target_var = target
if args.half:
input_var = input_var.half()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# compute gradient and do SGD step
loss.backward()
optimizer.step()
optimizer.zero_grad()
output = output.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1))
return top1.avg, losses.avg
def validate(val_loader, model, criterion):
"""
Run evaluation
"""
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
with torch.no_grad():
for i, (input, target) in enumerate(val_loader):
target = target.cuda()
input_var = input.cuda()
            target_var = target
if args.half:
input_var = input_var.half()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
output = output.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1))
print(' * Prec@1 {top1.avg:.3f}'
.format(top1=top1))
return top1.avg, losses.avg
def save_checkpoint(state, filename):
"""
Save the training model
"""
torch.save(state, filename)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
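        # e.g. update(loss.item(), n=input.size(0)) keeps a batch-size-weighted running mean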
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
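# Illustrative call with made-up logits: a batch of two samples, both predicted
# correctly at top-1, returns [tensor(100.)]:
#   accuracy(torch.tensor([[0.1, 0.8, 0.1], [0.9, 0.05, 0.05]]),
#            torch.tensor([1, 0]))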
if __name__ == '__main__':
main()
|
419500
|
from NERDA.datasets import get_conll_data, get_dane_data
import torch
import boto3
def deploy_model_to_s3(model, test_set=None):
    """Deploy Model to S3
    Args:
        model: NERDA model.
        test_set: Test set for evaluating performance.
            Defaults to the DaNE test split.
    Returns:
        str: message saying whether the model was uploaded successfully.
        Model and text file with performance numbers uploaded
        as side-effects.
    """
    # Avoid evaluating the default at import time; load the DaNE test split lazily.
    if test_set is None:
        test_set = get_dane_data('test')
    model_name = type(model).__name__
    file_model = f'{model_name}.bin'
    torch.save(model.network.state_dict(), file_model)
# compute performance on test set and save.
performance = model.evaluate_performance(test_set)
# write to csv.
file_performance = f'{model_name}_performance.csv'
    performance.to_csv(file_performance, index=False)
# upload to S3 bucket.
s3 = boto3.resource('s3')
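    # boto3 resolves AWS credentials from the environment or shared config;
    # the 'nerda' bucket name is hard-coded below.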
    s3.Bucket('nerda').upload_file(
        Filename=file_model,
        Key=file_model)
    s3.Bucket('nerda').upload_file(
        Filename=file_performance,
        Key=file_performance)
return "Model deployed to S3 successfully."
if __name__ == '__main__':
from NERDA.precooked import EN_ELECTRA_EN
model = EN_ELECTRA_EN()
model.train()
deploy_model_to_s3(model)
|
419542
|
def get_head(line, releases, **kwargs):
for release in releases:
if "Django {} release notes".format(release) in line:
return release
return False
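# Build the raw-GitHub URL for each release's notes file,
# e.g. releases=["3.2"] yields ".../docs/releases/3.2.txt".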
def get_urls(releases, **kwargs):
urls = []
for release in releases:
urls.append("https://raw.githubusercontent.com/django/django/master/docs/releases/{v}.txt"
.format(v=release))
return urls, []
|
419548
|
import torch
import os
# DALI import
from .dali_iterator import COCOPipeline
from nvidia.dali.plugin.pytorch import DALIGenericIterator
from box_coder import dboxes300_coco
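# SSD300 default boxes in (left, top, right, bottom) order, flattened for the DALI pipeline.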
anchors_ltrb_list = dboxes300_coco()("ltrb").numpy().flatten().tolist()
def prebuild_dali_pipeline(args):
train_annotate = os.path.join(args.data, "annotations/bbox_only_instances_train2017.json")
train_coco_root = os.path.join(args.data, "train2017")
pipe = COCOPipeline(args.batch_size * args.input_batch_multiplier,
args.local_rank, train_coco_root,
args.meta_files_path, train_annotate, args.N_gpu,
anchors_ltrb_list,
num_threads=args.num_workers,
output_fp16=args.use_fp16, output_nhwc=args.nhwc,
pad_output=args.pad_input, seed=args.local_seed - 2**31,
use_nvjpeg=args.use_nvjpeg,
dali_cache=args.dali_cache,
dali_async=(not args.dali_sync))
pipe.build()
return pipe
def build_dali_pipeline(args, training=True, pipe=None):
# pipe is prebuilt without touching the data
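    # each rank iterates over its 1/N_gpu share of the train reader's epoch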
train_loader = DALIGenericIterator(pipelines=[pipe],
                                       output_map=['image', 'bbox', 'label'],
size=pipe.epoch_size()['train_reader'] // args.N_gpu,
auto_reset=True)
return train_loader, pipe.epoch_size()['train_reader']
|
419556
|
import numpy as np
import matplotlib.pyplot as plt
def draw_boxes(img, boxes, base_color=(1, 0, 0), line_width=3):
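    # boxes: array of shape (N, 5), one row per box: [xmin, ymin, xmax, ymax, score]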
base_color = np.array(base_color)
boxes = boxes[np.argsort(-boxes[:, 4])] # Sort in descending order of score
max_score = np.max(boxes[:, 4])
ax = plt.gca()
ax.imshow(img)
for box in boxes:
xmin, ymin, xmax, ymax, score = box
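        # scale the base color by relative score so higher-confidence boxes draw brighter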
color = base_color * score / max_score
rect = plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, linewidth=line_width, edgecolor=color, fill=False)
ax.add_patch(rect)
plt.axis('off')
plt.show()
|
419561
|
import numpy as np
from matplotlib import patches
import matplotlib.pyplot as plt
# Use xkcd-style figures.
plt.xkcd()
# Some settings
fs = 14
# # # (A) Figure with survey and computational domains, buffer. # # #
fig, ax = plt.subplots(1, 1, figsize=(11, 7))
# Plot domains.
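# Grey 100x60 rectangle: computational domain D_c; white 70x40 inset: survey domain D_s.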
dinp1 = {'fc': 'none', 'zorder': 2}
dc = patches.Rectangle((0, 0), 100, 60, color='.9')
dcf = patches.Rectangle((0, 0), 100, 60, ec='C0', **dinp1)
ds = patches.Rectangle((15, 10), 70, 40, fc='w')
dsf = patches.Rectangle((15, 10), 70, 40, ec='C1', **dinp1)
for d in [dc, dcf, ds, dsf]:
ax.add_patch(d)
dinp2 = {'verticalalignment': 'center', 'zorder': 2}
ax.text(60, 60, r'Computational domain $D_c$', c='C0', **dinp2)
ax.text(60, 50, r'Survey domain $D_s$', c='C1', **dinp2)
# plot seasurface, seafloor, receivers, source.
x = np.arange(101)
y1 = np.sin(x/np.pi)-np.arange(x.size)/10+43
ax.plot(x, y1, '.8', zorder=1)
ax.plot(x[15:-15], y1[15:-15], '.4', zorder=1)
si = [30, 40, 50, 60, 70]
for i in si:
ax.plot(x[i], y1[i]+4, 'C3*')
ri = np.arange(4, 16)*5
for i in ri:
ax.plot(x[i+2], y1[i+2]+1, 'C0v')
# Subsurface.
y2 = np.sin(x/5)-((np.arange(x.size)-x.size/2)/20)**2+13+np.arange(x.size)/10
y3 = np.sin(x/10)-((np.arange(x.size)-x.size/2)/40)**3+28-np.arange(x.size)/300
y4 = np.min([y3, np.arange(101)/10+22], axis=0)
subinp1 = {'c': '0.8', 'zorder': 1}
subinp2 = {'c': '0.4', 'zorder': 1}
ax.plot(x, y2, **subinp1)
ax.plot(x, y4, **subinp1)
ax.plot(x, y3, **subinp1)
ax.plot(x[15:-15], y2[15:-15], **subinp2)
ax.plot(x[15:-15], y4[15:-15], **subinp2)
ax.plot(x[15:-15], y3[15:-15], **subinp2)
# Lambdas.
aprops = {'head_width': 2, 'head_length': 3,
'length_includes_head': True, 'color': 'C2'}
tprops = {'fontsize': fs, 'c': 'C2', 'verticalalignment': 'center'}
ax.arrow(50, 10, 0, -10, **aprops)
ax.text(51, 5, r'$\lambda(f, \sigma_{z-})$', **tprops)
ax.arrow(50, 50, 0, 10, **aprops, zorder=10)
ax.text(51, 55, r'$\lambda(f, \sigma_{z+})$', **tprops)
ax.arrow(15, 30, -15, 0, **aprops, zorder=10)
ax.text(3, 32, r'$\lambda(f, \sigma_{x-})$', **tprops)
ax.arrow(85, 30, 15, 0, **aprops, zorder=10)
ax.text(88, 32, r'$\lambda(f, \sigma_{x+})$', **tprops)
ax.text(5, 5, 'Buffer', c='.5', fontsize=16)
# Axis
ax.arrow(0, 0, 10, 0, head_width=2, head_length=3, fc='k', ec='k', zorder=10)
ax.arrow(0, 0, 0, 10, head_width=2, head_length=3, fc='k', ec='k', zorder=10)
ax.text(15, 0, r'$x$', fontsize=fs, verticalalignment='center', zorder=10)
ax.text(0, 15, r'$z$', fontsize=fs, horizontalalignment='center', zorder=10)
ax.set_axis_off()
ax.set_xlim([-5, 105])
ax.set_ylim([-5, 65])
fig.savefig('_static/construct_mesh.png', bbox_inches='tight', pad_inches=0)
fig.show()
# # # (B) Figure with survey and computational domains, buffer. # # #
fig, ax = plt.subplots(1, 1, figsize=(7, 4))
# Plot domains
ax.plot([35, 35], [16, 30], 'C0')
ax.plot([30, 30], [0, 14], 'C0')
ax.plot([20, 20], [0, 14], 'C1')
ax.plot([20, 20], [16, 30], 'C1')
# Plot center
ax.plot([5, 5], [6, 23], 'C3*', zorder=11)
# lambdas
aprops2 = {'head_width': 2, 'head_length': 3, 'zorder': 10,
'length_includes_head': True, 'color': 'C2'}
ax.arrow(20, 22, 15, 1, **aprops2)
ax.arrow(35, 24, -15, 1, **aprops2)
ax.arrow(5, 6, 25, 1, **aprops2)
ax.arrow(30, 8, -10, 1, **aprops2)
ax.text(20, 31, r'$D_s$', c='C1', horizontalalignment='center', fontsize=fs)
ax.text(20, -3, r'$D_s$', c='C1', horizontalalignment='center', fontsize=fs)
ax.text(35, 31, r'$D_c$', c='C0', horizontalalignment='center', fontsize=fs)
ax.text(30, -3, r'$D_c$', c='C0', horizontalalignment='center', fontsize=fs)
ax.text(5, 31, r'$center$', c='C3', horizontalalignment='center', fontsize=fs)
ax.text(5, -3, r'$center$', c='C3', horizontalalignment='center', fontsize=fs)
ax.text(25, 15, r'$B$', verticalalignment='center')
ax.text(40, 15, r"$\lambda'=\lambda_{fact}\ \lambda(f, \sigma)$",
verticalalignment='center')
aprops3 = {**aprops2, 'color': 'k'}
ax.arrow(29, 17, -9, 0, **aprops3)
ax.arrow(26, 17, 9, 0, **aprops3)
ax.arrow(27, 13, -7, 0, **aprops3)
ax.arrow(23, 13, 7, 0, **aprops3)
ax.text(0, 25, '(I) False')
ax.text(40, 23, r"$B=\lambda' \leq B_{max}$")
ax.text(0, 9, '(II) True')
ax.text(37, 5, r"$B=(2\lambda'-|D_s-center|)/2$")
ax.text(39, 1, r'$\leq B_{max}-|D_s-center|$')
ax.set_axis_off()
ax.set_xlim([-5, 75])
ax.set_ylim([-10, 40])
fig.savefig('_static/construct_mesh2.png', bbox_inches='tight', pad_inches=0)
fig.show()
|
419603
|
PARTS = {
"1-": [
"PAI", "PAD", "PAN", "PAP", "PAS", "PAO",
"PMI", "PMD", "PMN", "PMP", "PMS", "PMO",
],
"1+": [
"IAI",
"IMI",
"IEI",
],
"2-": [
"FAI", "FAN", "FAP", "FAO",
"FMI", "FMN", "FMP", "FMO",
],
"3-": [
"AAD", "AAN", "AAP", "AAS", "AAO",
"AMD", "AMN", "AMP", "AMS", "AMO",
],
"3+": [
"AAI",
"AMI",
],
"4-": [
"XAI", "XAD", "XAN", "XAP", "XAS", "XAO",
],
"4+": [
"YAI",
],
"5-": [
"XMI", "XMD", "XMN", "XMP",
],
"5+": [
"YMI", "YMP",
],
"6-": [
"APD", "APN", "APP", "APS", "APO",
],
"6+": [
"API",
],
"7-": [
"FPI", "FPN", "FPP", "FPO",
],
"8-": [
"ZAI", "ZAN",
"ZMI", "ZMN",
"ZMP",
],
}
REVERSE_PARTS = {}
for part, tvm_list in PARTS.items():
for tvm in tvm_list:
REVERSE_PARTS[tvm] = part
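# Look up a form's principal part from the leading three-letter TVM code of its key.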
def key_to_part(key):
return REVERSE_PARTS[key[0:3]]
def trim_multiples(stem_set, part, lemma, parts):
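    # Discard stems that are predictable or spurious for this lemma's ending, then
    # annotate the result: @1 = unique stem, @m = several remain, @mm = none survived.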
trimmed_stems = set()
for stem in stem_set:
if stem.endswith("0"): # rarely a real stem
pass
elif stem.endswith("@"): # rarely a real stem
pass
elif part[0] == "3" and lemma.endswith(("άω", "έω", "όω", "εύω")) and \
stem.endswith(("{root}", "{athematic}", "{2nd}")):
pass
elif lemma.endswith(("ω", "ομαι")) and stem.endswith("{athematic}"):
pass
elif part[0] == "1" and lemma.endswith("έω") and \
stem.endswith(("ο", "α", "η")):
pass
elif part[0] == "1" and lemma.endswith("όω") and \
stem.endswith(("ε", "α", "η", "{athematic}")):
pass
elif part[0] == "1" and lemma.endswith("άω") and \
stem.endswith(("η", "ε", "ο")):
pass
elif part[0] == "1" and lemma.endswith("έομαι") and stem.endswith("ο"):
pass
elif part[0] == "1" and lemma.endswith("όομαι") and stem.endswith("ε"):
pass
elif part[0] == "1" and lemma.endswith("άομαι") and stem.endswith("η"):
pass
else:
trimmed_stems.add(stem)
if part == "3-" and len(stem_set) == 2:
t = sorted(stem_set)
if t[0] + "{2nd}" == t[1]:
return "{} # @1 2nd?".format(t[0])
if t[1].endswith("ι{2nd}") and t[1] == t[0][:-5] + "ι{2nd}":
return "{} # @1".format(t[0])
if len(trimmed_stems) == 1:
return "{} # @1".format(trimmed_stems.pop())
elif len(trimmed_stems) == 0:
return "{} # @mm".format(stem_set)
else:
return "{} # @m".format("/".join(trimmed_stems))
|
419613
|
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from jmodt.config import cfg
from jmodt.utils import loss_utils
def model_joint_fn_decorator():
ModelReturn = namedtuple("ModelReturn", ['loss', 'tb_dict', 'disp_dict'])
MEAN_SIZE = torch.from_numpy(cfg.CLS_MEAN_SIZE[0]).cuda()
def model_fn_train(model, data):
if cfg.RPN.ENABLED:
pts_input = data['pts_input']
gt_boxes3d = data['gt_boxes3d']
if not cfg.RPN.FIXED:
rpn_cls_label, rpn_reg_label = data['rpn_cls_label'], data['rpn_reg_label']
rpn_cls_label = torch.from_numpy(rpn_cls_label).cuda(non_blocking=True).long()
rpn_reg_label = torch.from_numpy(rpn_reg_label).cuda(non_blocking=True).float()
inputs = torch.from_numpy(pts_input).cuda(non_blocking=True).float()
gt_boxes3d = torch.from_numpy(gt_boxes3d).cuda(non_blocking=True).float()
input_data = {'pts_input': inputs, 'gt_boxes3d': gt_boxes3d}
else:
input_data = {}
for key, val in data.items():
if key != 'sample_id':
input_data[key] = torch.from_numpy(val).contiguous().cuda(non_blocking=True).float()
if cfg.LI_FUSION.ENABLED:
img = torch.from_numpy(data['img']).cuda(non_blocking=True).float().permute((0, 3, 1, 2))
pts_xy = torch.from_numpy(data['pts_xy']).cuda(non_blocking=True).float()
input_data['img'] = img
input_data['pts_xy'] = pts_xy
if cfg.RPN.USE_RGB or cfg.RCNN.USE_RGB:
pts_rgb = data['rgb']
pts_rgb = torch.from_numpy(pts_rgb).cuda(non_blocking=True).float()
input_data['pts_rgb'] = pts_rgb
if cfg.REID.ENABLED:
input_data['gt_tids'] = torch.from_numpy(data['gt_tids']).cuda(non_blocking=True).float()
ret_dict = model(input_data)
tb_dict = {}
disp_dict = {}
loss = 0
if cfg.RPN.ENABLED and not cfg.RPN.FIXED:
rpn_cls = ret_dict['rpn_cls']
rpn_reg = ret_dict['rpn_reg']
rpn_loss, rpn_loss_cls, rpn_loss_loc, rpn_loss_angle, rpn_loss_size, rpn_loss_iou = get_rpn_loss(
model,
rpn_cls,
rpn_reg,
rpn_cls_label,
rpn_reg_label,
tb_dict
)
rpn_loss = rpn_loss * cfg.TRAIN.RPN_TRAIN_WEIGHT
loss += rpn_loss
disp_dict['rpn_loss'] = rpn_loss.item()
if cfg.RCNN.ENABLED:
if cfg.USE_IOU_BRANCH:
rcnn_loss, iou_loss, iou_branch_loss = get_rcnn_loss(model, ret_dict, tb_dict)
disp_dict['rcnn_iou_loss'] = iou_loss.item()
disp_dict['iou_branch_loss'] = iou_branch_loss.item()
else:
rcnn_loss = get_rcnn_loss(model, ret_dict, tb_dict)
max_iou = ret_dict['max_iou'].tolist()
            disp_dict['max_iou'] = [round(x, 2) for x in max_iou] if isinstance(max_iou, list) else max_iou
disp_dict['link_pos'] = tb_dict['rcnn_link_pos']
disp_dict['link_neg'] = tb_dict['rcnn_link_neg']
rcnn_loss = rcnn_loss * cfg.TRAIN.RCNN_TRAIN_WEIGHT
            disp_dict['rcnn_loss'] = rcnn_loss.item() if isinstance(rcnn_loss, torch.Tensor) else rcnn_loss
loss += rcnn_loss
return ModelReturn(loss, tb_dict, disp_dict)
def get_rpn_loss(model, rpn_cls, rpn_reg, rpn_cls_label, rpn_reg_label, tb_dict):
if isinstance(model, nn.DataParallel):
rpn_cls_loss_func = model.module.rpn.rpn_cls_loss_func
else:
rpn_cls_loss_func = model.rpn.rpn_cls_loss_func
rpn_cls = rpn_cls.squeeze(-1)
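        # mask out NaN/Inf predictions (and their labels) so the losses stay finite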
invalid_mask = torch.logical_or(torch.isnan(rpn_cls), torch.isinf(rpn_cls))
if invalid_mask.sum() > 0:
valid_mask = torch.logical_not(invalid_mask)
rpn_cls = rpn_cls[valid_mask]
rpn_cls_label = rpn_cls_label[valid_mask]
invalid_mask = torch.logical_or(torch.isnan(rpn_reg), torch.isinf(rpn_reg))
if invalid_mask.sum() > 0:
invalid_mask = invalid_mask.sum(-1) > 0
valid_mask = torch.logical_not(invalid_mask)
rpn_reg = rpn_reg[valid_mask]
rpn_reg_label = rpn_reg_label[valid_mask]
else:
point_num = rpn_reg.size(0) * rpn_reg.size(1)
rpn_reg = rpn_reg.view(point_num, -1)
rpn_reg_label = rpn_reg_label.view(point_num, 7)
rpn_cls_flat = rpn_cls.view(-1)
rpn_cls_label_flat = rpn_cls_label.view(-1)
fg_mask = (rpn_cls_label_flat > 0)
# RPN classification loss
if cfg.RPN.LOSS_CLS == 'DiceLoss':
rpn_loss_cls = rpn_cls_loss_func(rpn_cls_flat, rpn_cls_label_flat)
elif cfg.RPN.LOSS_CLS == 'SigmoidFocalLoss':
rpn_cls_target = (rpn_cls_label_flat > 0).float()
pos = (rpn_cls_label_flat > 0).float()
neg = (rpn_cls_label_flat == 0).float()
cls_weights = pos + neg
pos_normalizer = pos.sum()
cls_weights = cls_weights / torch.clamp(pos_normalizer, min=1.0)
rpn_loss_cls = rpn_cls_loss_func(rpn_cls_flat, rpn_cls_target, cls_weights)
rpn_loss_cls_pos = (rpn_loss_cls * pos).sum()
rpn_loss_cls_neg = (rpn_loss_cls * neg).sum()
rpn_loss_cls = rpn_loss_cls.sum()
tb_dict['rpn_loss_cls_pos'] = rpn_loss_cls_pos.item()
tb_dict['rpn_loss_cls_neg'] = rpn_loss_cls_neg.item()
elif cfg.RPN.LOSS_CLS == 'BinaryCrossEntropy':
weight = rpn_cls_flat.new(rpn_cls_flat.shape[0]).fill_(1.0)
weight[fg_mask] = cfg.RPN.FG_WEIGHT
rpn_cls_label_target = (rpn_cls_label_flat > 0).float()
batch_loss_cls = F.binary_cross_entropy(torch.sigmoid(rpn_cls_flat), rpn_cls_label_target,
weight=weight, reduction='none')
cls_valid_mask = (rpn_cls_label_flat >= 0).float()
rpn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
else:
raise NotImplementedError
# RPN regression loss
if fg_mask.sum() > 0:
loss_loc, loss_angle, loss_size, loss_iou, reg_loss_dict = \
loss_utils.get_reg_loss(cls_score=torch.sigmoid(rpn_cls_flat)[fg_mask],
pred_reg=rpn_reg[fg_mask],
reg_label=rpn_reg_label[fg_mask],
loc_scope=cfg.RPN.LOC_SCOPE,
loc_bin_size=cfg.RPN.LOC_BIN_SIZE,
num_head_bin=cfg.RPN.NUM_HEAD_BIN,
anchor_size=MEAN_SIZE,
get_xz_fine=cfg.RPN.LOC_XZ_FINE,
use_cls_score=True,
use_mask_score=False)
loss_size = 3 * loss_size # consistent with old codes
loss_iou = cfg.TRAIN.CE_WEIGHT * loss_iou
rpn_loss_reg = loss_loc + loss_angle + loss_size + loss_iou
else:
loss_loc = loss_angle = loss_size = loss_iou = rpn_loss_reg = rpn_loss_cls * 0
rpn_loss = rpn_loss_cls * cfg.RPN.LOSS_WEIGHT[0] + rpn_loss_reg * cfg.RPN.LOSS_WEIGHT[1]
tb_dict.update({'rpn_loss_cls': rpn_loss_cls.item(), 'rpn_loss_reg': rpn_loss_reg.item(),
'rpn_loss': rpn_loss.item(), 'rpn_fg_sum': fg_mask.sum().item()})
# return rpn_loss
return rpn_loss, rpn_loss_cls, loss_loc, loss_angle, loss_size, loss_iou
def get_rcnn_loss(model, ret_dict, tb_dict):
rcnn_cls = ret_dict['rcnn_cls']
rcnn_reg = ret_dict['rcnn_reg']
cls_label = ret_dict['cls_label'].float()
reg_valid_mask = ret_dict['reg_valid_mask']
roi_boxes3d = ret_dict['roi_boxes3d']
roi_size = roi_boxes3d[:, 3:6]
gt_boxes3d_ct = ret_dict['gt_of_rois']
rcnn_cls_flat = rcnn_cls.view(-1)
cls_label_flat = cls_label.view(-1)
if cfg.TRAIN.FINETUNE:
rcnn_loss = 0
else:
# rcnn classification loss
if isinstance(model, nn.DataParallel):
cls_loss_func = model.module.rcnn_net.cls_loss_func
else:
cls_loss_func = model.rcnn_net.cls_loss_func
invalid_mask = torch.logical_or(torch.isnan(rcnn_cls_flat), torch.isinf(rcnn_cls_flat))
if invalid_mask.sum() > 0:
valid_mask = torch.logical_not(invalid_mask)
rcnn_cls_flat = rcnn_cls_flat[valid_mask]
rcnn_reg = rcnn_reg[valid_mask]
cls_label_flat = cls_label_flat[valid_mask]
reg_valid_mask = reg_valid_mask[valid_mask]
roi_size = roi_size[valid_mask]
gt_boxes3d_ct = gt_boxes3d_ct[valid_mask]
if cfg.RCNN.LOSS_CLS == 'SigmoidFocalLoss':
pos = (cls_label_flat > 0).float()
neg = (cls_label_flat == 0).float()
cls_weights = pos + neg
pos_normalizer = pos.sum()
cls_weights = cls_weights / torch.clamp(pos_normalizer, min=1.0)
rcnn_loss_cls = cls_loss_func(rcnn_cls_flat, pos, cls_weights)
rcnn_loss_cls = rcnn_loss_cls.sum()
elif cfg.RCNN.LOSS_CLS == 'BinaryCrossEntropy':
batch_loss_cls = F.binary_cross_entropy_with_logits(rcnn_cls_flat, cls_label_flat, reduction='none')
if torch.isnan(batch_loss_cls).any():
print('cls loss nan before ', batch_loss_cls.shape)
cls_valid_mask = (cls_label_flat >= 0)
rcnn_loss_cls = (batch_loss_cls[cls_valid_mask]).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
if torch.isnan(rcnn_loss_cls).any():
print('cls loss nan ', batch_loss_cls[cls_valid_mask].shape)
            elif cfg.RCNN.LOSS_CLS == 'CrossEntropy':
rcnn_cls_reshape = rcnn_cls.view(rcnn_cls.shape[0], -1)
cls_target = cls_label_flat.long()
cls_valid_mask = (cls_label_flat >= 0).float()
batch_loss_cls = cls_loss_func(rcnn_cls_reshape, cls_target)
normalizer = torch.clamp(cls_valid_mask.sum(), min=1.0)
rcnn_loss_cls = (batch_loss_cls.mean(dim=1) * cls_valid_mask).sum() / normalizer
else:
raise NotImplementedError
tb_dict['rcnn_loss_cls'] = rcnn_loss_cls.item()
tb_dict['rcnn_cls_fg'] = (cls_label_flat > 0).sum().item()
tb_dict['rcnn_cls_bg'] = (cls_label_flat == 0).sum().item()
# rcnn regression loss
fg_mask = (reg_valid_mask > 0)
if fg_mask.sum() > 0:
if cfg.USE_IOU_BRANCH:
iou_branch_pred = ret_dict['rcnn_iou_branch']
iou_branch_pred_fg_mask = iou_branch_pred[fg_mask]
else:
iou_branch_pred_fg_mask = None
all_anchor_size = roi_size
anchor_size = all_anchor_size[fg_mask] if cfg.RCNN.SIZE_RES_ON_ROI else MEAN_SIZE
loss_loc, loss_angle, loss_size, loss_iou, reg_loss_dict = \
loss_utils.get_reg_loss(cls_score=torch.sigmoid(rcnn_cls_flat)[fg_mask],
pred_reg=rcnn_reg[fg_mask],
reg_label=gt_boxes3d_ct[fg_mask],
loc_scope=cfg.RCNN.LOC_SCOPE,
loc_bin_size=cfg.RCNN.LOC_BIN_SIZE,
num_head_bin=cfg.RCNN.NUM_HEAD_BIN,
anchor_size=anchor_size,
get_xz_fine=True, get_y_by_bin=cfg.RCNN.LOC_Y_BY_BIN,
loc_y_scope=cfg.RCNN.LOC_Y_SCOPE, loc_y_bin_size=cfg.RCNN.LOC_Y_BIN_SIZE,
get_ry_fine=True,
use_cls_score=True,
use_mask_score=True,
use_iou_branch=cfg.USE_IOU_BRANCH,
iou_branch_pred=iou_branch_pred_fg_mask)
loss_size = 3 * loss_size # consistent with old codes
# rcnn_loss_reg = loss_loc + loss_angle + loss_size
loss_iou = cfg.TRAIN.CE_WEIGHT * loss_iou
if cfg.USE_IOU_BRANCH:
iou_branch_loss = reg_loss_dict['iou_branch_loss']
rcnn_loss_reg = loss_loc + loss_angle + loss_size + loss_iou + iou_branch_loss
else:
rcnn_loss_reg = loss_loc + loss_angle + loss_size + loss_iou
tb_dict.update(reg_loss_dict)
else:
rcnn_loss_reg = 0
rcnn_loss = rcnn_loss_cls + rcnn_loss_reg
        tb_dict['rcnn_loss_reg'] = rcnn_loss_reg.item() if isinstance(rcnn_loss_reg, torch.Tensor) else rcnn_loss_reg
tb_dict['rcnn_reg_fg'] = reg_valid_mask.sum().item()
# rcnn reid loss
if cfg.REID.ENABLED:
link_label_flat = ret_dict['gt_links']
start_label_flat = ret_dict['gt_starts']
end_label_flat = ret_dict['gt_ends']
rcnn_link_flat = ret_dict['rcnn_link'].view(-1)
rcnn_start_flat = ret_dict['rcnn_start'].view(-1)
rcnn_end_flat = ret_dict['rcnn_end'].view(-1)
# link
pos = (link_label_flat > 0)
neg = (link_label_flat == 0)
tb_dict['rcnn_link_pos'] = torch.sum(pos).item()
tb_dict['rcnn_link_neg'] = torch.sum(neg).item()
rcnn_loss_link = F.l1_loss(rcnn_link_flat, link_label_flat, reduction='mean')
if not torch.isnan(rcnn_loss_link):
rcnn_loss += rcnn_loss_link * cfg.TRAIN.LINK_TRAIN_WEIGHT
if rcnn_loss_link > 0:
tb_dict['rcnn_loss_link_mean'] = rcnn_loss_link.item()
# start end
pos = (start_label_flat > 0)
neg = (start_label_flat == 0)
tb_dict['rcnn_start_pos'] = torch.sum(pos).item()
tb_dict['rcnn_start_neg'] = torch.sum(neg).item()
pos = (end_label_flat > 0)
neg = (end_label_flat == 0)
tb_dict['rcnn_end_pos'] = torch.sum(pos).item()
tb_dict['rcnn_end_neg'] = torch.sum(neg).item()
if cfg.REID.LOSS_LINK == 'L1':
rcnn_loss_start = F.l1_loss(torch.sigmoid(rcnn_start_flat), start_label_flat, reduction='mean')
else:
raise NotImplementedError
if cfg.REID.LOSS_SE == 'L1':
rcnn_loss_end = F.l1_loss(torch.sigmoid(rcnn_end_flat), end_label_flat, reduction='mean')
else:
raise NotImplementedError
if not torch.isnan(rcnn_loss_start):
rcnn_loss += rcnn_loss_start * cfg.TRAIN.SE_TRAIN_WEIGHT
if not torch.isnan(rcnn_loss_end):
rcnn_loss += rcnn_loss_end * cfg.TRAIN.SE_TRAIN_WEIGHT
if rcnn_loss_start > 0:
tb_dict['rcnn_loss_start_mean'] = rcnn_loss_start.item()
if rcnn_loss_end > 0:
tb_dict['rcnn_loss_end_mean'] = rcnn_loss_end.item()
if rcnn_loss > 0:
tb_dict['rcnn_loss'] = rcnn_loss.item()
return rcnn_loss
return model_fn_train
|
419640
|
import os
import six
import functools
from os.path import join
from mock import Mock, patch
from io import BytesIO
from twisted.internet.interfaces import IReactorCore
from twisted.internet.interfaces import IListeningPort
from twisted.internet.interfaces import IStreamClientEndpoint
from twisted.internet.address import IPv4Address
from twisted.internet import defer, error, task
from twisted.python.failure import Failure
from twisted.trial import unittest
from twisted.test import proto_helpers
from txtorcon import TorConfig
from txtorcon import TorControlProtocol
from txtorcon import TorProcessProtocol
from txtorcon import launch
from txtorcon import connect
from txtorcon import AuthBasic
from txtorcon.controller import _is_non_public_numeric_address, Tor, HAVE_ASYNC
from txtorcon.interface import ITorControlProtocol
from .util import TempDir
from zope.interface import implementer, directlyProvides
if HAVE_ASYNC:
from .py3_test_controller import ClientOnionServiceAuthenticationTests3 # noqa: F401
class FakeProcessTransport(proto_helpers.StringTransportWithDisconnection):
pid = -1
reactor = None
def signalProcess(self, signame):
assert self.reactor is not None
self.reactor.callLater(
0,
lambda: self.process_protocol.processEnded(
Failure(error.ProcessTerminated(signal=signame))
)
)
self.reactor.callLater(
0,
lambda: self.process_protocol.processExited(
Failure(error.ProcessTerminated(signal=signame))
)
)
def closeStdin(self):
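        # simulate Tor announcing its control listener on stdout once stdin closes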
self.process_protocol.outReceived(b"Opening Control listener")
return
class FakeProcessTransportNeverBootstraps(FakeProcessTransport):
pid = -1
def closeStdin(self):
return
class FakeProcessTransportNoProtocol(FakeProcessTransport):
def closeStdin(self):
pass
@implementer(IListeningPort)
class FakePort(object):
def __init__(self, port):
self._port = port
def startListening(self):
pass
def stopListening(self):
pass
def getHost(self):
return IPv4Address('TCP', "127.0.0.1", self._port)
@implementer(IReactorCore)
class FakeReactor(task.Clock):
    def __init__(self, test, trans, on_protocol, listen_ports=None):
super(FakeReactor, self).__init__()
self.test = test
self.transport = trans
self.transport.reactor = self # XXX FIXME this is a cycle now
self.on_protocol = on_protocol
        self.listen_ports = listen_ports if listen_ports is not None else []
# util.available_tcp_port ends up 'asking' for free ports via
# listenTCP, ultimately, and the answers we send back are from
# this list
def spawnProcess(self, processprotocol, bin, args, env, path,
uid=None, gid=None, usePTY=None, childFDs=None):
self.protocol = processprotocol
self.protocol.makeConnection(self.transport)
self.transport.process_protocol = processprotocol
self.on_protocol(self.protocol)
return self.transport
def addSystemEventTrigger(self, *args):
self.test.assertEqual(args[0], 'before')
self.test.assertEqual(args[1], 'shutdown')
# we know this is just for the temporary file cleanup, so we
# nuke it right away to avoid polluting /tmp by calling the
# callback now.
args[2]()
def removeSystemEventTrigger(self, id):
pass
def listenTCP(self, *args, **kw):
port = self.listen_ports.pop()
return FakePort(port)
def connectTCP(self, host, port, factory, timeout=0, bindAddress=None):
return
def connectUNIX(self, *args, **kw):
return
class LaunchTorTests(unittest.TestCase):
def setUp(self):
self.protocol = TorControlProtocol()
self.protocol.connectionMade = lambda: None
self.transport = proto_helpers.StringTransport()
self.protocol.makeConnection(self.transport)
self.clock = task.Clock()
def test_ctor_timeout_no_ireactortime(self):
with self.assertRaises(RuntimeError) as ctx:
TorProcessProtocol(lambda: None, timeout=42)
self.assertTrue("Must supply an IReactorTime" in str(ctx.exception))
def _fake_queue(self, cmd):
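        # canned replies for the control-protocol commands issued during bootstrap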
if cmd.split()[0] == 'PROTOCOLINFO':
return defer.succeed('AUTH METHODS=NULL')
elif cmd == 'GETINFO config/names':
return defer.succeed('config/names=')
elif cmd == 'GETINFO signal/names':
return defer.succeed('signal/names=')
elif cmd == 'GETINFO version':
return defer.succeed('version=0.1.2.3')
elif cmd == 'GETINFO events/names':
return defer.succeed('events/names=STATUS_CLIENT')
elif cmd == 'GETINFO config/defaults':
return defer.succeed('config/defaults=')
return defer.succeed(None)
def _fake_event_listener(self, what, cb):
if what == 'STATUS_CLIENT':
# should ignore non-BOOTSTRAP messages
cb('STATUS_CLIENT not-bootstrap')
cb('STATUS_CLIENT BOOTSTRAP PROGRESS=100 TAG=foo SUMMARY=bar')
return defer.succeed(None)
@defer.inlineCallbacks
def test_launch_tor_unix_controlport(self):
trans = FakeProcessTransport()
trans.protocol = self.protocol
self.protocol.post_bootstrap.callback(self.protocol)
self.protocol._set_valid_events("STATUS_CLIENT")
self.protocol.add_event_listener = self._fake_event_listener
self.protocol.queue_command = self._fake_queue
def on_protocol(proto):
proto.outReceived(b'Bootstrapped 90%\n')
# launch() auto-discovers a SOCKS port
reactor = FakeReactor(self, trans, on_protocol, [9050])
reactor.connectUNIX = Mock()
# prepare a suitable directory for tor unix socket
with TempDir() as tmp:
tmpdir = str(tmp)
os.chmod(tmpdir, 0o0700)
socket_file = join(tmpdir, 'test_socket_file')
with patch('txtorcon.controller.UNIXClientEndpoint') as uce:
endpoint = Mock()
endpoint.connect = Mock(return_value=defer.succeed(self.protocol))
uce.return_value = endpoint
yield launch(
reactor,
control_port="unix:{}".format(socket_file),
tor_binary="/bin/echo",
stdout=Mock(),
stderr=Mock(),
)
self.assertTrue(endpoint.connect.called)
self.assertTrue(uce.called)
self.assertEqual(
socket_file,
uce.mock_calls[0][1][1],
)
@defer.inlineCallbacks
def test_launch_tor_unix_controlport_wrong_perms(self):
reactor = FakeReactor(self, Mock(), None, [9050])
with self.assertRaises(ValueError) as ctx:
with TempDir() as tmp:
tmpdir = str(tmp)
os.chmod(tmpdir, 0o0777)
socket_file = join(tmpdir, 'socket_test')
yield launch(
reactor,
control_port="unix:{}".format(socket_file),
tor_binary="/bin/echo",
stdout=Mock(),
stderr=Mock(),
)
self.assertTrue(
"must only be readable by the user" in str(ctx.exception)
)
@defer.inlineCallbacks
def test_launch_tor_unix_controlport_no_directory(self):
reactor = FakeReactor(self, Mock(), None, [9050])
with self.assertRaises(ValueError) as ctx:
socket_file = '/does/not/exist'
yield launch(
reactor,
control_port="unix:{}".format(socket_file),
tor_binary="/bin/echo",
stdout=Mock(),
stderr=Mock(),
)
self.assertTrue("must exist" in str(ctx.exception))
@defer.inlineCallbacks
def test_launch_tor_non_anonymous_and_socks(self):
reactor = FakeReactor(self, Mock(), None, [9050])
with self.assertRaises(ValueError) as ctx:
yield launch(
reactor,
non_anonymous_mode=True,
socks_port=1234,
tor_binary="/bin/echo",
stdout=Mock(),
stderr=Mock(),
)
self.assertIn("Cannot use SOCKS", str(ctx.exception))
@patch('txtorcon.controller.find_tor_binary', return_value='/bin/echo')
@defer.inlineCallbacks
def test_launch_fails(self, ftb):
trans = FakeProcessTransport()
def on_proto(protocol):
protocol.processEnded(
Failure(error.ProcessTerminated(12, None, 'statusFIXME'))
)
reactor = FakeReactor(self, trans, on_proto, [1234, 9052])
try:
yield launch(reactor)
self.fail("Should fail")
except RuntimeError:
pass
errs = self.flushLoggedErrors(RuntimeError)
self.assertEqual(1, len(errs))
self.assertTrue(
"Tor exited with error-code 12" in str(errs[0])
)
@defer.inlineCallbacks
def test_launch_no_ireactorcore(self):
try:
yield launch(None)
self.fail("should get exception")
except ValueError as e:
self.assertTrue("provide IReactorCore" in str(e))
@patch('txtorcon.controller.find_tor_binary', return_value='/bin/echo')
@patch('txtorcon.controller.TorProcessProtocol')
@defer.inlineCallbacks
def test_successful_launch(self, tpp, ftb):
trans = FakeProcessTransport()
reactor = FakeReactor(self, trans, lambda p: None, [1, 2, 3])
config = TorConfig()
def boot(arg=None):
config.post_bootstrap.callback(config)
config.__dict__['bootstrap'] = Mock(side_effect=boot)
config.__dict__['attach_protocol'] = Mock(return_value=defer.succeed(None))
def foo(*args, **kw):
rtn = Mock()
rtn.post_bootstrap = defer.succeed(None)
rtn.when_connected = Mock(return_value=defer.succeed(rtn))
return rtn
tpp.side_effect = foo
tor = yield launch(reactor, _tor_config=config)
self.assertTrue(isinstance(tor, Tor))
@patch('txtorcon.controller.find_tor_binary', return_value='/bin/echo')
@patch('txtorcon.controller.TorProcessProtocol')
@defer.inlineCallbacks
def test_successful_launch_non_anonymous(self, tpp, ftb):
trans = FakeProcessTransport()
reactor = FakeReactor(self, trans, lambda p: None, [1, 2, 3])
config = TorConfig()
def boot(arg=None):
config.post_bootstrap.callback(config)
config.__dict__['bootstrap'] = Mock(side_effect=boot)
config.__dict__['attach_protocol'] = Mock(return_value=defer.succeed(None))
def foo(*args, **kw):
rtn = Mock()
rtn.post_bootstrap = defer.succeed(None)
rtn.when_connected = Mock(return_value=defer.succeed(rtn))
return rtn
tpp.side_effect = foo
tor = yield launch(reactor, _tor_config=config, non_anonymous_mode=True)
self.assertTrue(isinstance(tor, Tor))
self.assertTrue(config.HiddenServiceNonAnonymousMode)
with self.assertRaises(Exception):
yield tor.web_agent()
with self.assertRaises(Exception):
yield tor.dns_resolve('meejah.ca')
@defer.inlineCallbacks
def test_quit(self):
tor = Tor(Mock(), Mock())
tor._protocol = Mock()
tor._process_protocol = Mock()
yield tor.quit()
@defer.inlineCallbacks
def test_quit_no_protocol(self):
tor = Tor(Mock(), Mock())
tor._protocol = None
tor._process_protocol = None
with self.assertRaises(RuntimeError) as ctx:
yield tor.quit()
self.assertTrue('no protocol instance' in str(ctx.exception))
@patch('txtorcon.controller.socks')
@defer.inlineCallbacks
def test_dns_resolve(self, fake_socks):
answer = object()
cfg = Mock()
proto = Mock()
proto.get_conf = Mock(return_value=defer.succeed({"SocksPort": "9050"}))
tor = Tor(Mock(), proto, _tor_config=cfg)
fake_socks.resolve = Mock(return_value=defer.succeed(answer))
ans = yield tor.dns_resolve("meejah.ca")
self.assertEqual(ans, answer)
@patch('txtorcon.controller.socks')
@defer.inlineCallbacks
def test_dns_resolve_default_socksport(self, fake_socks):
answer = object()
cfg = Mock()
from txtorcon.testutil import FakeControlProtocol
proto = FakeControlProtocol([
{"SocksPort": "DEFAULT"},
"9050",
])
tor = Tor(Mock(), proto, _tor_config=cfg)
fake_socks.resolve = Mock(return_value=defer.succeed(answer))
ans = yield tor.dns_resolve("meejah.ca")
self.assertEqual(ans, answer)
@patch('txtorcon.controller.socks')
@defer.inlineCallbacks
def test_dns_resolve_existing_socks(self, fake_socks):
answer = object()
proto = Mock()
proto.get_conf = Mock(return_value=defer.succeed({"SocksPort": "9050"}))
tor = Tor(Mock(), proto)
fake_socks.resolve = Mock(return_value=defer.succeed(answer))
ans0 = yield tor.dns_resolve("meejah.ca")
# do it again to exercise the _default_socks_port() case when
# we already got the default
fake_socks.resolve = Mock(return_value=defer.succeed(answer))
ans1 = yield tor.dns_resolve("meejah.ca")
self.assertEqual(ans0, answer)
self.assertEqual(ans1, answer)
@patch('txtorcon.controller.socks')
@defer.inlineCallbacks
def test_dns_resolve_no_configured_socks(self, fake_socks):
answer = object()
proto = Mock()
proto.get_conf = Mock(return_value=defer.succeed({"SocksPort": "9050"}))
cfg = Mock()
tor = Tor(Mock(), proto, _tor_config=cfg)
def boom(*args, **kw):
raise RuntimeError("no socks")
cfg.socks_endpoint = Mock(side_effect=boom)
fake_socks.resolve = Mock(return_value=defer.succeed(answer))
ans = yield tor.dns_resolve("meejah.ca")
self.assertEqual(ans, answer)
@patch('txtorcon.controller.socks')
@defer.inlineCallbacks
def test_dns_resolve_ptr(self, fake_socks):
answer = object()
proto = Mock()
proto.get_conf = Mock(return_value=defer.succeed({"SocksPort": "9050"}))
tor = Tor(Mock(), proto)
fake_socks.resolve_ptr = Mock(return_value=defer.succeed(answer))
ans = yield tor.dns_resolve_ptr("192.168.3.11")
self.assertEqual(ans, answer)
@patch('txtorcon.controller.find_tor_binary', return_value='/bin/echo')
@defer.inlineCallbacks
def test_successful_launch_tcp_control(self, ftb):
"""
full end-to-end test of a launch, faking things out at a "lower
level" than most of the other tests
"""
trans = FakeProcessTransport()
def on_protocol(proto):
pass
reactor = FakeReactor(self, trans, on_protocol, [1, 2, 3])
def connect_tcp(host, port, factory, timeout=0, bindAddress=None):
addr = Mock()
factory.doStart()
proto = factory.buildProtocol(addr)
tpp = proto._wrappedProtocol
tpp.add_event_listener = self._fake_event_listener
tpp.queue_command = self._fake_queue
proto.makeConnection(Mock())
return proto
reactor.connectTCP = connect_tcp
config = TorConfig()
tor = yield launch(reactor, _tor_config=config, control_port='1234', timeout=30)
self.assertTrue(isinstance(tor, Tor))
@patch('txtorcon.controller.find_tor_binary', return_value='/bin/echo')
@patch('txtorcon.controller.sys')
@patch('txtorcon.controller.TorProcessProtocol')
@defer.inlineCallbacks
def test_successful_launch_tcp_control_non_unix(self, tpp, _sys, ftb):
_sys.platform = 'not darwin or linux2'
trans = FakeProcessTransport()
reactor = FakeReactor(self, trans, lambda p: None, [1, 2, 3])
config = TorConfig()
def boot(arg=None):
config.post_bootstrap.callback(config)
config.__dict__['bootstrap'] = Mock(side_effect=boot)
config.__dict__['attach_protocol'] = Mock(return_value=defer.succeed(None))
def foo(*args, **kw):
rtn = Mock()
rtn.post_bootstrap = defer.succeed(None)
rtn.when_connected = Mock(return_value=defer.succeed(rtn))
return rtn
tpp.side_effect = foo
tor = yield launch(reactor, _tor_config=config)
self.assertTrue(isinstance(tor, Tor))
@patch('txtorcon.controller.sys')
@patch('txtorcon.controller.pwd')
@patch('txtorcon.controller.os.geteuid')
@patch('txtorcon.controller.os.chown')
def test_launch_root_changes_tmp_ownership(self, chown, euid, _pwd, _sys):
_pwd.return_value = 1000
_sys.platform = 'linux2'
euid.return_value = 0
reactor = Mock()
directlyProvides(reactor, IReactorCore)
        # note! we're providing enough options here that we reach the
# "chown" before any 'yield' statements in launch, so we don't
# actually have to wait for it... a little rickety, though :/
launch(reactor, tor_binary='/bin/echo', user='chuffington', socks_port='1234')
self.assertEqual(1, chown.call_count)
@defer.inlineCallbacks
def test_launch_timeout_exception(self):
"""
we provide a timeout, and it expires
"""
trans = Mock()
trans.signalProcess = Mock(side_effect=error.ProcessExitedAlready)
trans.loseConnection = Mock()
on_proto = Mock()
react = FakeReactor(self, trans, on_proto, [1234])
def creator():
return defer.succeed(Mock())
d = launch(
reactor=react,
tor_binary='/bin/echo',
socks_port=1234,
timeout=10,
connection_creator=creator,
)
react.advance(12)
self.assertTrue(trans.loseConnection.called)
with self.assertRaises(RuntimeError) as ctx:
yield d
self.assertTrue("timeout while launching" in str(ctx.exception))
@defer.inlineCallbacks
def test_launch_timeout_process_exits(self):
# cover the "one more edge case" where we get a processEnded()
# but we've already "done" a timeout.
trans = Mock()
trans.signalProcess = Mock()
trans.loseConnection = Mock()
class MyFakeReactor(FakeReactor):
def spawnProcess(self, processprotocol, bin, args, env, path,
uid=None, gid=None, usePTY=None, childFDs=None):
self.protocol = processprotocol
self.protocol.makeConnection(self.transport)
self.transport.process_protocol = processprotocol
self.on_protocol(self.protocol)
status = Mock()
status.value.exitCode = None
processprotocol.processEnded(status)
return self.transport
react = MyFakeReactor(self, trans, Mock(), [1234, 9052])
d = launch(
reactor=react,
tor_binary='/bin/echo',
timeout=10,
data_directory='/dev/null',
)
react.advance(20)
try:
yield d
except RuntimeError as e:
self.assertTrue("Tor was killed" in str(e))
errs = self.flushLoggedErrors(RuntimeError)
self.assertEqual(1, len(errs))
self.assertTrue("Tor was killed" in str(errs[0]))
@defer.inlineCallbacks
def test_launch_wrong_stdout(self):
try:
yield launch(
FakeReactor(self, Mock(), Mock()),
stdout=object(),
tor_binary='/bin/echo',
)
self.fail("Should have thrown an error")
except RuntimeError as e:
self.assertTrue("file-like object needed" in str(e).lower())
@defer.inlineCallbacks
def test_launch_with_timeout(self):
# XXX not entirely sure what this was/is supposed to be
# testing, but it covers an extra 7 lines of code??
timeout = 5
def connector(proto, trans):
proto._set_valid_events('STATUS_CLIENT')
proto.makeConnection(trans)
proto.post_bootstrap.callback(proto)
return proto.post_bootstrap
def on_protocol(proto):
proto.outReceived(b'Bootstrapped 100%\n')
trans = FakeProcessTransportNeverBootstraps()
trans.protocol = self.protocol
creator = functools.partial(connector, Mock(), Mock())
react = FakeReactor(self, trans, on_protocol, [1234, 9052])
with self.assertRaises(RuntimeError) as ctx:
d = launch(react, connection_creator=creator,
timeout=timeout, tor_binary='/bin/echo')
# FakeReactor is a task.Clock subclass and +1 just to be sure
react.advance(timeout + 1)
yield d
self.assertTrue(
'timeout while launching Tor' in str(ctx.exception)
)
# could/should just use return from this to do asserts?
self.flushLoggedErrors(RuntimeError)
@defer.inlineCallbacks
def test_tor_produces_stderr_output(self):
def connector(proto, trans):
proto._set_valid_events('STATUS_CLIENT')
proto.makeConnection(trans)
proto.post_bootstrap.callback(proto)
return proto.post_bootstrap
def on_protocol(proto):
proto.errReceived(b'Something went horribly wrong!\n')
trans = FakeProcessTransport()
trans.protocol = Mock()
fakeout = BytesIO()
fakeerr = BytesIO()
creator = functools.partial(connector, Mock(), Mock())
try:
yield launch(
FakeReactor(self, trans, on_protocol, [1234, 9052]),
connection_creator=creator,
tor_binary='/bin/echo',
stdout=fakeout,
stderr=fakeerr,
)
            self.fail()  # shouldn't get callback
except RuntimeError as e:
self.assertEqual(b'', fakeout.getvalue())
self.assertEqual(b'Something went horribly wrong!\n', fakeerr.getvalue())
self.assertTrue(
'Something went horribly wrong!' in str(e)
)
@patch('txtorcon.controller.find_tor_binary', return_value='/bin/echo')
@defer.inlineCallbacks
def test_tor_connection_fails(self, ftb):
trans = FakeProcessTransport()
def on_protocol(proto):
proto.outReceived(b'Opening Control listener\n')
reactor = FakeReactor(self, trans, on_protocol, [1, 2, 3])
fails = ['one']
def connect_tcp(host, port, factory, timeout=0, bindAddress=None):
if len(fails):
fails.pop()
raise error.CannotListenError('on-purpose-error', None, None)
addr = Mock()
factory.doStart()
proto = factory.buildProtocol(addr)
tpp = proto._wrappedProtocol
def fake_event_listener(what, cb):
if what == 'STATUS_CLIENT':
# should ignore non-BOOTSTRAP messages
cb('STATUS_CLIENT not-bootstrap')
cb('STATUS_CLIENT BOOTSTRAP PROGRESS=100 TAG=foo SUMMARY=bar')
return defer.succeed(None)
tpp.add_event_listener = fake_event_listener
def fake_queue(cmd):
if cmd.split()[0] == 'PROTOCOLINFO':
return defer.succeed('AUTH METHODS=NULL')
elif cmd == 'GETINFO config/names':
return defer.succeed('config/names=')
elif cmd == 'GETINFO signal/names':
return defer.succeed('signal/names=')
elif cmd == 'GETINFO version':
return defer.succeed('version=0.1.2.3')
elif cmd == 'GETINFO events/names':
return defer.succeed('events/names=STATUS_CLIENT')
elif cmd == 'GETINFO config/defaults':
return defer.succeed('config/defaults=')
return defer.succeed(None)
tpp.queue_command = fake_queue
proto.makeConnection(Mock())
return proto
reactor.connectTCP = connect_tcp
config = TorConfig()
tor = yield launch(reactor, _tor_config=config, control_port='1234', timeout=30)
errs = self.flushLoggedErrors()
self.assertTrue(isinstance(tor, Tor))
self.assertEqual(1, len(errs))
def test_tor_connection_user_data_dir(self):
"""
Test that we don't delete a user-supplied data directory.
"""
config = TorConfig()
config.OrPort = 1234
class Connector:
def __call__(self, proto, trans):
proto._set_valid_events('STATUS_CLIENT')
proto.makeConnection(trans)
proto.post_bootstrap.callback(proto)
return proto.post_bootstrap
def on_protocol(proto):
proto.outReceived(b'Bootstrapped 90%\n')
with TempDir() as tmp:
my_dir = str(tmp)
config.DataDirectory = my_dir
trans = FakeProcessTransport()
trans.protocol = self.protocol
creator = functools.partial(Connector(), self.protocol, self.transport)
d = launch(
FakeReactor(self, trans, on_protocol, [1234, 9051]),
connection_creator=creator,
tor_binary='/bin/echo',
data_directory=my_dir,
control_port=0,
)
def still_have_data_dir(tor, tester):
tor._process_protocol.cleanup() # FIXME? not really unit-testy as this is sort of internal function
tester.assertTrue(os.path.exists(my_dir))
d.addCallback(still_have_data_dir, self)
d.addErrback(self.fail)
return d
def _test_tor_connection_user_control_port(self):
"""
Confirm we use a user-supplied control-port properly
"""
config = TorConfig()
config.OrPort = 1234
config.ControlPort = 4321
class Connector:
def __call__(self, proto, trans):
proto._set_valid_events('STATUS_CLIENT')
proto.makeConnection(trans)
proto.post_bootstrap.callback(proto)
return proto.post_bootstrap
def on_protocol(proto):
proto.outReceived(b'Bootstrapped 90%\n')
proto.outReceived(b'Bootstrapped 100%\n')
trans = FakeProcessTransport()
trans.protocol = self.protocol
creator = functools.partial(Connector(), self.protocol, self.transport)
d = launch(
FakeReactor(self, trans, on_protocol, [9052]),
connection_creator=creator,
tor_binary='/bin/echo',
socks_port=1234,
)
def check_control_port(proto, tester):
# we just want to ensure launch() didn't mess with
# the controlport we set
tester.assertEqual(config.ControlPort, 4321)
d.addCallback(check_control_port, self)
d.addErrback(self.fail)
return d
@defer.inlineCallbacks
def _test_tor_connection_default_control_port(self):
"""
Confirm a default control-port is set if not user-supplied.
"""
class Connector:
def __call__(self, proto, trans):
proto._set_valid_events('STATUS_CLIENT')
proto.makeConnection(trans)
proto.post_bootstrap.callback(proto)
return proto.post_bootstrap
def on_protocol(proto):
proto.outReceived(b'Bootstrapped 90%\n')
proto.outReceived(b'Bootstrapped 100%\n')
trans = FakeProcessTransport()
trans.protocol = self.protocol
creator = functools.partial(Connector(), self.protocol, self.transport)
tor = yield launch(
FakeReactor(self, trans, on_protocol, [9052]),
connection_creator=creator,
tor_binary='/bin/echo',
socks_port=1234,
)
cfg = yield tor.get_config()
self.assertEqual(cfg.ControlPort, 9052)
def test_progress_updates(self):
self.got_progress = False
def confirm_progress(p, t, s):
self.assertEqual(p, 10)
self.assertEqual(t, 'tag')
self.assertEqual(s, 'summary')
self.got_progress = True
process = TorProcessProtocol(None, confirm_progress)
process.progress(10, 'tag', 'summary')
self.assertTrue(self.got_progress)
def test_quit_process(self):
process = TorProcessProtocol(None)
process.transport = Mock()
d = process.quit()
self.assertFalse(d.called)
process.processExited(Failure(error.ProcessTerminated(exitCode=15)))
self.assertTrue(d.called)
process.processEnded(Failure(error.ProcessDone(None)))
self.assertTrue(d.called)
errs = self.flushLoggedErrors()
self.assertEqual(1, len(errs))
self.assertTrue("Tor exited with error-code" in str(errs[0]))
def test_quit_process_already(self):
process = TorProcessProtocol(None)
process.transport = Mock()
def boom(sig):
self.assertEqual(sig, 'TERM')
raise error.ProcessExitedAlready()
process.transport.signalProcess = Mock(side_effect=boom)
d = process.quit()
process.processEnded(Failure(error.ProcessDone(None)))
self.assertTrue(d.called)
errs = self.flushLoggedErrors()
self.assertEqual(1, len(errs))
self.assertTrue("Tor exited with error-code" in str(errs[0]))
@defer.inlineCallbacks
def test_quit_process_error(self):
process = TorProcessProtocol(None)
process.transport = Mock()
def boom(sig):
self.assertEqual(sig, 'TERM')
raise RuntimeError("Something bad")
process.transport.signalProcess = Mock(side_effect=boom)
try:
yield process.quit()
except RuntimeError as e:
self.assertEqual("Something bad", str(e))
def XXXtest_status_updates(self):
process = TorProcessProtocol(None)
process.status_client("NOTICE CONSENSUS_ARRIVED")
def XXXtest_tor_launch_success_then_shutdown(self):
"""
There was an error where we double-callbacked a deferred,
i.e. success and then shutdown. This repeats it.
"""
process = TorProcessProtocol(None)
process.status_client(
'STATUS_CLIENT BOOTSTRAP PROGRESS=100 TAG=foo SUMMARY=cabbage'
)
# XXX why this assert?
self.assertEqual(None, process._connected_cb)
class Value(object):
exitCode = 123
class Status(object):
value = Value()
process.processEnded(Status())
self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1)
@defer.inlineCallbacks
def test_launch_no_control_port(self):
'''
See Issue #80. This allows you to launch tor with a TorConfig
with ControlPort=0 in case you don't want a control connection
at all. In this case you get back a TorProcessProtocol and you
own both pieces (i.e. you have to kill it yourself).
'''
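# A hedged usage sketch of what this test exercises (in real code
# `reactor` would be Twisted's actual reactor, not the fake used here):
#
#     tor = yield txtorcon.launch(reactor, control_port=0)
#     # no control connection exists; we own the process:
#     yield tor.quit()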
trans = FakeProcessTransportNoProtocol()
trans.protocol = self.protocol
def creator(*args, **kw):
print("Bad: connection creator called")
self.fail()
def on_protocol(proto):
self.process_proto = proto
proto.outReceived(b'Bootstrapped 90%\n')
proto.outReceived(b'Bootstrapped 100%\n')
reactor = FakeReactor(self, trans, on_protocol, [9052, 9999])
tor = yield launch(
reactor=reactor,
connection_creator=creator,
tor_binary='/bin/echo',
socks_port=1234,
control_port=0,
)
self.assertEqual(tor._process_protocol, self.process_proto)
d = tor.quit()
reactor.advance(0)
yield d
errs = self.flushLoggedErrors()
self.assertEqual(1, len(errs))
self.assertTrue("Tor was killed" in str(errs[0]))
def create_endpoint(*args, **kw):
ep = Mock()
directlyProvides(ep, IStreamClientEndpoint)
return ep
def create_endpoint_fails(*args, **kw):
def go_boom(*args, **kw):
raise RuntimeError("boom")
ep = Mock(side_effect=go_boom)
directlyProvides(ep, IStreamClientEndpoint)
return ep
class ConnectTorTests(unittest.TestCase):
@patch('txtorcon.controller.TorConfig')
@patch('txtorcon.controller.UNIXClientEndpoint', side_effect=create_endpoint)
@patch('txtorcon.controller.TCP4ClientEndpoint', side_effect=create_endpoint)
@defer.inlineCallbacks
def test_connect_defaults(self, fake_cfg, fake_unix, fake_tcp):
"""
happy-path test, ensuring there are no exceptions
"""
transport = Mock()
reactor = FakeReactor(self, transport, lambda: None)
yield connect(reactor)
@patch('txtorcon.controller.TorConfig')
@defer.inlineCallbacks
def test_connect_provide_endpoint(self, fake_cfg):
transport = Mock()
reactor = FakeReactor(self, transport, lambda: None)
ep = Mock()
with self.assertRaises(ValueError) as ctx:
yield connect(reactor, ep)
self.assertTrue('IStreamClientEndpoint' in str(ctx.exception))
@patch('txtorcon.controller.TorConfig')
@defer.inlineCallbacks
def test_connect_provide_multiple_endpoints(self, fake_cfg):
transport = Mock()
reactor = FakeReactor(self, transport, lambda: None)
ep0 = Mock()
ep1 = Mock()
with self.assertRaises(ValueError) as ctx:
yield connect(reactor, [ep0, ep1])
self.assertTrue('IStreamClientEndpoint' in str(ctx.exception))
@patch('txtorcon.controller.TorConfig')
@defer.inlineCallbacks
def test_connect_multiple_endpoints_error(self, fake_cfg):
transport = Mock()
reactor = FakeReactor(self, transport, lambda: None)
ep0 = Mock()
def boom(*args, **kw):
raise RuntimeError("the bad thing")
ep0.connect = boom
directlyProvides(ep0, IStreamClientEndpoint)
with self.assertRaises(RuntimeError) as ctx:
yield connect(reactor, ep0)
self.assertEqual("the bad thing", str(ctx.exception))
@patch('txtorcon.controller.TorConfig')
@defer.inlineCallbacks
def test_connect_multiple_endpoints_many_errors(self, fake_cfg):
transport = Mock()
reactor = FakeReactor(self, transport, lambda: None)
ep0 = Mock()
ep1 = Mock()
def boom0(*args, **kw):
raise RuntimeError("the bad thing")
def boom1(*args, **kw):
raise RuntimeError("more sadness")
ep0.connect = boom0
ep1.connect = boom1
directlyProvides(ep0, IStreamClientEndpoint)
directlyProvides(ep1, IStreamClientEndpoint)
with self.assertRaises(RuntimeError) as ctx:
yield connect(reactor, [ep0, ep1])
self.assertTrue("the bad thing" in str(ctx.exception))
self.assertTrue("more sadness" in str(ctx.exception))
@patch('txtorcon.controller.TorConfig')
@defer.inlineCallbacks
def test_connect_success(self, fake_cfg):
transport = Mock()
reactor = FakeReactor(self, transport, lambda: None)
torcfg = Mock()
fake_cfg.from_protocol = Mock(return_value=torcfg)
ep0 = Mock()
proto = object()
torcfg.protocol = proto
ep0.connect = Mock(return_value=proto)
directlyProvides(ep0, IStreamClientEndpoint)
ans = yield connect(reactor, [ep0])
cfg = yield ans.get_config()
self.assertEqual(cfg, torcfg)
self.assertEqual(ans.protocol, proto)
class WebAgentTests(unittest.TestCase):
def setUp(self):
proto = Mock()
self.pool = Mock()
self.expected_response = object()
proto.request = Mock(return_value=defer.succeed(self.expected_response))
self.pool.getConnection = Mock(return_value=defer.succeed(proto))
@defer.inlineCallbacks
def test_web_agent_defaults(self):
reactor = Mock()
# XXX is there a faster way to do this? better reactor fake?
fake_host = Mock()
fake_host.port = 1234
fake_port = Mock()
fake_port.getHost = Mock(return_value=fake_host)
reactor.listenTCP = Mock(return_value=fake_port)
cfg = Mock()
cfg.create_socks_endpoint = Mock(return_value=defer.succeed("9050"))
proto = Mock()
proto.get_conf = Mock(return_value=defer.succeed({}))
directlyProvides(proto, ITorControlProtocol)
tor = Tor(reactor, proto, _tor_config=cfg)
try:
agent = tor.web_agent(pool=self.pool)
except ImportError as e:
if 'IAgentEndpointFactory' in str(e):
print("Skipping; appears we don't have web support")
return
resp = yield agent.request(b'GET', b'meejah.ca')
self.assertEqual(self.expected_response, resp)
@defer.inlineCallbacks
def test_web_agent_deferred(self):
socks_d = defer.succeed("9151")
reactor = Mock()
cfg = Mock()
proto = Mock()
directlyProvides(proto, ITorControlProtocol)
tor = Tor(reactor, proto, _tor_config=cfg)
agent = tor.web_agent(pool=self.pool, socks_endpoint=socks_d)
resp = yield agent.request(b'GET', b'meejah.ca')
self.assertEqual(self.expected_response, resp)
@defer.inlineCallbacks
def test_web_agent_endpoint(self):
socks = Mock()
directlyProvides(socks, IStreamClientEndpoint)
reactor = Mock()
cfg = Mock()
proto = Mock()
directlyProvides(proto, ITorControlProtocol)
tor = Tor(reactor, proto, _tor_config=cfg)
agent = tor.web_agent(pool=self.pool, socks_endpoint=socks)
resp = yield agent.request(b'GET', b'meejah.ca')
self.assertEqual(self.expected_response, resp)
@defer.inlineCallbacks
def test_web_agent_error(self):
reactor = Mock()
cfg = Mock()
proto = Mock()
directlyProvides(proto, ITorControlProtocol)
tor = Tor(reactor, proto, _tor_config=cfg)
with self.assertRaises(ValueError) as ctx:
agent = tor.web_agent(pool=self.pool, socks_endpoint=object())
yield agent.request(b'GET', b'meejah.ca')
self.assertTrue("'socks_endpoint' should be" in str(ctx.exception))
class TorAttributeTests(unittest.TestCase):
def setUp(self):
reactor = Mock()
proto = Mock()
directlyProvides(proto, ITorControlProtocol)
self.cfg = Mock()
self.tor = Tor(reactor, proto, _tor_config=self.cfg)
def test_process(self):
self.assertIs(
None,
self.tor.process
)
def test_when_connected_already(self):
tpp = TorProcessProtocol(lambda: None)
# hmmmmmph, delving into internal state "because way shorter
# test"
tpp._connected_listeners = None
d = tpp.when_connected()
self.assertTrue(d.called)
self.assertEqual(d.result, tpp)
def test_process_exists(self):
gold = object()
self.tor._process_protocol = gold
self.assertEqual(gold, self.tor.process)
def test_protocol_exists(self):
self.tor.protocol
def test_version_passthrough(self):
self.tor.version
class TorAttributeTestsNoConfig(unittest.TestCase):
def setUp(self):
reactor = Mock()
proto = Mock()
directlyProvides(proto, ITorControlProtocol)
self.tor = Tor(reactor, proto)
@defer.inlineCallbacks
def test_get_config(self):
with patch('txtorcon.controller.TorConfig') as torcfg:
gold = object()
torcfg.from_protocol = Mock(return_value=defer.succeed(gold))
cfg = yield self.tor.get_config()
self.assertEqual(gold, cfg)
class DormantTests(unittest.TestCase):
def setUp(self):
reactor = Mock()
self.proto = Mock()
self.cfg = Mock()
self.tor = Tor(reactor, self.proto, _tor_config=self.cfg)
self.tor.dns_resolve = Mock()
@defer.inlineCallbacks
def test_ready(self):
self.proto.get_info = Mock(return_value={
"dormant": "0",
"status/enough-dir-info": "1",
"status/circuit-established": "1",
})
ready = yield self.tor.is_ready()
self.assertTrue(ready, "should be ready")
@defer.inlineCallbacks
def test_become_ready_already(self):
self.proto.get_info = Mock(return_value={
"dormant": "0",
"status/enough-dir-info": "1",
"status/circuit-established": "1",
})
yield self.tor.become_ready()
self.assertTrue(
self.tor.dns_resolve.mock_calls == []
)
@defer.inlineCallbacks
def test_become_ready_asleep(self):
self.proto.get_info = Mock(return_value={
"dormant": "1",
"status/enough-dir-info": "1",
"status/circuit-established": "1",
})
yield self.tor.become_ready()
self.assertEqual(1, len(self.tor.dns_resolve.mock_calls))
class TorStreamTests(unittest.TestCase):
def setUp(self):
reactor = Mock()
proto = Mock()
proto.get_conf = Mock(return_value=defer.succeed({"SocksPort": "9050"}))
self.cfg = Mock()
self.tor = Tor(reactor, proto, _tor_config=self.cfg)
def test_sanity(self):
self.assertTrue(_is_non_public_numeric_address(u'10.0.0.0'))
self.assertTrue(_is_non_public_numeric_address(u'::1'))
def test_v6(self):
import ipaddress
ipaddress.ip_address(u'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b')
def test_stream_private_ip(self):
with self.assertRaises(Exception) as ctx:
self.tor.stream_via('10.0.0.1', '1234')
self.assertTrue("isn't going to work over Tor", str(ctx.exception))
def test_stream_v6(self):
with self.assertRaises(Exception) as ctx:
self.tor.stream_via(u'::1', '1234')
self.assertTrue("isn't going to work over Tor", str(ctx.exception))
def test_public_v6(self):
    # should not be an error; fd00::/8 is a private (unique-local)
    # range, so use a genuinely public v6 address here
    self.tor.stream_via(u'2001:4860:4860::8888', '4321')
def test_public_v4(self):
# should not be an error
self.tor.stream_via(u'8.8.8.8', '4321')
def test_stream_host(self):
self.tor.stream_via(b'meejah.ca', '1234')
class IteratorTests(unittest.TestCase):
def XXXtest_iterate_torconfig(self):
cfg = TorConfig()
cfg.FooBar = 'quux'
cfg.save()
cfg.Quux = 'blimblam'
keys = sorted([k for k in cfg])
self.assertEqual(['FooBar', 'Quux'], keys)
class FactoryFunctionTests(unittest.TestCase):
"""
Mostly simple 'does not blow up' sanity checks of simple
factory-functions.
"""
@defer.inlineCallbacks
def test_create_state(self):
tor = Tor(Mock(), Mock())
with patch('txtorcon.controller.TorState') as ts:
ts.post_bootstrap = defer.succeed('boom')
yield tor.create_state()
# no assertions; we're just testing that this doesn't raise
def test_str(self):
tor = Tor(Mock(), Mock())
str(tor)
# just testing the __str__ method doesn't explode
class EphemeralOnionFactoryTests(unittest.TestCase):
"""
the onion-service factory functions verify their args
"""
def setUp(self):
reactor = Mock()
proto = Mock()
directlyProvides(proto, ITorControlProtocol)
self.cfg = Mock()
self.tor = Tor(reactor, proto, _tor_config=self.cfg)
@defer.inlineCallbacks
def test_ports_not_sequence(self):
with self.assertRaises(ValueError):
yield self.tor.create_onion_service("not a sequence")
@defer.inlineCallbacks
def test_ports_contain_non_ints(self):
with self.assertRaises(ValueError) as ctx:
yield self.tor.create_onion_service([object()])
self.assertIn("non-integer entry", str(ctx.exception))
@defer.inlineCallbacks
def test_ports_contain_non_ints2(self):
with self.assertRaises(ValueError) as ctx:
yield self.tor.create_onion_service([set([1, 2, 3])])
self.assertIn("contain a single int", str(ctx.exception))
@defer.inlineCallbacks
def test_ports_contain_non_ints3(self):
with self.assertRaises(ValueError) as ctx:
yield self.tor.create_onion_service([('not', 'an int')])
self.assertIn("non-integer", str(ctx.exception))
@defer.inlineCallbacks
def test_ports_contain_non_ints4(self):
with self.assertRaises(ValueError) as ctx:
yield self.tor.create_onion_service([('1234', 'bad')])
self.assertIn("be either an integer", str(ctx.exception))
@defer.inlineCallbacks
def test_ports_contain_non_ints5(self):
with self.assertRaises(ValueError) as ctx:
yield self.tor.create_onion_service([object()])
self.assertIn("non-integer entry", str(ctx.exception))
@defer.inlineCallbacks
def test_ports_contain_non_ints6(self):
from txtorcon.controller import _validate_ports
yield _validate_ports(Mock(), [80])
@defer.inlineCallbacks
def test_ports_contain_non_ints_unix_ok(self):
from txtorcon.controller import _validate_ports
yield _validate_ports(Mock(), [(80, "unix:/dev/null")])
@defer.inlineCallbacks
def test_ports_contain_2_tuple(self):
from txtorcon.controller import _validate_ports
yield _validate_ports(Mock(), [(80, 54321)])
@defer.inlineCallbacks
def test_ports_contain_string0(self):
from txtorcon.controller import _validate_ports
yield _validate_ports(Mock(), [u"80 127.0.0.1:1234"])
@defer.inlineCallbacks
def test_ports_contain_string1(self):
from txtorcon.controller import _validate_ports
yield _validate_ports(Mock(), ["80 127.0.0.1:1234"])
@defer.inlineCallbacks
def test_version_invalid(self):
with self.assertRaises(ValueError) as ctx:
yield self.tor.create_onion_service([80], version=1)
self.assertIn("The only valid Onion service versions", str(ctx.exception))
def test_auth(self):
self.tor.create_authenticated_onion_endpoint(80, AuthBasic(['alice']))
def test_auth_fs(self):
self.tor.create_filesystem_authenticated_onion_endpoint(80, '/dev/null', AuthBasic(['alice']))
@defer.inlineCallbacks
def test_happy_path(self):
self.cfg.EphemeralOnionServices = []
with patch('txtorcon.onion.available_tcp_port', return_value=1234):
with patch.object(self.cfg, 'tor_protocol') as proto:
proto.queue_command = Mock(return_value="ServiceID=deadbeef\nPrivateKey=BlobbyMcBlobberson")
d = self.tor.create_onion_service([80])
f = proto.add_event_listener.mock_calls[0][1][1]
f("UPLOAD deadbeef x dirauth0")
f("UPLOADED x x dirauth0")
service = yield d
self.assertEqual("deadbeef.onion", service.hostname)
self.assertEqual("BlobbyMcBlobberson", service.private_key)
self.assertEqual(set(['80 127.0.0.1:1234']), service.ports)
class FilesystemOnionFactoryTests(unittest.TestCase):
"""
the onion-service factory functions verify their args
"""
def setUp(self):
reactor = Mock()
proto = Mock()
directlyProvides(proto, ITorControlProtocol)
self.cfg = Mock()
self.tor = Tor(reactor, proto, _tor_config=self.cfg)
self.hsdir = self.mktemp()
os.mkdir(self.hsdir)
@defer.inlineCallbacks
def test_ports_not_sequence(self):
with self.assertRaises(ValueError):
yield self.tor.create_filesystem_onion_service("not a sequence", self.hsdir)
@defer.inlineCallbacks
def test_ports_contain_non_ints0(self):
with self.assertRaises(ValueError) as ctx:
yield self.tor.create_filesystem_onion_service([('not', 'an int')], self.hsdir)
self.assertIn("a tuple with a non-integer", str(ctx.exception))
@defer.inlineCallbacks
def test_ports_contain_non_ints1(self):
with self.assertRaises(ValueError) as ctx:
yield self.tor.create_filesystem_onion_service([object()], self.hsdir)
self.assertIn("non-integer", str(ctx.exception))
@defer.inlineCallbacks
def test_version_invalid(self):
with self.assertRaises(ValueError) as ctx:
yield self.tor.create_filesystem_onion_service([80], self.hsdir, version=1)
self.assertIn("The only valid Onion service versions", str(ctx.exception))
@defer.inlineCallbacks
def test_happy_path(self):
self.cfg.OnionServices = []
with patch('txtorcon.onion.available_tcp_port', return_value=1234):
with patch.object(self.cfg, 'tor_protocol') as proto:
with open(join(self.hsdir, "hostname"), "w") as f:
f.write("deadbeef.onion\n")
with open(join(self.hsdir, "hs_ed25519_secret_key"), "w") as f:
f.write("<PASSWORD>")
proto.version = "0.3.2.1"
proto.queue_command = Mock(return_value="OK")
d = self.tor.create_filesystem_onion_service([80], self.hsdir)
f = proto.add_event_listener.mock_calls[0][1][1]
f("UPLOAD deadbeef x dirauth0")
f("UPLOADED x x dirauth0")
service = yield d
self.assertEqual("deadbeef.onion", service.hostname)
self.assertEqual(b"BlobbyMcBlobberson", service.private_key)
self.assertEqual(set(['80 127.0.0.1:1234']), set(service.ports))
class FilesystemOnionEndpointFactoryTests(unittest.TestCase):
def setUp(self):
reactor = Mock()
proto = Mock()
directlyProvides(proto, ITorControlProtocol)
self.cfg = Mock()
self.tor = Tor(reactor, proto, _tor_config=self.cfg)
@defer.inlineCallbacks
def test_filesystem_endpoint(self):
yield self.tor.create_filesystem_onion_endpoint(80, '/dev/null')
@defer.inlineCallbacks
def test_ephemeral_endpoint(self):
yield self.tor.create_onion_endpoint(80)
class ClientOnionServiceAuthenticationTests(unittest.TestCase):
def setUp(self):
reactor = Mock()
proto = Mock()
directlyProvides(proto, ITorControlProtocol)
self.cfg = TorConfig()
self.cfg.HidServAuth = ["existing.onion some_token"]
self.tor = Tor(reactor, proto, _tor_config=self.cfg)
@defer.inlineCallbacks
def test_add(self):
yield self.tor.add_onion_authentication("foo.onion", "a_token")
self.assertIn(
"foo.onion a_token",
self.cfg.HidServAuth,
)
@defer.inlineCallbacks
def test_add_twice(self):
yield self.tor.add_onion_authentication("foo.onion", "a_token")
self.assertIn(
"foo.onion a_token",
self.cfg.HidServAuth,
)
# a second add of the same token should be fine
yield self.tor.add_onion_authentication("foo.onion", "a_token")
self.assertIn(
"foo.onion a_token",
self.cfg.HidServAuth,
)
@defer.inlineCallbacks
def test_add_twice_different_token(self):
yield self.tor.add_onion_authentication("foo.onion", "a_token")
self.assertIn(
"foo.onion a_token",
self.cfg.HidServAuth,
)
# a second token with a different value: error
with self.assertRaises(ValueError):
yield self.tor.add_onion_authentication("foo.onion", "a_different_token")
@defer.inlineCallbacks
def test_remove(self):
yield self.tor.remove_onion_authentication("existing.onion")
self.assertEqual(0, len(self.cfg.HidServAuth))
@defer.inlineCallbacks
def test_remove_unfound(self):
yield self.tor.remove_onion_authentication("existing.onion")
self.assertEqual(0, len(self.cfg.HidServAuth))
yield self.tor.remove_onion_authentication("non_existing.onion")
self.assertEqual(0, len(self.cfg.HidServAuth))
def test_context_manager_py2(self):
if not six.PY2:
return
with self.assertRaises(RuntimeError):
self.tor.onion_authentication("foo.onion", "token")
@defer.inlineCallbacks
def test_add_and_remove(self):
yield self.tor.add_onion_authentication("foo.onion", "a_token")
self.assertIn(
"foo.onion a_token",
self.cfg.HidServAuth,
)
yield self.tor.remove_onion_authentication("foo.onion")
self.assertNotIn(
"foo.onion a_token",
self.cfg.HidServAuth,
)
|
419677
|
from itertools import chain
import sys
from types import SimpleNamespace as namespace
from xml.sax.saxutils import escape
from scipy.spatial import distance
import numpy as np
from AnyQt.QtWidgets import (
QFormLayout,
QApplication,
QGraphicsEllipseItem,
QGraphicsSceneMouseEvent,
QToolTip,
)
from AnyQt.QtGui import QPen
from AnyQt.QtCore import Qt, QObject, QEvent, QSize, QRectF, QLineF, QTimer, QPoint
from AnyQt.QtCore import pyqtSignal as Signal, pyqtSlot as Slot
import pyqtgraph as pg
from Orange.data import Table, Domain, StringVariable, ContinuousVariable
from Orange.projection.freeviz import FreeViz
from Orange.widgets import widget, gui, settings
from Orange.widgets.utils.annotated_data import (
create_annotated_table,
ANNOTATED_DATA_SIGNAL_NAME,
create_groups_table,
)
from Orange.widgets.visualize.owscatterplotgraph import (
OWScatterPlotGraph,
InteractiveViewBox,
HelpEventDelegate,
)
from Orange.widgets.visualize.utils.plotutils import AnchorItem
from Orange.widgets.widget import Input, Output
from Orange.widgets import report
class AsyncUpdateLoop(QObject):
"""
Run/drive a coroutine from the event loop.
This is a utility class which can be used for implementing
asynchronous update loops, i.e. coroutines which periodically yield
control back to the Qt event loop.
"""
Next = QEvent.registerEventType()
#: State flags
Idle, Running, Cancelled, Finished = 0, 1, 2, 3
#: The coroutine has yielded control to the caller (with `object`)
yielded = Signal(object)
#: The coroutine has finished/exited (either with an exception
#: or with a return statement)
finished = Signal()
#: The coroutine has returned (normal return statement / StopIteration)
returned = Signal(object)
#: The coroutine has exited with an exception.
raised = Signal(object)
#: The coroutine was cancelled/closed.
cancelled = Signal()
def __init__(self, parent=None, **kwargs):
super().__init__(parent, **kwargs)
self.__coroutine = None
self.__next_pending = False # Flag for compressing scheduled events
self.__in_next = False
self.__state = AsyncUpdateLoop.Idle
@Slot(object)
def setCoroutine(self, loop):
"""
Set the coroutine.
The coroutine will be resumed (repeatedly) from the event queue.
If there is an existing coroutine set, it is first closed/cancelled.
Raises a RuntimeError if the current coroutine is running.
"""
if self.__coroutine is not None:
self.__coroutine.close()
self.__coroutine = None
self.__state = AsyncUpdateLoop.Cancelled
self.cancelled.emit()
self.finished.emit()
if loop is not None:
self.__coroutine = loop
self.__state = AsyncUpdateLoop.Running
self.__schedule_next()
@Slot()
def cancel(self):
"""
Cancel/close the current coroutine.
Raises a RuntimeError if the current coroutine is running.
"""
self.setCoroutine(None)
def state(self):
"""
Return the current state.
"""
return self.__state
def isRunning(self):
return self.__state == AsyncUpdateLoop.Running
def __schedule_next(self):
if not self.__next_pending:
self.__next_pending = True
QTimer.singleShot(10, self.__on_timeout)
def __next(self):
if self.__coroutine is not None:
try:
rval = next(self.__coroutine)
except StopIteration as stop:
self.__state = AsyncUpdateLoop.Finished
self.returned.emit(stop.value)
self.finished.emit()
self.__coroutine = None
except BaseException as er:
self.__state = AsyncUpdateLoop.Finished
self.raised.emit(er)
self.finished.emit()
self.__coroutine = None
else:
self.yielded.emit(rval)
self.__schedule_next()
@Slot()
def __on_timeout(self):
assert self.__next_pending
self.__next_pending = False
if not self.__in_next:
self.__in_next = True
try:
self.__next()
finally:
self.__in_next = False
else:
# re-entered while already advancing; reschedule instead of recursing
self.__schedule_next()
def customEvent(self, event):
if event.type() == AsyncUpdateLoop.Next:
self.__on_timeout()
else:
super().customEvent(event)
class FreeVizInteractiveViewBox(InteractiveViewBox):
def __init__(self, graph, enable_menu=False):
self.mousestate = 0
self.point_i = None
super().__init__(graph, enable_menu)
def _dragtip_pos(self):
return 10, 10
def mouseDragEvent(self, ev, axis=None):
master = self.graph.master
if master.data is None:
super().mouseDragEvent(ev, axis)
return
pos = self.childGroup.mapFromParent(ev.pos())
minradius = master.radius / 100 + 1e-5
points = master.plotdata.anchors
mask = np.zeros((len(points)), dtype=bool)
for i, point in enumerate(points):
if np.linalg.norm(point) > minradius:
mask[i] = True
np_pos = np.array([[pos.x(), pos.y()]])
distances = distance.cdist(np_pos, points[:, :2])[0]
is_near = False if not len(distances[mask]) else np.min(distances[mask]) < 0.1
if ev.button() != Qt.LeftButton or (ev.start and not is_near):
self.mousestate = 2 # finished
if self.mousestate == 2:
if ev.finish:
self.mousestate = 0 # ready for new task
super().mouseDragEvent(ev, axis)
return
ev.accept()
if ev.start:
self.setCursor(Qt.ClosedHandCursor)
self.mousestate = 1 # working
self.point_i = np.flatnonzero(mask)[np.argmin(distances[mask])]
master.randomize_indices()
is_moving = True
if self.mousestate == 1:
if ev.finish:
self.setCursor(Qt.OpenHandCursor)
self.mousestate = 0
is_moving = False
points[self.point_i][0] = pos.x()
points[self.point_i][1] = pos.y()
if is_moving:
master.manual_move_anchor()
else:
master.setup_plot(reset_view=False)
self.graph.show_indicator(point_i=self.point_i)
class EventDelegate(HelpEventDelegate):
def __init__(self, delegate, delegate2, parent=None):
self.delegate2 = delegate2
super().__init__(delegate, parent=parent)
def eventFilter(self, obj, ev):
if isinstance(ev, QGraphicsSceneMouseEvent):
self.delegate2(ev)
return super().eventFilter(obj, ev)
SELECTION_WIDTH = 5
RANGE = QRectF(-1.05, -1.05, 2.1, 2.1)
class OWFreeVizGraph(OWScatterPlotGraph):
jitter_size = settings.Setting(0)
def __init__(self, scatter_widget, parent=None, name="None", view_box=None):
super().__init__(scatter_widget, parent=parent, _=name, view_box=view_box)
self._tooltip_delegate = EventDelegate(self.help_event, self._show_indicator)
self.plot_widget.scene().installEventFilter(self._tooltip_delegate)
self.master = scatter_widget
for axis_loc in ["left", "bottom"]:
self.plot_widget.hideAxis(axis_loc)
def update_data(self, attr_x, attr_y, reset_view=True):
super().update_data(attr_x, attr_y, reset_view=reset_view)
for axis in ["left", "bottom"]:
self.plot_widget.hideAxis(axis)
if reset_view:
self.view_box.setRange(RANGE, padding=0.025)
self.master.viewbox.setAspectLocked(True, 1)
self.master.viewbox.init_history()
self.master.viewbox.tag_history()
def _show_indicator(self, ev):
scene = self.plot_widget.scene()
if self.scatterplot_item is None or scene.drag_tooltip.isVisible():
return False
for indicator in self.master.plotdata.indicators:
self.plot_widget.removeItem(indicator)
self.master.plotdata.indicators = []
pos = self.scatterplot_item.mapFromScene(ev.scenePos())
x = pos.x()
y = pos.y()
master = self.master
minradius = master.radius / 100 + 1e-5
points = master.plotdata.anchors
mask = np.zeros((len(points)), dtype=bool)
for i, point in enumerate(points):
if np.linalg.norm(point) > minradius:
mask[i] = True
np_pos = np.array([[x, y]])
distances = distance.cdist(np_pos, points[:, :2])[0]
if len(distances[mask]) and np.min(distances[mask]) < 0.08:
if self.view_box.mousestate == 0:
self.view_box.setCursor(Qt.OpenHandCursor)
self.show_indicator(
point_i=np.flatnonzero(mask)[np.argmin(distances[mask])]
)
else:
self.view_box.setCursor(Qt.ArrowCursor)
return True
def show_indicator(self, point_i):
points = self.master.plotdata.anchors
func = self.view_box.childGroup.mapToDevice
dx = (func(QPoint(1, 0)) - func(QPoint(-1, 0))).x()
scene_size = 600 / dx
self.master.plotdata.indicators.append(
MoveIndicator(points[point_i][0], points[point_i][1], scene_size=scene_size)
)
self.plot_widget.addItem(self.master.plotdata.indicators[0])
def help_event(self, event):
if self.scatterplot_item is None:
return False
act_pos = self.scatterplot_item.mapFromScene(event.scenePos())
points = self.scatterplot_item.pointsAt(act_pos)
text = ""
attr = lambda i: self.domain.attributes[i]
if len(points):
for i, p in enumerate(points):
index = p.data()
text += "Attributes:\n"
text += "".join(
" {} = {}\n".format(attr(i).name, self.data[index][attr(i)])
for i in self.master.plotdata.topattrs[index]
)
if len(self.domain.attributes) > 10:
text += " ... and {} others\n\n".format(
len(self.domain.attributes) - 10
)
# class_var is always:
text += "Class:\n {} = {}\n".format(
self.domain.class_var.name,
self.data[index][self.data.domain.class_var],
)
if i < len(points) - 1:
text += "------------------\n"
text = '<span style="white-space:pre">{}</span>'.format(escape(text))
QToolTip.showText(event.screenPos(), text, widget=self.plot_widget)
return True
else:
return False
MAX_ITERATIONS = 1000
MAX_ANCHORS = 20
MAX_POINTS = 300
MAX_INSTANCES = 10000
class OWFreeViz(widget.OWWidget):
name = "FreeViz"
description = "Displays FreeViz projection"
icon = "icons/Freeviz.svg"
priority = 240
class Inputs:
data = Input("Data", Table, default=True)
data_subset = Input("Data Subset", Table)
class Outputs:
selected_data = Output("Selected Data", Table, default=True)
annotated_data = Output(ANNOTATED_DATA_SIGNAL_NAME, Table)
components = Output("Components", Table)
#: Initialization type
Circular, Random = 0, 1
jitter_sizes = [0, 0.1, 0.5, 1, 2]
settings_version = 2
settingsHandler = settings.DomainContextHandler()
radius = settings.Setting(0)
initialization = settings.Setting(Circular)
auto_commit = settings.Setting(True)
resolution = 256
graph = settings.SettingProvider(OWFreeVizGraph)
ReplotRequest = QEvent.registerEventType()
graph_name = "graph.plot_widget.plotItem"
class Warning(widget.OWWidget.Warning):
sparse_not_supported = widget.Msg("Sparse data is ignored.")
class Error(widget.OWWidget.Error):
no_class_var = widget.Msg("Need a class variable")
not_enough_class_vars = widget.Msg(
    "Needs a discrete class variable with at least 2 values"
)
features_exceeds_instances = widget.Msg(
"Algorithm should not be used when "
"number of features exceeds the number "
"of instances."
)
too_many_data_instances = widget.Msg("Cannot handle so large data.")
no_valid_data = widget.Msg("No valid data.")
def __init__(self):
super().__init__()
self.data = None
self.subset_data = None
self._subset_mask = None
self._validmask = None
self._X = None
self._Y = None
self._selection = None
self.__replot_requested = False
self.variable_x = ContinuousVariable("freeviz-x")
self.variable_y = ContinuousVariable("freeviz-y")
box0 = gui.vBox(self.mainArea, True, margin=0)
self.graph = OWFreeVizGraph(
self, box0, "Plot", view_box=FreeVizInteractiveViewBox
)
box0.layout().addWidget(self.graph.plot_widget)
plot = self.graph.plot_widget
box = gui.widgetBox(self.controlArea, "Optimization", spacing=10)
form = QFormLayout(
labelAlignment=Qt.AlignLeft,
formAlignment=Qt.AlignLeft,
fieldGrowthPolicy=QFormLayout.AllNonFixedFieldsGrow,
verticalSpacing=10,
)
form.addRow(
"Initialization",
gui.comboBox(
box,
self,
"initialization",
items=["Circular", "Random"],
callback=self.reset_initialization,
),
)
box.layout().addLayout(form)
self.btn_start = gui.button(
widget=box,
master=self,
label="Optimize",
callback=self.toogle_start,
enabled=False,
)
self.viewbox = plot.getViewBox()
self.replot = None
g = self.graph.gui
g.point_properties_box(self.controlArea)
self.models = g.points_models
box = gui.widgetBox(self.controlArea, "Show anchors")
self.rslider = gui.hSlider(
box,
self,
"radius",
minValue=0,
maxValue=100,
step=5,
label="Radius",
createLabel=False,
ticks=True,
callback=self.update_radius,
)
self.rslider.setTickInterval(0)
self.rslider.setPageStep(10)
box = gui.vBox(self.controlArea, "Plot Properties")
g.add_widgets([g.JitterSizeSlider], box)
g.add_widgets([g.ShowLegend, g.ClassDensity, g.LabelOnlySelected], box)
self.graph.box_zoom_select(self.controlArea)
self.controlArea.layout().addStretch(100)
self.icons = gui.attributeIconDict
p = self.graph.plot_widget.palette()
self.graph.set_palette(p)
gui.auto_commit(
self.controlArea,
self,
"auto_commit",
"Send Selection",
"Send Automatically",
)
self.graph.zoom_actions(self)
# FreeViz
self._loop = AsyncUpdateLoop(parent=self)
self._loop.yielded.connect(self.__set_projection)
self._loop.finished.connect(self.__freeviz_finished)
self._loop.raised.connect(self.__on_error)
self._new_plotdata()
def keyPressEvent(self, event):
super().keyPressEvent(event)
self.graph.update_tooltip(event.modifiers())
def keyReleaseEvent(self, event):
super().keyReleaseEvent(event)
self.graph.update_tooltip(event.modifiers())
def update_radius(self):
# Update the anchor/axes visibility
assert self.plotdata is not None
if self.plotdata.hidecircle is None:
return
minradius = self.radius / 100 + 1e-5
for anchor, item in zip(self.plotdata.anchors, self.plotdata.anchoritem):
item.setVisible(np.linalg.norm(anchor) > minradius)
self.plotdata.hidecircle.setRect(
QRectF(-minradius, -minradius, 2 * minradius, 2 * minradius)
)
def toogle_start(self):
if self._loop.isRunning():
self._loop.cancel()
if isinstance(self, OWFreeViz):
self.btn_start.setText("Optimize")
self.progressBarFinished(processEvents=False)
else:
self._start()
def _start(self):
"""
Start the projection optimization.
"""
assert self.plotdata is not None
X, Y = self.plotdata.X, self.plotdata.Y
anchors = self.plotdata.anchors
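# The generator below runs FreeViz.freeviz() in chunks of `interval`
# iterations, yielding the intermediate (coords, anchors) after each
# chunk so the UI can repaint; it returns once the anchors converge.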
def update_freeviz(interval, initial):
anchors = initial
while True:
res = FreeViz.freeviz(
X, Y, scale=False, center=False, initial=anchors, maxiter=interval
)
_, anchors_new = res[:2]
yield res[:2]
if np.allclose(anchors, anchors_new, rtol=1e-5, atol=1e-4):
return
anchors = anchors_new
interval = 10 # TODO
self._loop.setCoroutine(update_freeviz(interval, anchors))
self.btn_start.setText("Stop")
self.progressBarInit(processEvents=False)
self.setBlocking(True)
self.setStatusMessage("Optimizing")
def reset_initialization(self):
"""
Reset the current 'anchor' initialization, and restart the
optimization if necessary.
"""
running = self._loop.isRunning()
if running:
self._loop.cancel()
if self.data is not None:
self._clear_plot()
self.setup_plot()
if running:
self._start()
def __set_projection(self, res):
# Set/update the projection matrix and coordinate embeddings
assert self.plotdata is not None, "__set_projection call unexpected"
increment = 1 # TODO
self.progressBarAdvance(
increment * 100.0 / MAX_ITERATIONS, processEvents=False
) # TODO
embedding_coords, projection = res
self.plotdata.embedding_coords = embedding_coords
self.plotdata.anchors = projection
self._update_xy()
self.update_radius()
self.update_density()
def __freeviz_finished(self):
# Projection optimization has finished
self.btn_start.setText("Optimize")
self.setStatusMessage("")
self.setBlocking(False)
self.progressBarFinished(processEvents=False)
self.commit()
def __on_error(self, err):
sys.excepthook(type(err), err, getattr(err, "__traceback__", None))
def _update_xy(self):
# Update the plotted embedding coordinates
self.graph.plot_widget.clear()
coords = self.plotdata.embedding_coords
radius = np.max(np.linalg.norm(coords, axis=1))
self.plotdata.embedding_coords = coords / radius
self.plot(show_anchors=(len(self.data.domain.attributes) < MAX_ANCHORS))
def _new_plotdata(self):
self.plotdata = namespace(
validmask=None,
embedding_coords=None,
anchors=[],
anchoritem=[],
X=None,
Y=None,
indicators=[],
hidecircle=None,
data=None,
items=[],
topattrs=None,
rand=None,
selection=None, # np.array
)
def _anchor_circle(self):
# minimum norm an anchor needs to stay visible (radius slider is 0-100)
minradius = self.radius / 100 + 1e-5
for item in chain(self.plotdata.anchoritem, self.plotdata.items):
self.viewbox.removeItem(item)
self.plotdata.anchoritem = []
self.plotdata.items = []
for anchor, var in zip(self.plotdata.anchors, self.data.domain.attributes):
# Always create one AnchorItem per anchor so update_radius() can
# toggle visibility; items inside the circle simply start hidden.
axitem = AnchorItem(line=QLineF(0, 0, *anchor), text=var.name)
axitem.setVisible(np.linalg.norm(anchor) > minradius)
axitem.setPen(pg.mkPen((100, 100, 100)))
axitem.setArrowVisible(True)
self.plotdata.anchoritem.append(axitem)
self.viewbox.addItem(axitem)
hidecircle = QGraphicsEllipseItem()
hidecircle.setRect(QRectF(-minradius, -minradius, 2 * minradius, 2 * minradius))
_pen = QPen(Qt.lightGray, 1)
_pen.setCosmetic(True)
hidecircle.setPen(_pen)
self.viewbox.addItem(hidecircle)
self.plotdata.items.append(hidecircle)
self.plotdata.hidecircle = hidecircle
def update_colors(self):
pass
def sizeHint(self):
return QSize(800, 500)
def _clear(self):
"""
Clear/reset the widget state
"""
self._loop.cancel()
self.data = None
self._selection = None
self._clear_plot()
def _clear_plot(self):
for item in chain(self.plotdata.anchoritem, self.plotdata.items):
self.viewbox.removeItem(item)
self.graph.plot_widget.clear()
self._new_plotdata()
def init_attr_values(self):
self.graph.set_domain(self.data)
@Inputs.data
def set_data(self, data):
self.clear_messages()
self._clear()
self.closeContext()
if data is not None:
if data and data.is_sparse():
self.Warning.sparse_not_supported()
data = None
elif data.domain.class_var is None:
self.Error.no_class_var()
data = None
elif (
data.domain.class_var.is_discrete
and len(data.domain.class_var.values) < 2
):
self.Error.not_enough_class_vars()
data = None
if data and len(data.domain.attributes) > data.X.shape[0]:
self.Error.features_exceeds_instances()
data = None
if data is not None:
valid_instances_count = self._prepare_freeviz_data(data)
if valid_instances_count > MAX_INSTANCES:
self.Error.too_many_data_instances()
data = None
elif valid_instances_count == 0:
self.Error.no_valid_data()
data = None
self.data = data
self.init_attr_values()
if data is not None:
self.cb_class_density.setEnabled(data.domain.has_discrete_class)
self.openContext(data)
self.btn_start.setEnabled(True)
else:
self.btn_start.setEnabled(False)
self._X = self._Y = None
self.graph.new_data(None, None)
@Inputs.data_subset
def set_subset_data(self, subset):
self.subset_data = subset
self.plotdata.subset_mask = None
self.controls.graph.alpha_value.setEnabled(subset is None)
def handleNewSignals(self):
if all(v is not None for v in [self.data, self.subset_data]):
dataids = self.data.ids.ravel()
subsetids = np.unique(self.subset_data.ids)
self._subset_mask = np.in1d(dataids, subsetids, assume_unique=True)
if self._X is not None:
self.setup_plot(True)
self.commit()
def customEvent(self, event):
if event.type() == OWFreeViz.ReplotRequest:
self.__replot_requested = False
self.setup_plot()
else:
super().customEvent(event)
def _prepare_freeviz_data(self, data):
X = data.X
Y = data.Y
mask = np.bitwise_or.reduce(np.isnan(X), axis=1)
mask |= np.isnan(Y)
validmask = ~mask
X = X[validmask, :]
Y = Y[validmask]
if not len(X):
self._X = None
return 0
if data.domain.class_var.is_discrete:
Y = Y.astype(int)
X = X - np.mean(X, axis=0)
span = np.ptp(X, axis=0)
X[:, span > 0] /= span[span > 0].reshape(1, -1)
self._X = X
self._Y = Y
self._validmask = validmask
return len(X)
def setup_plot(self, reset_view=True):
assert self._X is not None
self.graph.jitter_continuous = True
self.__replot_requested = False
X = self.plotdata.X = self._X
self.plotdata.Y = self._Y
self.plotdata.validmask = self._validmask
self.plotdata.selection = (
self._selection
if self._selection is not None
else np.zeros(len(self._validmask), dtype=np.uint8)
)
anchors = self.plotdata.anchors
if len(anchors) == 0:
if self.initialization == self.Circular:
anchors = FreeViz.init_radial(X.shape[1])
else:
anchors = FreeViz.init_random(X.shape[1], 2)
EX = np.dot(X, anchors)
c = np.zeros((X.shape[0], X.shape[1]))
for i in range(X.shape[0]):
c[i] = np.argsort(
(np.power(X[i] * anchors[:, 0], 2) + np.power(X[i] * anchors[:, 1], 2))
)[::-1]
self.plotdata.topattrs = np.array(c, dtype=int)[:, :10]
radius = np.max(np.linalg.norm(EX, axis=1))
self.plotdata.anchors = anchors
coords = EX / radius
self.plotdata.embedding_coords = coords
if reset_view:
self.viewbox.setRange(RANGE)
self.viewbox.setAspectLocked(True, 1)
self.plot(reset_view=reset_view)
def randomize_indices(self):
X = self._X
self.plotdata.rand = (
np.random.choice(len(X), MAX_POINTS, replace=False)
if len(X) > MAX_POINTS
else None
)
def manual_move_anchor(self, show_anchors=True):
self.__replot_requested = False
X = self.plotdata.X = self._X
anchors = self.plotdata.anchors
validmask = self.plotdata.validmask
EX = np.dot(X, anchors)
data_x = self.data.X[validmask]
data_y = self.data.Y[validmask]
radius = np.max(np.linalg.norm(EX, axis=1))
if self.plotdata.rand is not None:
rand = self.plotdata.rand
EX = EX[rand]
data_x = data_x[rand]
data_y = data_y[rand]
selection = self.plotdata.selection[validmask]
selection = selection[rand]
else:
selection = self.plotdata.selection[validmask]
coords = EX / radius
if show_anchors:
self._anchor_circle()
attributes = (
() + self.data.domain.attributes + (self.variable_x, self.variable_y)
)
domain = Domain(attributes=attributes, class_vars=self.data.domain.class_vars)
data = Table.from_numpy(domain, X=np.hstack((data_x, coords)), Y=data_y)
self.graph.new_data(data, None)
self.graph.selection = selection
self.graph.update_data(self.variable_x, self.variable_y, reset_view=False)
def plot(self, reset_view=False, show_anchors=True):
if show_anchors:
self._anchor_circle()
attributes = (
() + self.data.domain.attributes + (self.variable_x, self.variable_y)
)
domain = Domain(
attributes=attributes,
class_vars=self.data.domain.class_vars,
metas=self.data.domain.metas,
)
mask = self.plotdata.validmask
array = np.zeros((len(self.data), 2), dtype=float)  # np.float alias is removed in newer NumPy
array[mask] = self.plotdata.embedding_coords
data = self.data.transform(domain)
data[:, self.variable_x] = array[:, 0].reshape(-1, 1)
data[:, self.variable_y] = array[:, 1].reshape(-1, 1)
subset_data = (
data[self._subset_mask & mask]
if self._subset_mask is not None and len(self._subset_mask)
else None
)
self.plotdata.data = data
self.graph.new_data(data[mask], subset_data)
if self.plotdata.selection is not None:
self.graph.selection = self.plotdata.selection[self.plotdata.validmask]
self.graph.update_data(self.variable_x, self.variable_y, reset_view=reset_view)
def reset_graph_data(self, *_):
if self.data is not None:
self.graph.rescale_data()
self._update_graph()
def _update_graph(self, reset_view=True, **_):
self.graph.zoomStack = []
assert self.graph.data is not None
self.graph.update_data(self.variable_x, self.variable_y, reset_view)
def update_density(self):
if self.graph.data is None:
return
self._update_graph(reset_view=False)
def selection_changed(self):
if self.graph.selection is not None:
pd = self.plotdata
pd.selection[pd.validmask] = self.graph.selection
self._selection = pd.selection
self.commit()
def prepare_data(self):
pass
def commit(self):
selected = annotated = components = None
graph = self.graph
if self.data is not None and self.plotdata.validmask is not None:
name = self.data.name
metas = () + self.data.domain.metas + (self.variable_x, self.variable_y)
domain = Domain(
attributes=self.data.domain.attributes,
class_vars=self.data.domain.class_vars,
metas=metas,
)
data = self.plotdata.data.transform(domain)
validmask = self.plotdata.validmask
mask = np.array(validmask, dtype=int)
mask[mask == 1] = (
graph.selection if graph.selection is not None else 0  # no selection: clear all flags
)
selection = (
np.array([], dtype=np.uint8) if mask is None else np.flatnonzero(mask)
)
if len(selection):
selected = data[selection]
selected.name = name + ": selected"
selected.attributes = self.data.attributes
if graph.selection is not None and np.max(graph.selection) > 1:
annotated = create_groups_table(data, mask)
else:
annotated = create_annotated_table(data, selection)
annotated.attributes = self.data.attributes
annotated.name = name + ": annotated"
comp_domain = Domain(
self.data.domain.attributes, metas=[StringVariable(name="component")]
)
metas = np.array([["FreeViz 1"], ["FreeViz 2"]])
components = Table.from_numpy(
comp_domain, X=self.plotdata.anchors.T, metas=metas
)
components.name = name + ": components"
self.Outputs.selected_data.send(selected)
self.Outputs.annotated_data.send(annotated)
self.Outputs.components.send(components)
def send_report(self):
if self.data is None:
return
def name(var):
return var and var.name
caption = report.render_items_vert(
(
("Color", name(self.graph.attr_color)),
("Label", name(self.graph.attr_label)),
("Shape", name(self.graph.attr_shape)),
("Size", name(self.graph.attr_size)),
(
"Jittering",
self.graph.jitter_size != 0
and "{} %".format(self.graph.jitter_size),
),
)
)
self.report_plot()
if caption:
self.report_caption(caption)
class MoveIndicator(pg.GraphicsObject):
def __init__(
self, x, y, parent=None, line=QLineF(), scene_size=1, text="", **kwargs
):
super().__init__(parent, **kwargs)
self.arrows = [
pg.ArrowItem(
pos=(
x - scene_size * 0.07 * np.cos(np.radians(angle)),
y + scene_size * 0.07 * np.sin(np.radians(angle)),
),
parent=self,
angle=angle,
headLen=13,
tipAngle=45,
brush=pg.mkColor(128, 128, 128),
)
for angle in (0, 90, 180, 270)
]
def paint(self, painter, option, widget):
pass
def boundingRect(self):
return QRectF()
def main(argv=None):
import sip
argv = sys.argv[1:] if argv is None else argv
if argv:
filename = argv[0]
else:
filename = "zoo"
data = Table(filename)
app = QApplication([])
w = OWFreeViz()
w.set_data(data)
w.set_subset_data(data[::10])
w.handleNewSignals()
w.show()
w.raise_()
r = app.exec()
w.set_data(None)
w.saveSettings()
sip.delete(w)
del w
return r
if __name__ == "__main__":
sys.exit(main())
|
419695
|
from kivy.app import App
from kivy.uix.popup import Popup
from kivy.properties import StringProperty, NumericProperty, ListProperty
from kivy.logger import Logger
from kivy.lang import Builder
from models import Search, Filters
from kivymd.dialog import MDDialog
from kivymd.textfields import MDTextField
from kivymd.button import MDFlatButton
Builder.load_file("kv/popups.kv")
class CaptchaPopup(MDDialog):
action = StringProperty("")
def __init__(self, **kwargs):
super(CaptchaPopup, self).__init__(**kwargs)
self.add_action_button("Try Again", action=lambda *x: self.try_again())
self.add_action_button("Continue",
action=lambda *x: self.non_restricted())
def try_again(self):
self.action = "try_again"
self.dismiss()
def non_restricted(self):
self.action = "front_screen"
self.dismiss()
class SearchPopup(MDDialog):
search_suggestions = ListProperty([])
search_buttons = ListProperty([])
def __init__(self, **kwargs):
super(SearchPopup, self).__init__(**kwargs)
self.add_action_button("Search", action=lambda *x: self.savesearch())
def savesearch(self):
db = App.get_running_app().db
already_exists = db.query(Search).filter_by(searchterm=self.ids.searcharea.text).first()
front_screen = App.get_running_app().root.ids.sadpanda_screen_manager.get_screen("front_screen")
front_screen.do_search(self.ids.searcharea.text)
if already_exists:
self.dismiss()
else:
newsearch = Search(searchterm=self.ids.searcharea.text)
db.add(newsearch)
db.commit()
self.dismiss()
def on_search_suggestions(self, object, value):
for button in self.search_buttons:
self.ids.searchlist.remove_widget(button)
self.search_buttons = []
for suggestion in value:
button = MDFlatButton(text=suggestion, size_hint=(1, None))
button.bind(on_release=self.button_search)
self.search_buttons.append(button)
self.ids.searchlist.add_widget(button)
def button_search(self, instance):
searchterm = instance.text
Logger.info("Button text: {}".format(searchterm))
front_screen = App.get_running_app().root.ids.sadpanda_screen_manager.get_screen("front_screen")
front_screen.do_search(searchterm)
self.dismiss()
def find_search(self, searchterm):
if len(searchterm) > 0:
db = App.get_running_app().db
suggestions = db.query(Search).filter(Search.searchterm.like("{}%".format(searchterm))).all()
Logger.info("Suggestions: {}".format(suggestions))
terms = set()
for suggestion in reversed(suggestions):
terms.add(suggestion.searchterm)
self.search_suggestions = list(terms)
def open_filters(self):
fpop = FilterPopup()
fpop.bind(on_dismiss=self.set_filters)
fpop.open()
def set_filters(self, instance):
App.get_running_app().root.set_filters(instance)
class SearchArea(MDTextField):
def savesearch(self):
newsearch = Search(searchterm=self.ids.searchstring.text)
db = App.get_running_app().db
db.add(newsearch)
db.commit()
self.dismiss()
class FilterPopup(Popup):
doujinshi = NumericProperty(0)
manga = NumericProperty(0)
artistcg = NumericProperty(0)
gamecg = NumericProperty(0)
western = NumericProperty(0)
nonh = NumericProperty(0)
imageset = NumericProperty(0)
cosplay = NumericProperty(0)
asianporn = NumericProperty(0)
misc = NumericProperty(0)
def __init__(self, **kwargs):
super(FilterPopup, self).__init__(**kwargs)
db = App.get_running_app().db
filters = db.query(Filters).order_by(Filters.id.desc()).first()
filter_fields = (
    "doujinshi", "manga", "artistcg", "gamecg", "western",
    "nonh", "imageset", "cosplay", "asianporn", "misc",
)
if filters:
    for field in filter_fields:
        setattr(self, field, getattr(filters, field))
for field in filter_fields:
    if getattr(self, field) == 1:
        self.ids[field].active = True
|
419699
|
from typing import List, Optional
import pulumi_aws as aws
from infra.config import STACK_NAME
import pulumi
class Cache(pulumi.ComponentResource):
"""
An ElastiCache cluster instance.
"""
def __init__(
self,
name: str,
subnet_ids: pulumi.Input[List[str]],
vpc_id: pulumi.Input[str],
# We grab this security group from the Nomad Agents stack.
nomad_agent_security_group_id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
) -> None:
super().__init__("grapl:Cache", name, None, opts)
redis_port = 6379
self.subnet_group = aws.elasticache.SubnetGroup(
f"{name}-cache-subnet-group",
subnet_ids=subnet_ids,
opts=pulumi.ResourceOptions(parent=self),
)
self.security_group = aws.ec2.SecurityGroup(
f"{name}-cache-security-group",
vpc_id=vpc_id,
# Tags are necessary for the moment so we can look up the resource from a different pulumi stack.
# Once this is refactored we can remove the tags
tags={"Name": f"{name}-{STACK_NAME}"},
opts=pulumi.ResourceOptions(parent=self),
)
# Allow communication between nomad-agents and redis
# These are in different VPCs with the peering done in the networking module
aws.ec2.SecurityGroupRule(
"nomad-agents-egress-to-redis",
type="egress",
security_group_id=nomad_agent_security_group_id,
from_port=redis_port,
to_port=redis_port,
protocol="tcp",
source_security_group_id=self.security_group.id,
opts=pulumi.ResourceOptions(parent=self.security_group),
)
aws.ec2.SecurityGroupRule(
"redis-ingress-from-nomad-agents",
type="ingress",
security_group_id=self.security_group.id,
from_port=redis_port,
to_port=redis_port,
protocol="tcp",
source_security_group_id=nomad_agent_security_group_id,
opts=pulumi.ResourceOptions(parent=self.security_group),
)
# Note that this is a single-node Redis "cluster"
# (a.k.a. "Cluster Mode Disabled")
self.cluster = aws.elasticache.Cluster(
f"{name}-cluster",
engine="redis",
port=redis_port,
node_type="cache.t2.small",
num_cache_nodes=1,
subnet_group_name=self.subnet_group.name,
security_group_ids=[self.security_group.id],
opts=pulumi.ResourceOptions(parent=self),
)
self.register_outputs({})
@property
def endpoint(self) -> pulumi.Output[str]:
"""
Return an endpoint URL for accessing this cache from other services.
Uses the "redis://" protocol.
"""
# NOTE: This only works because we have single node
# clusters. If we add more, we should expose them all in a
# better way.
return pulumi.Output.all(
host=self.host, # type: ignore[arg-type]
port=self.port, # type: ignore[arg-type]
).apply(lambda args: f"redis://{args['host']}:{args['port']}")
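# For example, with a (hypothetical) node at
# my-cache.abc123.use1.cache.amazonaws.com this resolves to
# "redis://my-cache.abc123.use1.cache.amazonaws.com:6379".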
@property
def host(self) -> pulumi.Output[str]:
"""
Returns the host of the first (and only) node in the cluster.
"""
return self.cluster.cache_nodes[0].address # type: ignore[no-any-return]
@property
def port(self) -> pulumi.Output[int]:
"""
Returns the port of the first (and only) node in the cluster.
"""
return self.cluster.cache_nodes[0].port # type: ignore[no-any-return]
def allow_egress_to_cache_for(
self, name: str, origin: aws.ec2.SecurityGroup
) -> None:
"""
Create an egress rule for the `origin` security group, allowing communication to the cache's port.
The security group rule will be a child of the `origin` security group Pulumi resource.
`name` is a descriptive string that will be incorporated into the Pulumi resource name of the security group rule.
"""
aws.ec2.SecurityGroupRule(
f"{name}-egress-to-cache",
type="egress",
description=self.cluster.id.apply(
lambda id: f"Allow outbound traffic to Redis cluster {id}"
),
from_port=self.port,
to_port=self.port,
protocol=aws.ec2.ProtocolType.TCP,
security_group_id=origin.id,
source_security_group_id=self.security_group.id,
opts=pulumi.ResourceOptions(parent=origin),
)
|
419766
|
class Solution(object):
def divisorGame(self, N):
"""
:type N: int
:rtype: bool
"""
return N % 2 == 0
|
419767
|
import functools
from copy import deepcopy
from datetime import date
from itertools import chain, groupby
from controls.exceptions import MissingPeriodError
from controls.models import ModuleSettings, Period
from crispy_forms.helper import FormHelper
from crispy_forms.utils import render_crispy_form
from django.conf import settings
from django.contrib import messages
from django.contrib.postgres.search import TrigramSimilarity
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.db import transaction
from django.db.models import Q, Subquery, Sum
from django.http import (Http404, HttpResponse, HttpResponseForbidden,
HttpResponseRedirect, JsonResponse)
from django.shortcuts import get_object_or_404, render, reverse
from django.template.context_processors import csrf
from django.template.loader import render_to_string
from django.views.generic import DetailView, ListView, View
from django.views.generic.base import ContextMixin, TemplateResponseMixin
from mptt.utils import get_cached_trees
from nominals.models import Nominal
from querystring_parser import parser
from accountancy.helpers import (AuditTransaction, JSONBlankDate,
bulk_delete_with_history, sort_multiple)
def get_trig_vectors_for_different_inputs(model_attrs_and_inputs):
"""
Build a combined TrigramSimilarity expression by summing one
similarity per (model attribute, search input) pair.
"""
trig_vectors = [
TrigramSimilarity(model_attr, search_input)
for model_attr, search_input in model_attrs_and_inputs
]
return functools.reduce(lambda a, b: a + b, trig_vectors)
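# Hedged usage sketch (model and field names are hypothetical):
#
#     Contact.objects.annotate(
#         similarity=get_trig_vectors_for_different_inputs([
#             ("first_name", "jon"),
#             ("last_name", "smith"),
#         ])
#     ).filter(similarity__gt=0.5)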
def get_value(obj, field):
try:
return getattr(obj, field)
except AttributeError:
return obj.get(field)
"""
Scroller and ScrollerInView are used by the class which supports the jQuery
DataTables scroller, i.e. JQueryDataTableScrollerMixin.
The DataTables scroller differs from normal pagination in that the requested
slice rarely coincides with a page boundary: it may run, for example, from the
middle of one page to the middle of the next.
paginate_by is a method which supports pagination and uses the Paginator class
from Django core. It returns both a Paginator instance and a page object,
which is just the object returned by asking the paginator for a page number.
The paginator object is used for getting the count of the whole filtered set;
the page object contains the slice of objects which will be rendered on the UI.
The Scroller class below uses the same interface as the Paginator class and
the ScrollerInView class uses the same interface as the page object: the
second is needed to take the slice we want and the first to return it. This
way we can swap these classes in for the paginator classes without changing
the calling code.
"""
class ScrollerInView:
def __init__(self, queryset_or_object_list, start, length):
self.queryset_or_object_list = queryset_or_object_list
self.start = start
self.length = length
@property
def object_list(self):
start = self.start
length = self.length
return self.queryset_or_object_list[int(start): int(start) + int(length)]
class Scroller:
def __init__(self, queryset_or_object_list, start, length):
if isinstance(queryset_or_object_list, list):
self.is_queryset = False
else:
self.is_queryset = True # at least we expect a queryset
self._q = queryset_or_object_list
self.start = start
self.length = length
@property
def queryset(self):
if not self.is_queryset:
raise AttributeError("Queryset was not passed to Scroller")
return self._q.all() # a new copy of the queryset object so original is not evaluated
@property
def queryset_or_object_list(self):
if self.is_queryset:
return self.queryset
else:
return self._q
@property
def count(self):
if self.is_queryset:
return self.queryset.count()
else:
return len(self._q)
@property
def visible(self):
return ScrollerInView(self.queryset_or_object_list, self.start, self.length)
class JQueryDataTableMixin:
"""
A mixin to help with implementing jQuery DataTables where the data is fetched via Ajax.
"""
paginate_by = 25
searchable_fields = None
row_identifier = None
def get(self, request, *args, **kwargs):
if request.is_ajax():
table_data = self.get_table_data()
return JsonResponse(data=table_data, safe=False)
return self.render_to_response(self.load_page())
def apply_filter(self, queryset, **kwargs):
parsed_request = parser.parse(self.request.GET.urlencode())
if search_value := parsed_request["search"]["value"]:
if self.searchable_fields:
queryset = queryset.annotate(
similarity=(
get_trig_vectors_for_different_inputs([
(field, search_value, )
for field in self.searchable_fields
])
)
).filter(similarity__gt=0.5)
return queryset
def get_row(self, obj):
row = {}
for column in self.columns:
row[column] = getattr(obj, column)
return row
def get_row_href(self, obj):
pass
def get_queryset(self, **kwargs):
return self.model.objects.all()
def get_row_identifier(self, row):
if self.row_identifier:
return getattr(row, self.row_identifier)
return row.pk
def order(self, queryset):
return queryset.order_by(*self.order_by())
def queryset_count(self, queryset):
q = queryset.all() # creates a new queryset object
# otherwise queryset argument is evaluated
return q.count()
def set_dt_row_data(self, obj, row):
row["DT_RowData"] = {
"pk": self.get_row_identifier(obj),
"href": self.get_row_href(obj)
}
return row
def get_table_data(self, **kwargs):
queryset = self.get_queryset(**kwargs)
# counts the set before filtering i.e. total
queryset_count = self.queryset_count(queryset)
queryset = self.apply_filter(queryset, **kwargs)
queryset = self.order(queryset)
paginator_object, page_object = self.paginate_objects(queryset)
rows = []
for obj in page_object.object_list:
row = self.get_row(obj)
row = self.set_dt_row_data(obj, row)
rows.append(row)
draw = int(self.request.GET.get("draw", 0))
recordsTotal = queryset_count
recordsFiltered = paginator_object.count # counts the filtered set
data = rows
return {
"draw": draw,
"recordsTotal": recordsTotal,
"recordsFiltered": recordsFiltered,
"data": data
}
def load_page(self, **kwargs):
return {}
def paginate_objects(self, objects):
"""
Only use this if you are using pagination. It isn't suitable for jQuery scroller because the
scroller will request slices which don't necessarily conform to the whole pages. For this see the
mixin class JQueryDataTableScrollMixin below.
"""
start = self.request.GET.get("start", 0)
paginate_by = self.request.GET.get("length", self.paginate_by)
paginator_obj = Paginator(objects, paginate_by)
page_number = int(int(start) / int(paginate_by)) + 1
try:
page_obj = paginator_obj.page(page_number)
except PageNotAnInteger:
page_obj = paginator_obj.page(1)
except EmptyPage:
page_obj = paginator_obj.page(paginator_obj.num_pages)
return paginator_obj, page_obj
def order_objects(self, objs):
"""
Sometimes it is not possible in Django to use the ORM, or it would be tricky,
so we have to order in python.
"""
orm_ordering = self.order_by()
ordering = []
for order in orm_ordering:
if order[0] == "-":
field = order[1:]
desc = True
else:
field = order
desc = False
            ordering.append(
                # bind `field` as a default argument to avoid the late-binding
                # closure bug (every lambda would otherwise use the last field)
                (lambda obj, field=field: get_value(obj, field), desc)
            )
return sort_multiple(objs, *ordering)
def order_by(self):
ordering = [] # will pass this to ORM to order the fields correctly
# create objects out of GET params
# without this package the QueryDict object is tricky to use. We just want a nested dict
d = parser.parse(self.request.GET.urlencode())
# which this package gives us.
order = d.get("order")
columns = d.get("columns")
if order:
for order_index, ordered_column in order.items():
column_index = ordered_column.get("column")
try:
column_index = int(column_index)
if column_index >= 0:
try:
column = columns[column_index]
field_name = column.get("data")
if field_name:
order_dir = ordered_column.get("dir")
if order_dir in ["asc", "desc"]:
ordering.append(
("" if order_dir ==
"asc" else "-") + field_name
)
                        except IndexError:
break
                except (TypeError, ValueError):
break
return ordering
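# A minimal sketch of a view consuming JQueryDataTableMixin (the model and
# field names below are hypothetical, not part of this module). A subclass
# supplies the model, the columns to render and, optionally, the fields the
# trigram search should cover:
#   class ContactTable(JQueryDataTableMixin, TemplateResponseMixin, View):
#       template_name = "contacts/table.html"
#       model = Contact
#       columns = ["code", "name", "email"]
#       searchable_fields = ["code", "name"]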
class CustomFilterJQueryDataTableMixin:
"""
By default jQuery Datatables supports filtering by a single search input field.
Often times however we'll want to use our own form for filtering.
The form is rendered on the client like the table data, via ajax. So we must
render the form in the view.
"""
def get_table_data(self, **kwargs):
"""
get table data and filter form
"""
        use_form = bool(self.request.GET.get("use_adv_search"))
if use_form:
form = self.get_filter_form(bind_form=True)
kwargs.update({"form": form})
else:
form = self.get_filter_form()
table_data = super().get_table_data(**kwargs)
ctx = {}
ctx.update(csrf(self.request))
if hasattr(self, "form_template"):
ctx["form"] = form
form_html = render_to_string(
self.form_template, ctx)
else:
form_html = render_crispy_form(form, context=ctx)
table_data["form"] = form_html
return table_data
def get_filter_form_kwargs(self, **kwargs):
form_kwargs = {}
if kwargs.get("bind_form"):
kwargs.pop("bind_form")
kwargs.update({"data": self.request.GET})
form_kwargs.update(kwargs)
return form_kwargs
def get_filter_form(self, **kwargs):
return self.filter_form_class(
**self.get_filter_form_kwargs(**kwargs)
)
def apply_filter(self, queryset, **kwargs):
if form := kwargs.get("form"):
if form.is_valid():
queryset = self.filter_form_valid(queryset, form)
else:
queryset = self.filter_form_invalid(queryset, form)
return queryset
def filter_form_valid(self, queryset, form):
return queryset
def filter_form_invalid(self, queryset, form):
return queryset
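# A minimal sketch of the extra hooks this mixin expects (the form class and
# field below are hypothetical): a `filter_form_class` plus an override of
# `filter_form_valid` applying the cleaned data to the queryset.
#   class InvoiceTable(CustomFilterJQueryDataTableMixin, JQueryDataTableMixin,
#                      TemplateResponseMixin, View):
#       filter_form_class = InvoiceFilterForm
#       def filter_form_valid(self, queryset, form):
#           if ref := form.cleaned_data.get("reference"):
#               queryset = queryset.filter(ref=ref)
#           return queryset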
class JQueryDataTableScrollerMixin:
"""
Supports the scroller feature of jQueryDataTables.
"""
def paginate_objects(self, queryset_or_object_list):
start = self.request.GET.get("start", 0)
length = self.request.GET.get("length", 25)
s = Scroller(queryset_or_object_list, start, length)
return s, s.visible
class SalesAndPurchaseSearchMixin:
def apply_advanced_search(self, queryset, cleaned_data):
reference = cleaned_data.get("reference")
total = cleaned_data.get("total")
period = cleaned_data.get("period")
search_within = cleaned_data.get("search_within")
start_date = cleaned_data.get("start_date")
end_date = cleaned_data.get("end_date")
include_voided = cleaned_data.get("include_voided")
if reference:
queryset = (
queryset.annotate(
similarity=(
get_trig_vectors_for_different_inputs(
self.get_list_of_search_values_for_model_attrs(
cleaned_data)
)
)
).filter(similarity__gt=0.5)
)
if total:
queryset = queryset.filter(total=total)
if period:
queryset = queryset.filter(period=period)
if start_date:
q_object_start_date = Q()
if search_within == "any" or search_within == "tran":
q_object_start_date |= Q(date__gte=start_date)
if search_within == "any" or search_within == "due":
q_object_start_date |= Q(due_date__gte=start_date)
queryset = queryset.filter(q_object_start_date)
if end_date:
q_object_end_date = Q()
if search_within == "any" or search_within == "tran":
q_object_end_date |= Q(date__lte=end_date)
if search_within == "any" or search_within == "due":
q_object_end_date |= Q(due_date__lte=end_date)
queryset = queryset.filter(q_object_end_date)
if not include_voided:
queryset = queryset.exclude(status="v")
return queryset
class VatSearchMixin:
def apply_advanced_search(self, queryset, cleaned_data):
reference = cleaned_data.get("reference")
total = cleaned_data.get("total")
period = cleaned_data.get("period")
start_date = cleaned_data.get("start_date")
end_date = cleaned_data.get("end_date")
if reference:
queryset = (
queryset.annotate(
similarity=(
get_trig_vectors_for_different_inputs(
self.get_list_of_search_values_for_model_attrs(
cleaned_data)
)
)
).filter(similarity__gt=0.5)
)
if total:
queryset = queryset.filter(total=total)
if period:
queryset = queryset.filter(period=period)
if start_date:
queryset = queryset.filter(date__gte=start_date)
if end_date:
queryset = queryset.filter(date__lte=end_date)
return queryset
class NominalSearchMixin:
def apply_advanced_search(self, queryset, cleaned_data):
queryset = super().apply_advanced_search(queryset, cleaned_data)
include_brought_forwards = cleaned_data.get("include_brought_forwards")
if not include_brought_forwards:
queryset = queryset.exclude(type="nbf")
return queryset
class BaseTransactionsList(CustomFilterJQueryDataTableMixin,
JQueryDataTableMixin,
TemplateResponseMixin,
View):
column_transformers = {}
    # keys are the form fields you want to search on;
    form_field_to_searchable_model_attr = {}
    # values are the model attrs each form field maps to
def get_list_of_search_values_for_model_attrs(self, form_cleaned_data):
"""
Will be used for Trigram Search
"""
return [
(model_attr, form_cleaned_data.get(form_field, ""))
for form_field, model_attr in self.form_field_to_searchable_model_attr.items()
]
def load_page(self, **kwargs):
context_data = {}
context_data["columns"] = [field[0] for field in self.fields]
context_data["column_labels"] = [field[1] for field in self.fields]
return context_data
def get_row(self, obj):
for column, transformer in self.column_transformers.items():
obj[column] = transformer(obj[column])
return obj
def filter_form_valid(self, queryset, form):
return self.apply_advanced_search(queryset, form.cleaned_data)
def get_row_identifier(self, row):
if self.row_identifier:
return row[self.row_identifier]
return row["id"]
class VatTransList(VatSearchMixin, BaseTransactionsList):
pass
class CashBookTransList(VatSearchMixin, BaseTransactionsList):
pass
class NominalTransList(NominalSearchMixin, VatSearchMixin, BaseTransactionsList):
pass
class SalesAndPurchasesTransList(SalesAndPurchaseSearchMixin, BaseTransactionsList):
pass
class RESTBaseTransactionMixin:
def create_or_update_related_transactions(self, **kwargs):
self.create_or_update_nominal_transactions(**kwargs)
self.create_or_update_vat_transactions(**kwargs)
def get_transaction_type_object(self):
if hasattr(self, "transaction_type_object"):
return self.transaction_type_object
else:
self.transaction_type_object = self.header_obj.get_type_transaction()
return self.transaction_type_object
def lines_should_be_ordered(self):
if hasattr(self, "line"):
return self.line.get("can_order", True)
def get_header_prefix(self):
return self.header.get('prefix', 'header')
def get_header_form_kwargs(self):
kwargs = {
'prefix': self.get_header_prefix()
}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
})
return kwargs
def get_header_form(self):
if hasattr(self, "header_form"):
return self.header_form
form_class = self.header.get('form')
self.header_form = form_class(**self.get_header_form_kwargs())
return self.header_form
def requires_analysis(self, header_form):
t = None
if hasattr(header_form, "cleaned_data"):
t = header_form.cleaned_data.get("type")
else:
t = self.header_form.initial.get('type')
if t:
if t in self.get_header_model().get_types_requiring_analysis():
return True
return False
def get_header_model(self):
return self.header.get('model')
def requires_lines(self, header_form):
t = None
if hasattr(header_form, "cleaned_data"):
t = header_form.cleaned_data.get("type")
else:
t = self.header_form.initial.get('type')
if t:
if t in self.get_header_model().get_types_requiring_lines():
return True
return False
def get_line_model(self):
return self.line.get('model')
def get_line_formset_queryset(self):
return self.get_line_model().objects.none()
def get_line_prefix(self):
if hasattr(self, 'line'):
return self.line.get('prefix', 'line')
def get_line_formset_kwargs(self, header=None):
kwargs = {
'prefix': self.get_line_prefix(),
'queryset': self.get_line_formset_queryset()
}
if self.request.method in ('POST', 'PUT'):
if self.requires_lines(self.header_form):
kwargs.update({
'data': self.request.POST
})
kwargs.update({
'header': header
})
if (self.requires_lines(self.header_form) and not self.requires_analysis(self.header_form)):
brought_forward = True
else:
brought_forward = False
# a flag used for UI rendering e.g. hide the nominal column
kwargs["brought_forward"] = brought_forward
# and to decide whether the field is required server side
return kwargs
def get_line_formset(self, header=None):
if hasattr(self, 'line'):
if hasattr(self, 'line_formset'):
return self.line_formset
else:
formset_class = self.line.get('formset')
formset = formset_class(**self.get_line_formset_kwargs(header))
formset.helper = FormHelper()
formset.helper.template = self.line_formset_template
return formset
def flag_invalid_forms(self):
"""
We don't actuallly need the header_form_valid flag because
if the form is successful the header_form.instance is passed to
the subsequent line and match formsets.
"""
if self.header_form.is_valid():
header_form_valid = True
else:
header_form_valid = False
line_formset = None
if hasattr(self, "line_formset"):
line_formset = self.line_formset
else:
if self.requires_lines(self.header_form):
self.line_formset = line_formset = self.get_line_formset()
if line_formset:
line_formset.header_form_valid = header_form_valid
def invalid_forms(self):
self.forms_invalid = True
self.flag_invalid_forms()
return self.render_to_response(self.get_context_data())
def post(self, request, *args, **kwargs):
self.header_form = self.get_header_form()
if self.header_form.is_valid():
self.header_obj = self.header_form.save(commit=False)
self.line_formset = self.get_line_formset(self.header_obj)
self.line_formset.header_form_valid = True
if self.line_formset.is_valid():
self.header_obj.save()
self.lines_are_valid()
else:
return self.invalid_forms()
else:
return self.invalid_forms()
return self.get_successful_response()
class BaseTransaction(
RESTBaseTransactionMixin,
TemplateResponseMixin,
ContextMixin,
View):
line_formset_template = "accounts/line_formset.html"
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
def get_successful_response(self):
messages.success(
self.request,
self.get_success_message()
)
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
if creation_type := self.request.POST.get('approve'):
if creation_type == "add_another":
# the relative path including the GET parameters e.g. /purchases/create?t=i
return self.request.get_full_path()
return self.success_url
def get_context_data(self, **kwargs):
if 'header_form' not in kwargs:
kwargs["header_form"] = self.get_header_form()
if 'header_prefix' not in kwargs:
kwargs['header_form_prefix'] = self.get_header_prefix()
if self.requires_lines(kwargs["header_form"]):
if 'line_form_prefix' not in kwargs:
kwargs["line_form_prefix"] = self.get_line_prefix()
if 'line_formset' not in kwargs:
kwargs["line_formset"] = self.get_line_formset()
if 'forms_invalid' not in kwargs:
if hasattr(self, 'forms_invalid'):
kwargs['forms_invalid'] = self.forms_invalid
if 'negative_transaction_types' not in kwargs:
# calculator.js needs this
kwargs['negative_transaction_types'] = self.get_header_model().negatives
if hasattr(self, 'create_on_the_fly'):
for form in self.create_on_the_fly:
kwargs[form] = self.create_on_the_fly[form]
return super().get_context_data(**kwargs)
def get(self, request, *args, **kwargs):
return self.render_to_response(self.get_context_data())
class RESTBaseCreateTransactionMixin:
permission_action = 'create'
def get_header_form_type(self):
if not (t := self.request.GET.get("t")):
return self.default_type
return t
def create_or_update_nominal_transactions(self, **kwargs):
kwargs.update({
"line_cls": self.get_line_model(),
"vat_nominal_name": settings.DEFAULT_VAT_NOMINAL,
})
transaction_type_object = self.get_transaction_type_object()
self.nom_trans = transaction_type_object.create_nominal_transactions(
self.nominal_model,
self.nominal_transaction_model,
**kwargs
)
def create_or_update_vat_transactions(self, **kwargs):
kwargs.update({
"line_cls": self.get_line_model(),
})
transaction_type_object = self.get_transaction_type_object()
self.vat_trans = transaction_type_object.create_vat_transactions(
self.vat_transaction_model,
**kwargs
)
def lines_are_valid(self):
line_no = 1
lines = []
        line_forms = (self.line_formset.ordered_forms
                      if self.lines_should_be_ordered() else self.line_formset)
for form in line_forms:
if form.empty_permitted and form.has_changed():
line = form.save(commit=False)
line.header = self.header_obj
line.type = self.header_obj.type
line.line_no = line_no
lines.append(line)
line_no = line_no + 1
if lines:
self.lines = self.get_line_model().objects.audited_bulk_create(lines)
self.create_or_update_related_transactions(lines=lines)
def get_header_form_kwargs(self):
kwargs = super().get_header_form_kwargs()
        if self.request.method == 'GET':
kwargs["initial"] = {
"type": self.get_header_form_type()
}
return kwargs
class BaseCreateTransaction(
RESTBaseCreateTransactionMixin,
BaseTransaction):
def get_success_message(self):
return "Transaction was created successfully."
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["create"] = True # some javascript templates depend on this
context["header_type"] = self.get_header_form_type()
return context
class CreateCashBookEntriesMixin:
def get_cash_book_transaction_model(self):
return self.cash_book_transaction_model
def create_or_update_cash_book_transactions(self, **kwargs):
self.transaction_type_object.create_cash_book_entry(
self.get_cash_book_transaction_model(),
**kwargs
)
def create_or_update_related_transactions(self, **kwargs):
super().create_or_update_related_transactions(**kwargs)
self.create_or_update_cash_book_transactions(**kwargs)
class CreateCashBookTransaction(CreateCashBookEntriesMixin, BaseCreateTransaction):
pass
class BaseMatchingMixin:
matching_formset_template = "accounts/whole_uni_formset.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["match_formset"] = self.get_match_formset()
context["match_form_prefix"] = self.get_match_prefix()
return context
def get_match_model(self):
return self.match.get('model')
def get_match_prefix(self):
if hasattr(self, 'match'):
return self.match.get('prefix', 'match')
def get_match_formset_queryset(self):
return self.get_match_model().objects.none()
def get_match_formset_kwargs(self, header=None):
kwargs = {
'prefix': self.get_match_prefix(),
'queryset': self.get_match_formset_queryset(),
'match_by': header
}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
})
return kwargs
def get_match_formset(self, header=None):
if hasattr(self, 'match'):
if hasattr(self, 'match_formset'):
return self.match_formset
else:
formset_class = self.match.get('formset')
f = formset_class(**self.get_match_formset_kwargs(header))
f.helper = FormHelper()
f.helper.template = self.matching_formset_template
return f
def flag_invalid_forms(self):
super().flag_invalid_forms()
if self.header_form.is_valid():
header_form_valid = True
else:
header_form_valid = False
lines_are_valid = None
if hasattr(self, "line_formset"):
if self.line_formset.is_valid():
lines_are_valid = True
else:
lines_are_valid = False
if hasattr(self, "match_formset"):
match_formset = self.match_formset
else:
self.match_formset = match_formset = self.get_match_formset()
match_formset.header_form_valid = header_form_valid
if lines_are_valid is not None:
match_formset.lines_are_valid = lines_are_valid
def post(self, request, *args, **kwargs):
self.header_form = self.get_header_form()
if self.header_form.is_valid():
self.header_obj = self.header_form.save(commit=False)
self.line_formset = self.get_line_formset(self.header_obj)
self.match_formset = self.get_match_formset(self.header_obj)
if not self.requires_lines(self.header_form):
if self.match_formset.is_valid():
self.header_obj.save()
self.create_or_update_related_transactions()
self.matching_is_valid()
messages.success(
request,
self.get_success_message()
)
else:
return self.invalid_forms()
else:
# TODO - remove this seemingly needless check
if self.line_formset and self.match_formset:
if self.line_formset.is_valid() and self.match_formset.is_valid():
self.header_obj.save()
self.lines_are_valid()
self.matching_is_valid()
messages.success(
request,
self.get_success_message()
)
else:
return self.invalid_forms()
else:
return self.invalid_forms()
return HttpResponseRedirect(self.get_success_url())
class CreateMatchingMixin(BaseMatchingMixin):
def matching_is_valid(self):
matches = []
for form in self.match_formset:
if form.empty_permitted and form.has_changed():
match = form.save(commit=False)
match.matched_by_type = match.matched_by.type
match.matched_to_type = match.matched_to.type
match.period = self.header_obj.period
if match.value != 0:
matches.append(match)
if matches:
self.get_header_model().objects.audited_bulk_update(
self.match_formset.headers,
['due', 'paid']
)
self.get_match_model().objects.audited_bulk_create(matches)
class CreatePurchaseOrSalesTransaction(
CreateMatchingMixin,
CreateCashBookEntriesMixin,
BaseCreateTransaction):
def create_or_update_nominal_transactions(self, **kwargs):
kwargs.update({
"line_cls": self.get_line_model(),
"control_nominal_name": self.control_nominal_name,
"vat_nominal_name": settings.DEFAULT_VAT_NOMINAL,
})
# e.g. Invoice, CreditNote etc
transaction_type_object = self.get_transaction_type_object()
transaction_type_object.create_nominal_transactions(
self.nominal_model,
self.nominal_transaction_model,
**kwargs
)
class RESTIndividualTransactionForHeaderMixin:
def get_header_form_kwargs(self):
kwargs = super().get_header_form_kwargs()
if not hasattr(self, 'main_header'):
raise AttributeError(
f"{self.__class__.__name__} has no 'main_header' attribute. Did you override "
"setup() and forget to class super()?"
)
kwargs["instance"] = self.main_header
return kwargs
class RESTIndividualTransactionMixin:
def get_line_formset_kwargs(self, header=None):
kwargs = super().get_line_formset_kwargs(header)
if not header:
kwargs["header"] = self.main_header
return kwargs
def get_line_formset_queryset(self):
return self.get_line_model().objects.filter(header=self.main_header)
class IndividualTransactionMixin:
def setup(self, request, *args, **kwargs):
super().setup(request, *args, **kwargs)
pk = kwargs.get('pk')
header = get_object_or_404(self.get_header_model(), pk=pk)
self.main_header = header
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["main_header"] = self.main_header
context["edit"] = self.main_header.pk
return context
class RESTBaseEditTransactionMixin:
permission_action = 'edit'
def create_or_update_nominal_transactions(self, **kwargs):
kwargs.update({
"line_cls": self.get_line_model(),
"vat_nominal_name": settings.DEFAULT_VAT_NOMINAL,
})
transaction_type_object = self.get_transaction_type_object()
self.nom_trans = transaction_type_object.edit_nominal_transactions(
self.nominal_model,
self.nominal_transaction_model,
**kwargs
)
def create_or_update_vat_transactions(self, **kwargs):
kwargs.update({
"line_cls": self.get_line_model(),
})
transaction_type_object = self.get_transaction_type_object()
self.vat_trans = transaction_type_object.edit_vat_transactions(
self.vat_transaction_model,
**kwargs
)
def dispatch(self, request, *args, **kwargs):
if self.main_header.is_void():
return HttpResponseForbidden("Void transactions cannot be edited")
return super().dispatch(request, *args, **kwargs)
def lines_are_valid(self):
self.line_formset.save(commit=False)
self.lines_to_delete = self.line_formset.deleted_objects
        line_forms = (self.line_formset.ordered_forms
                      if self.lines_should_be_ordered() else self.line_formset)
lines_to_be_created_or_updated_only = [] # excluding those to delete
for form in line_forms:
if form.empty_permitted and form.has_changed():
lines_to_be_created_or_updated_only.append(form)
elif not form.empty_permitted and form.instance not in self.lines_to_delete:
lines_to_be_created_or_updated_only.append(form)
line_no = 1
lines_to_update = []
for form in lines_to_be_created_or_updated_only:
if form.empty_permitted and form.has_changed():
form.instance.header = self.header_obj
form.instance.line_no = line_no
form.instance.type = self.header_obj.type
line_no = line_no + 1
elif not form.empty_permitted:
if form.instance.is_non_zero():
form.instance.line_no = line_no
form.instance.type = self.header_obj.type
line_no = line_no + 1
lines_to_update.append(form.instance)
else:
self.line_formset.deleted_objects.append(form.instance)
self.lines_to_update = lines_to_update
        self.new_lines = new_lines = self.get_line_model().objects.audited_bulk_create(
            self.line_formset.new_objects)
self.get_line_model().objects.audited_bulk_update(lines_to_update)
bulk_delete_with_history(
self.line_formset.deleted_objects,
self.get_line_model()
)
if self.requires_analysis(self.header_form):
existing_nom_trans = self.nominal_transaction_model.objects.filter(
module=self.module,
header=self.header_obj.pk)
existing_vat_trans = self.vat_transaction_model.objects.filter(
module=self.module, header=self.header_obj.pk)
self.create_or_update_related_transactions(
new_lines=new_lines,
lines_to_update=lines_to_update,
deleted_lines=self.line_formset.deleted_objects,
existing_nom_trans=existing_nom_trans,
existing_vat_trans=existing_vat_trans
)
class ViewTransactionAuditMixin:
def get_audit(self):
header = self.main_header
audit = AuditTransaction(
header,
self.get_header_model(),
self.get_line_model()
)
return audit.get_historical_changes()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["audits"] = self.get_audit()
context["multi_object_audit"] = True
return context
class BaseEditTransaction(RESTBaseEditTransactionMixin,
RESTIndividualTransactionForHeaderMixin,
RESTIndividualTransactionMixin,
IndividualTransactionMixin,
ViewTransactionAuditMixin,
BaseTransaction):
def get_success_message(self):
return "Transaction was edited successfully."
def get_context_data(self, **kwargs):
context_data = super().get_context_data(**kwargs)
context_data["edit_mode"] = "1" # js script interpretes this as truthy
return context_data
class EditMatchingMixin(CreateMatchingMixin):
matching_formset_template = "accounts/edit_matching_formset.html"
def get_match_formset_queryset(self):
return (
self.get_match_model()
.objects
.filter(Q(matched_by=self.main_header) | Q(matched_to=self.main_header))
.select_related('matched_by')
.select_related('matched_to')
)
def get_match_formset(self, header=None):
header = self.main_header
return super().get_match_formset(header)
def matching_is_valid(self):
self.match_formset.save(commit=False)
to_create = [
m.instance
for m in self.match_formset
if not m.instance.pk and m.instance.value
]
to_update = [
m.instance
for m in self.match_formset
if m.instance.pk and m.instance.value
]
to_delete = [
m.instance
for m in self.match_formset
if m.instance.pk and not m.instance.value
]
for match in to_create + to_update:
if match.matched_by_id == self.header_obj.pk:
match.matched_by_type = self.header_obj.type
match.matched_to_type = match.matched_to.type
match.period = self.header_obj.period
else:
match.matched_by_type = match.matched_by.type
match.matched_to_type = self.header_obj.type
self.get_match_model().objects.audited_bulk_create(to_create)
self.get_match_model().objects.audited_bulk_update(
to_update, ['value', 'matched_by_type', 'matched_to_type', 'period'])
bulk_delete_with_history(
to_delete,
self.get_match_model()
)
self.get_header_model().objects.audited_bulk_update(
self.match_formset.headers,
['due', 'paid']
)
class NominalTransactionsMixin:
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
nom_trans = (
self.nominal_transaction_model
.objects
.select_related("nominal__name")
.filter(header=self.main_header.pk)
.filter(module=self.module)
.values("nominal__name")
.annotate(total=Sum("value"))
)
context["nominal_transactions"] = nom_trans
return context
class EditCashBookEntriesMixin(CreateCashBookEntriesMixin):
def lines_are_valid(self):
super().lines_are_valid()
if not self.requires_analysis(self.header_form):
# cash book is unlike other ledgers -
# bf on cash book should still post cash book transaction
# bf on SL and PL does not post relate trans (cb, nominal, vat)
# bf does not exist on nominal
self.create_or_update_related_transactions()
def create_or_update_cash_book_transactions(self, **kwargs):
self.transaction_type_object.edit_cash_book_entry(
self.get_cash_book_transaction_model(),
**kwargs
)
class EditCashBookTransaction(
EditCashBookEntriesMixin,
NominalTransactionsMixin,
BaseEditTransaction):
pass
class ViewSaleOrPurchaseTransactionAuditMixin:
def get_audit(self):
header = self.main_header
audit = AuditTransaction(
header,
self.get_header_model(),
self.get_line_model(),
self.get_match_model()
)
return audit.get_historical_changes()
class EditPurchaseOrSalesTransaction(
EditCashBookEntriesMixin,
NominalTransactionsMixin,
EditMatchingMixin,
ViewSaleOrPurchaseTransactionAuditMixin,
BaseEditTransaction):
def create_or_update_nominal_transactions(self, **kwargs):
kwargs.update({
"line_cls": self.get_line_model(),
"control_nominal_name": self.control_nominal_name,
"vat_nominal_name": settings.DEFAULT_VAT_NOMINAL,
})
transaction_type_object = self.get_transaction_type_object()
transaction_type_object.edit_nominal_transactions(
self.nominal_model,
self.nominal_transaction_model,
**kwargs
)
class BaseViewTransaction(
ViewTransactionAuditMixin,
DetailView):
"""
No REST BASE exists for view yet. Remember to move permission_action
to this class when it is created
"""
permission_action = 'view'
context_object_name = "header"
def setup(self, request, *args, **kwargs):
super().setup(request, *args, **kwargs)
self.main_header = self.object = self.get_object() # need this before dispatch for
# TransactionPermissionMixin
def get(self, request, *args, **kwargs):
# self.object = self.get_object(). Set in setup instead. See above.
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
def get_header_model(self):
return self.model
def get_line_model(self):
return self.line_model
def get_void_form_kwargs(self, header):
return {
"prefix": "void",
"initial": {"id": header.pk}
}
def get_void_form(self, header=None):
return self.void_form(
self.model,
self.get_void_form_action(header=header),
**self.get_void_form_kwargs(header=header)
)
def get_void_form_action(self, header):
return reverse(self.void_form_action, kwargs={"pk": header.pk})
def get_edit_view_name(self):
return self.edit_view_name
def get_context_data(self, **kwargs):
self.main_header = header = self.object
context = super().get_context_data(**kwargs)
context["lines"] = lines = self.line_model.objects.select_related(
"header").filter(header=header)
context["void_form"] = self.get_void_form(header=header)
context["module"] = self.module
context["edit_view_name"] = self.get_edit_view_name()
context["edit_mode"] = "" # js script interprets this as a Falsey
return context
class MatchingViewTransactionMixin:
def get_match_model(self):
return self.match_model
def get_context_data(self, **kwargs):
self.main_header = header = self.object
context = super().get_context_data(**kwargs)
matches = (
self.match_model
.objects
.select_related("matched_by")
.select_related("matched_to")
.filter(
Q(matched_by=header) | Q(matched_to=header)
)
)
match_objs = []
for match in matches:
if match.matched_by_id == header.pk:
match_obj = {
"pk": match.pk,
"type": match.matched_to.get_type_display(),
"ref": match.matched_to.ref,
"total": match.matched_to.ui_total,
"paid": match.matched_to.ui_paid,
"due": match.matched_to.ui_due,
"value": match.ui_match_value(match.matched_to, match.value)
}
else:
match_obj = {
"pk": match.pk,
"type": match.matched_by.get_type_display(),
"ref": match.matched_by.ref,
"total": match.matched_by.ui_total,
"paid": match.matched_by.ui_paid,
"due": match.matched_by.ui_due,
"value": match.ui_match_value(match.matched_by, -1 * match.value)
}
match_objs.append(match_obj)
context["matches"] = match_objs
return context
class SaleAndPurchaseViewTransaction(
NominalTransactionsMixin,
MatchingViewTransactionMixin,
ViewSaleOrPurchaseTransactionAuditMixin,
BaseViewTransaction):
pass
class BaseVoidTransaction(
IndividualTransactionMixin,
View):
http_method_names = ['post']
permission_action = "void"
def get_success_url(self):
return self.success_url
def update_headers(self):
self.header_model.objects.audited_bulk_update(
self.headers_to_update,
["paid", "due", "status"]
)
def delete_related(self):
(
self.nominal_transaction_model
.objects
.filter(module=self.module)
.filter(header=self.transaction_to_void.pk)
.delete()
)
(
self.vat_transaction_model
.objects
.filter(module=self.module)
.filter(header=self.transaction_to_void.pk)
.delete()
)
def form_is_valid(self):
self.success = True
self.transaction_to_void = self.form.instance
self.transaction_to_void.status = "v"
self.headers_to_update = [self.transaction_to_void]
self.update_headers()
self.delete_related()
def form_is_invalid(self):
self.success = False
non_field_errors = self.form.non_field_errors()
self.error_message = render_to_string(
"messages.html", {"messages": [non_field_errors[0]]})
def get_header_model(self):
# we do not need this for the void views
# but other mixins rely on this
# TODO - remove these unpythonic getters
return self.header_model
def get_form_prefix(self):
return self.form_prefix
def get_void_form_kwargs(self):
return {
"data": self.request.POST,
"prefix": self.get_form_prefix()
}
def get_void_form(self):
form_action = None # does not matter for the form with this view
return self.form(self.header_model, form_action, **self.get_void_form_kwargs())
def post(self, request, *args, **kwargs):
self.form = form = self.get_void_form()
if form.is_valid():
self.form_is_valid()
return JsonResponse(
data={
"success": self.success,
"href": self.get_success_url()
}
)
else:
self.form_is_invalid()
return JsonResponse(
data={
"success": self.success,
"error_message": self.error_message
}
)
class SaleAndPurchaseVoidTransaction(BaseVoidTransaction):
def get_void_form_kwargs(self):
kwargs = super().get_void_form_kwargs()
kwargs.update({
"matching_model": self.matching_model
})
return kwargs
def form_is_valid(self):
self.success = True
self.transaction_to_void = self.form.instance
self.transaction_to_void.status = "v"
self.headers_to_update = [self.transaction_to_void]
matches = self.form.matches
matching_model = self.matching_model
for match in matches:
if match.matched_by_id == self.transaction_to_void.pk:
# value is the amount of the matched_to transaction that was matched
# e.g. transaction_to_void is 120.00 payment and matched to 120.00 invoice
# value = 120.00
self.transaction_to_void.paid += match.value
self.transaction_to_void.due -= match.value
match.matched_to.paid -= match.value
match.matched_to.due += match.value
self.headers_to_update.append(match.matched_to)
else:
# value is the amount of the transaction_to_void which was matched
# matched_by is an invoice for 120.00 and matched_to is a payment for 120.00
# value is -120.00
self.transaction_to_void.paid -= match.value
self.transaction_to_void.due += match.value
match.matched_by.paid += match.value
match.matched_by.due -= match.value
self.headers_to_update.append(match.matched_by)
bulk_delete_with_history(
matches,
matching_model
)
self.update_headers()
self.delete_related()
class DeleteCashBookTransMixin:
def delete_related(self):
super().delete_related()
transaction_to_void = self.form.instance
(
self.cash_book_transaction_model
.objects
.filter(module=self.module)
.filter(header=self.transaction_to_void.pk)
.delete()
)
class AgeMatchingReportMixin(
JQueryDataTableScrollerMixin,
CustomFilterJQueryDataTableMixin,
JQueryDataTableMixin,
TemplateResponseMixin,
View):
show_trans_columns = [
# add the subclasses' contact_field_name here
'date',
{
'label': 'Due Date',
'field': 'due_date'
},
'ref',
'total',
'unallocated',
'current',
'1 month',
'2 month',
'3 month',
{
'label': '4 Month & Older',
'field': '4 month'
}
]
column_transformers = {
"date": lambda d: d.strftime('%d %b %Y') if d and not isinstance(d, JSONBlankDate) else "",
# payment trans do not have due dates
"due_date": lambda d: d.strftime('%d %b %Y') if d and not isinstance(d, JSONBlankDate) else ""
}
def filter_by_contact(self, transactions, from_contact, to_contact):
"""
`transactions` could be individual or the summary transaction for a supplier
or customer
"""
filtered_by_contact = []
if from_contact or to_contact:
for tran in transactions:
contact_pk = tran["meta"]["contact_pk"]
if from_contact and to_contact:
if contact_pk >= from_contact.pk and contact_pk <= to_contact.pk:
filtered_by_contact.append(tran)
elif from_contact:
if contact_pk >= from_contact.pk:
filtered_by_contact.append(tran)
elif to_contact:
if contact_pk <= to_contact.pk:
filtered_by_contact.append(tran)
return filtered_by_contact
return transactions
    def aggregate_is_zero(self, aggregate):
        return not any(
            aggregate[bucket]
            for bucket in (
                "total", "unallocated", "current",
                "1 month", "2 month", "3 month", "4 month",
            )
        )
def create_report_transaction(self, header, report_period):
# header e.g. PurchaseHeader or SaleHeader
contact_field_name = self.contact_field_name
report_tran = {
"meta": {
"contact_pk": getattr(header, contact_field_name).pk
},
contact_field_name: getattr(header, contact_field_name).name,
"date": header.date,
            # JSONBlankDate serializes to "" instead of the datetime. We need
            # this because otherwise order_objects cannot work, i.e. comparing
            # a str with a date object will fail
"due_date": header.due_date or JSONBlankDate(1900, 1, 1),
"ref": header.ref,
"total": header.total,
}
if header.is_payment_type():
report_tran["unallocated"] = header.due
report_tran["current"] = 0
report_tran["1 month"] = 0
report_tran["2 month"] = 0
report_tran["3 month"] = 0
report_tran["4 month"] = 0
else:
report_tran["unallocated"] = 0
if header.period == report_period:
report_tran["current"] = header.due
else:
report_tran["current"] = 0
try:
if header.period == report_period - 1:
report_tran["1 month"] = header.due
else:
report_tran["1 month"] = 0
except MissingPeriodError:
report_tran["1 month"] = 0
try:
if header.period == report_period - 2:
report_tran["2 month"] = header.due
else:
report_tran["2 month"] = 0
except MissingPeriodError:
report_tran["2 month"] = 0
try:
if header.period == report_period - 3:
report_tran["3 month"] = header.due
else:
report_tran["3 month"] = 0
except MissingPeriodError:
report_tran["3 month"] = 0
try:
if header.period <= report_period - 4:
report_tran["4 month"] = header.due
else:
report_tran["4 month"] = 0
except MissingPeriodError:
report_tran["4 month"] = 0
return report_tran
def aggregate_transactions(self, transactions):
def _aggregate_transactions(x, y):
x["unallocated"] += y["unallocated"]
x["total"] += y["total"]
x["current"] += y["current"]
x["1 month"] += y["1 month"]
x["2 month"] += y["2 month"]
x["3 month"] += y["3 month"]
x["4 month"] += y["4 month"]
return x
aggregate = functools.reduce(_aggregate_transactions, transactions)
aggregate["ref"] = ''
aggregate["date"] = ''
aggregate["due_date"] = ''
return aggregate
def load_page(self):
context = {}
mod_settings = ModuleSettings.objects.first()
current_period = getattr(mod_settings, self.module_setting_name)
form = self.get_filter_form(
initial={"period": current_period, "show_transactions": True})
context["form"] = form
context["columns"] = columns = []
show_trans_columns = self.show_trans_columns.copy()
show_trans_columns.insert(0, self.contact_field_name)
for column in show_trans_columns:
            if isinstance(column, str):
columns.append({
"label": column.title(),
"field": column
})
elif isinstance(column, dict):
columns.append(column)
from_contact_field, to_contact_field = self.get_contact_range_field_names()
context["contact_field_name"] = self.contact_field_name
context["from_contact_field"] = from_contact_field
context["to_contact_field"] = to_contact_field
return context
def get_contact_range_field_names(self):
return self.contact_range_field_names
def get_row_identifier(self, obj):
return
def get_row(self, obj):
for column, transformer in self.column_transformers.items():
obj[column] = transformer(obj[column])
return obj
def queryset_count(self, filtered_and_ordered_transactions):
return len(filtered_and_ordered_transactions)
def order(self, filtered_transactions):
return self.order_objects(filtered_transactions)
def filter_form_valid(self, transactions, form):
from_contact_field, to_contact_field = self.get_contact_range_field_names()
from_contact = form.cleaned_data.get(from_contact_field)
to_contact = form.cleaned_data.get(to_contact_field)
period = form.cleaned_data.get("period")
        # the only filter applied so far is `period`, but for the purpose of the
        # recordsFiltered count which jQueryDataTables needs, it does not count
        # because it is a mandatory filter. The contact filter below is the
        # first real (i.e. optional) filter, so it does count
if form.cleaned_data.get("show_transactions"):
report_trans = []
for tran in transactions:
report_trans.append(
self.create_report_transaction(tran, period)
)
else:
report_trans = transactions
return self.filter_by_contact(report_trans, from_contact, to_contact)
def filter_form_invalid(self, queryset, form):
return []
def get_queryset(self, **kwargs):
q = super().get_queryset(**kwargs)
queryset = q.select_related(self.contact_field_name)
form = kwargs["form"]
if not form.is_valid():
return []
contact_field_name = self.contact_field_name
from_contact_field, to_contact_field = self.get_contact_range_field_names()
from_contact = form.cleaned_data.get(from_contact_field)
to_contact = form.cleaned_data.get(to_contact_field)
period = form.cleaned_data.get("period")
# queryset is simply the whole set of PL or SL transactions
queryset = queryset.exclude(status="v").filter(period__lte=period).order_by(
contact_field_name) # must order in case
# we need to group by contact_field_name below
transactions = self.match_model.get_not_fully_matched_at_period(
list(queryset), period)
if not form.cleaned_data.get("show_transactions"):
contact_trans = groupby(
transactions, key=lambda t: getattr(t, self.contact_field_name))
aggregates = []
for contact, trans in contact_trans:
report_trans = [
self.create_report_transaction(tran, period)
for tran in trans
]
aggregate = self.aggregate_transactions(report_trans)
if not self.aggregate_is_zero(aggregate):
aggregates.append(aggregate)
return aggregates
return transactions
class LoadMatchingTransactions(
JQueryDataTableScrollerMixin,
JQueryDataTableMixin,
TemplateResponseMixin,
View):
def set_dt_row_data(self, obj, row):
row["DT_RowData"] = {
"pk": self.get_row_identifier(obj),
"fields": {
"type": {
'value': obj.type,
'order': obj.type
},
"ref": {
'value': obj.ref,
'order': obj.ref
},
"total": {
'value': obj.ui_total,
'order': obj.ui_total
},
"paid": {
'value': obj.ui_paid,
'order': obj.ui_paid
},
"due": {
'value': obj.ui_due,
'order': obj.ui_due
},
"matched_to": {
'value': obj.pk,
'order': obj.pk
}
}
}
return row
def get_row(self, obj):
return {
"type": {
"label": obj.get_type_display(),
"value": obj.type
},
"ref": obj.ref,
"total": obj.ui_total,
"paid": obj.ui_paid,
"due": obj.ui_due
}
def apply_filter(self, queryset, **kwargs):
if contact := self.request.GET.get("s"):
contact_name = self.contact_name
queryset = (
queryset
.filter(**{contact_name: contact})
.exclude(due__exact=0)
.exclude(status="v")
)
if period := self.request.GET.get('period'):
queryset = queryset.filter(
period__fy_and_period__lte=Subquery(
Period
.objects
.filter(pk=period)
.values('fy_and_period')
)
)
if edit := self.request.GET.get("edit"):
matches = (
self.match_model.objects.filter(
Q(matched_to=edit) | Q(matched_by=edit))
)
matches = [(match.matched_by_id, match.matched_to_id)
for match in matches]
matched_headers = list(chain(*matches))
            pk_to_exclude = list(matched_headers)
# at least exclude the record being edited itself !!!
pk_to_exclude.append(edit)
queryset = queryset.exclude(pk__in=pk_to_exclude)
else:
queryset = queryset.none()
return queryset
|
419849
|
expected_output = {
"my_state": "13 -ACTIVE",
"peer_state": "1 -DISABLED",
"mode": "Simplex",
"unit": "Primary",
"unit_id": 48,
"redundancy_mode_operational": "Non-redundant",
"redundancy_mode_configured": "Non-redundant",
"redundancy_state": "Non Redundant",
"maintenance_mode": "Disabled",
"manual_swact": "disabled",
"manual_swact_reason": "system is simplex (no peer unit)",
"communications": "Down",
"communications_reason": "Simplex mode",
"client_count": 111,
"client_notification_tmr_msec": 30000,
"rf_debug_mask": "0x0",
}
|
419893
|
from sqllineage.core.models import Column, Table
from sqllineage.runner import LineageRunner
def assert_table_lineage_equal(sql, source_tables=None, target_tables=None):
lr = LineageRunner(sql)
for (_type, actual, expected) in zip(
["Source", "Target"],
[lr.source_tables, lr.target_tables],
[source_tables, target_tables],
):
actual = set(actual)
expected = set() if expected is None else {Table(t) for t in expected}
assert (
actual == expected
), f"\n\tExpected {_type} Table: {expected}\n\tActual {_type} Table: {actual}"
def assert_column_lineage_equal(sql, column_lineages=None):
expected = (
{
(
Column(lineage[0]),
Column(lineage[1]),
)
for lineage in column_lineages
}
if column_lineages
else set()
)
lr = LineageRunner(sql)
actual = {(lineage[0], lineage[-1]) for lineage in set(lr.get_column_lineage())}
assert (
set(actual) == expected
), f"\n\tExpected Lineage: {expected}\n\tActual Lineage: {actual}"
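# A runnable smoke test under the same assumptions as the helpers above
# (table names are arbitrary; sqllineage resolves them in the default schema):
if __name__ == "__main__":
    assert_table_lineage_equal(
        "INSERT INTO tab1 SELECT * FROM tab2",
        source_tables={"tab2"},
        target_tables={"tab1"},
    )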
|
419897
|
import numpy as np
import matplotlib.pyplot as plt
Sky = [128,128,128]
Building = [128,0,0]
Pole = [192,192,128]
Road = [128,64,128]
DSET_MEAN = [0.611, 0.506, 0.54]
DSET_STD = [0.14, 0.16, 0.165]
label_colours = np.array([Sky, Building, Pole, Road])
def view_annotated(tensor, plot=True):
temp = tensor.numpy()
r = temp.copy()
g = temp.copy()
b = temp.copy()
for l in range(0,3):
r[temp==l]=label_colours[l,0]
g[temp==l]=label_colours[l,1]
b[temp==l]=label_colours[l,2]
rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
rgb[:,:,0] = (r/255.0)#[:,:,0]
rgb[:,:,1] = (g/255.0)#[:,:,1]
rgb[:,:,2] = (b/255.0)#[:,:,2]
if plot:
plt.imshow(rgb)
plt.show()
else:
return rgb
def decode_image(tensor):
inp = tensor.numpy().transpose((1, 2, 0))
mean = np.array(DSET_MEAN)
std = np.array(DSET_STD)
# inp = std * inp + mean
return inp
def view_image(tensor):
inp = decode_image(tensor)
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
plt.show()
def view_image_Gray(tensor, plot=True):
temp = np.transpose(tensor.numpy(),(2,0,1))
temp = temp[0]
# print('temp.shape',temp.shape)#(256, 256, 1)
r = temp.copy()
g = temp.copy()
b = temp.copy()
for l in range(0,3):
r[temp==l]=label_colours[l,0]
g[temp==l]=label_colours[l,1]
b[temp==l]=label_colours[l,2]
# nu = 255.0
rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
rgb[:,:,0] = (r/255.0)#[:,:,0]
rgb[:,:,1] = (g/255.0)#[:,:,1]
rgb[:,:,2] = (b/255.0)#[:,:,2]
if plot:
plt.imshow(rgb)
plt.show()
else:
return rgb
def save_image(tensor):
inp = decode_image(tensor)
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
plt.show()
|
419902
|
from setuptools import setup, find_packages
version = "0.24"
setup(name="staffjoy",
packages=find_packages(),
version=version,
description="Staffjoy API Wrapper in Python",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
url="https://github.com/staffjoy/client_python",
download_url="https://github.com/StaffJoy/client_python/archive/%s.tar.gz" % version,
keywords=["staffjoy-api", "staffjoy", "staff joy"],
install_requires=["requests[security]"], )
|
419919
|
import requests
from qanta.datasets.quiz_bowl import QuestionDatabase
query = 'https://en.wikipedia.org/w/api.php?action=query&prop=pageprops&format=json&titles={}'
answers = QuestionDatabase().all_answers().values()
for answer in answers:
    r = requests.get(query.format(answer))
r = r.json()
|
419933
|
import src.cli.console as console
def container_list(data):
if len(data.spec.containers) <= 1:
return None
container = console.list(
message="Please select a container",
message_no_choices="No container is running.",
choices=[c.name for c in data.spec.containers],
)
if container is None:
return None
return container
|
419950
|
import socket
def get_ip_address(domain_name):
print("[+]Obtaining IP Address")
try:
ip_address=socket.gethostbyname(domain_name)
return ip_address
    except socket.gaierror:
        print("[!]Unable to get IP Address")
        return None
|
419970
|
import discord
from discord.ext import commands
from discord.ext.commands.errors import *
from modules.helpers import PREFIX, InsufficientFundsException
class Handlers(commands.Cog, name='handlers'):
def __init__(self, client: commands.Bot):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print(self.client.user.name + " is ready")
try:
await self.client.change_presence(
activity=discord.Game(f"blackjack | {PREFIX}help")
)
        except Exception:
            pass
@commands.Cog.listener()
async def on_command_error(self, ctx: commands.Context, error):
if hasattr(ctx.command, 'on_error'):
return
if isinstance(error, CommandInvokeError):
await self.on_command_error(ctx, error.original)
elif isinstance(error, CommandNotFound):
await ctx.invoke(self.client.get_command('help'))
elif isinstance(error, (MissingRequiredArgument,
TooManyArguments, BadArgument)):
await ctx.invoke(self.client.get_command('help'), ctx.command.name)
elif isinstance(error, (UserNotFound, MemberNotFound)):
await ctx.send(f"Member, `{error.argument}`, was not found.")
elif isinstance(error, MissingPermissions):
await ctx.send("Must have following permission(s): " +
", ".join([f'`{perm}`' for perm in error.missing_perms]))
elif isinstance(error, BotMissingPermissions):
await ctx.send("I must have following permission(s): " +
", ".join([f'`{perm}`' for perm in error.missing_perms]))
elif isinstance(error, InsufficientFundsException):
await ctx.invoke(self.client.get_command('money'))
elif isinstance(error, CommandOnCooldown):
s = int(error.retry_after)
s = s % (24 * 3600)
h = s // 3600
s %= 3600
m = s // 60
s %= 60
await ctx.send(f'{h}hrs {m}min {s}sec remaining.')
else:
raise error
def setup(client: commands.Bot):
client.add_cog(Handlers(client))
|
419999
|
from __future__ import print_function
import threading, sys, time, traceback
class DebugLock(object):
def __init__(self, name, verbose=False):
self.name = name
self._lock = threading.Lock()
self.verbose = verbose
def acquire(self, latency_warn_msec=None):
if self.verbose:
print("-=" * 20)
print("*****", self.name, "request acquire by", threading.currentThread())
if self.verbose:
frame = sys._getframe()
traceback.print_stack(frame)
print("-=" * 20)
tstart = time.time()
self._lock.acquire()
tstop = time.time()
print("*****", self.name, "acquired by", threading.currentThread())
if latency_warn_msec is not None:
lat = (tstop - tstart) * 1000.0
if lat > latency_warn_msec:
print(" **** WARNING acquisition time %.1f msec" % lat)
if self.verbose:
frame = sys._getframe()
traceback.print_stack(frame)
print("-=" * 20)
def release(self):
print("*****", self.name, "released by", threading.currentThread())
if self.verbose:
frame = sys._getframe()
traceback.print_stack(frame)
print("-=" * 20)
self._lock.release()
def __enter__(self):
print("__enter__", end=" ")
self.acquire()
    def __exit__(self, etype, evalue, etb):
        print("__exit__", end=" ")
        self.release()
        if etype:
            print("*****", self.name, "error on __exit__", threading.currentThread())
        # returning a falsy value lets any exception from the with-block propagate
        return False
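# A minimal usage sketch: DebugLock is a drop-in replacement for
# threading.Lock that logs acquire/release and can warn on slow acquisitions.
if __name__ == "__main__":
    lock = DebugLock("demo-lock", verbose=False)
    lock.acquire(latency_warn_msec=50.0)  # warn if acquisition took over 50 ms
    try:
        pass  # critical section would go here
    finally:
        lock.release()
    with lock:  # the context-manager protocol works as well
        pass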
|
420000
|
from numbers import Number
from typing import Union
from pathlib import Path
import numpy as np
import scipy.sparse as sp
from .sparsegraph import SparseGraph
data_dir = Path(__file__).parent
def load_from_npz(file_name: str) -> SparseGraph:
"""Load a SparseGraph from a Numpy binary file.
Parameters
----------
file_name
Name of the file to load.
Returns
-------
SparseGraph
Graph in sparse matrix format.
"""
with np.load(file_name, allow_pickle=True) as loader:
loader = dict(loader)
dataset = SparseGraph.from_flat_dict(loader)
return dataset
def load_dataset(name: str,
directory: Union[Path, str] = data_dir
) -> SparseGraph:
"""Load a dataset.
Parameters
----------
name
Name of the dataset to load.
directory
Path to the directory where the datasets are stored.
Returns
-------
SparseGraph
The requested dataset in sparse format.
"""
if isinstance(directory, str):
directory = Path(directory)
if not name.endswith('.npz'):
name += '.npz'
path_to_file = directory / name
if path_to_file.exists():
return load_from_npz(path_to_file)
else:
raise ValueError("{} doesn't exist.".format(path_to_file))
def networkx_to_sparsegraph(
nx_graph: Union['nx.Graph', 'nx.DiGraph'],
label_name: str = None,
sparse_node_attrs: bool = True,
sparse_edge_attrs: bool = True
) -> 'SparseGraph':
"""Convert NetworkX graph to SparseGraph.
Node attributes need to be numeric.
Missing entries are interpreted as 0.
Labels can be any object. If non-numeric they are interpreted as
categorical and enumerated.
This ignores all edge attributes except the edge weights.
    Parameters
    ----------
    nx_graph
        Graph to convert.
    label_name
        Name of the node attribute to use as the labels.
    sparse_node_attrs
        Whether to return the node attributes as a sparse matrix.
    sparse_edge_attrs
        Whether to return the edge attributes as a sparse matrix
        (edge attributes other than weights are currently ignored).
Returns
-------
SparseGraph
Converted graph.
"""
import networkx as nx
# Extract node names
int_names = True
for node in nx_graph.nodes:
int_names &= isinstance(node, int)
if int_names:
node_names = None
else:
node_names = np.array(nx_graph.nodes)
nx_graph = nx.convert_node_labels_to_integers(nx_graph)
# Extract adjacency matrix
adj_matrix = nx.adjacency_matrix(nx_graph)
# Collect all node attribute names
attrs = set()
for _, node_data in nx_graph.nodes().data():
attrs.update(node_data.keys())
# Initialize labels and remove them from the attribute names
if label_name is None:
labels = None
else:
if label_name not in attrs:
raise ValueError("No attribute with label name '{}' found.".format(label_name))
attrs.remove(label_name)
labels = [0 for _ in range(nx_graph.number_of_nodes())]
if len(attrs) > 0:
# Save attribute names if not integer
all_integer = all((isinstance(attr, int) for attr in attrs))
if all_integer:
attr_names = None
attr_mapping = None
else:
attr_names = np.array(list(attrs))
attr_mapping = {k: i for i, k in enumerate(attr_names)}
        # Initialize attribute matrix; size it from `attrs` because attr_names
        # is None when all the attribute keys are integers
        if sparse_node_attrs:
            attr_matrix = sp.lil_matrix((nx_graph.number_of_nodes(), len(attrs)), dtype=np.float32)
        else:
            attr_matrix = np.zeros((nx_graph.number_of_nodes(), len(attrs)), dtype=np.float32)
else:
attr_matrix = None
attr_names = None
# Fill label and attribute matrices
for inode, node_attrs in nx_graph.nodes.data():
for key, val in node_attrs.items():
if key == label_name:
labels[inode] = val
else:
if not isinstance(val, Number):
if node_names is None:
raise ValueError("Node {} has attribute '{}' with value '{}', which is not a number."
.format(inode, key, val))
else:
raise ValueError("Node '{}' has attribute '{}' with value '{}', which is not a number."
.format(node_names[inode], key, val))
if attr_mapping is None:
attr_matrix[inode, key] = val
else:
attr_matrix[inode, attr_mapping[key]] = val
if attr_matrix is not None and sparse_node_attrs:
attr_matrix = attr_matrix.tocsr()
# Convert labels to integers
if labels is None:
class_names = None
else:
try:
labels = np.array(labels, dtype=np.float32)
class_names = None
except ValueError:
class_names = np.unique(labels)
class_mapping = {k: i for i, k in enumerate(class_names)}
labels_int = np.empty(nx_graph.number_of_nodes(), dtype=np.float32)
for inode, label in enumerate(labels):
labels_int[inode] = class_mapping[label]
labels = labels_int
return SparseGraph(
adj_matrix=adj_matrix, attr_matrix=attr_matrix, labels=labels,
node_names=node_names, attr_names=attr_names, class_names=class_names,
metadata=None)
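# A minimal runnable sketch of the conversion: a two-node graph with one
# numeric attribute and a categorical label (names are arbitrary).
if __name__ == "__main__":
    import networkx as nx
    g = nx.Graph()
    g.add_node(0, weight=1.0, label='a')
    g.add_node(1, weight=2.0, label='b')
    g.add_edge(0, 1)
    sparse_graph = networkx_to_sparsegraph(g, label_name='label')
    print(type(sparse_graph))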
|
420020
|
import requests
from isserviceup.services.models.service import Service, Status
class StatusIOPlugin(Service):
status_url = 'https://api.status.io/'
@property
def statuspage_id(self):
        raise NotImplementedError()
def get_status(self):
r = requests.get('{}/1.0/status/{}'.format(
self.status_url.strip("/"), self.statuspage_id
))
try:
j = r.json()
except ValueError:
print(r.content)
raise
expected_status = {
100: Status.ok,
200: Status.minor,
300: Status.major,
400: Status.critical,
500: Status.critical,
600: Status.maintenance,
}
try:
status_code = j['result']['status_overall']['status_code']
if status_code in expected_status:
return expected_status[status_code]
raise Exception('unexpected status')
except KeyError:
print(j)
raise
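# A minimal sketch of a concrete service: a subclass overrides statuspage_id
# with its status.io page identifier (the value below is a placeholder; the
# real Service base class may require further attributes).
class ExampleStatusIOService(StatusIOPlugin):
    statuspage_id = 'PLACEHOLDER_PAGE_ID'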
|
420029
|
from unittest.mock import MagicMock
import pytest
from pytest import fixture
from pytest_mock import MockFixture
from injectable import InjectionContainer, Injectable
from injectable.container.namespace import Namespace
from injectable.errors import InjectionError
from injectable.injection.injection_utils import (
get_namespace_injectables,
RegistryType,
filter_by_group,
resolve_single_injectable,
)
@fixture
def injection_container_mock(mocker: MockFixture):
return mocker.patch("injectable.injection.injection_utils.InjectionContainer")
class TestGetNamespaceInjectables:
@pytest.mark.parametrize(
"registry_type", (RegistryType.CLASS, RegistryType.QUALIFIER)
)
def test__get_namespace_injectables(
self, registry_type: RegistryType, injection_container_mock: InjectionContainer
):
# given
dependency_name = "TEST"
namespace_key = "TEST_NAMESPACE"
namespace = MagicMock(spec=Namespace)()
injection_container_mock.NAMESPACES = {namespace_key: namespace}
# when
injectables = get_namespace_injectables(
dependency_name, registry_type, namespace_key
)
# then
assert namespace.class_registry.get.called == (
registry_type is RegistryType.CLASS
)
assert namespace.qualifier_registry.get.called == (
registry_type is RegistryType.QUALIFIER
)
registry = (
namespace.class_registry
if registry_type is RegistryType.CLASS
else namespace.qualifier_registry
)
assert registry.get.call_args[0][0] is dependency_name
assert injectables == registry.get.return_value
class TestFilterByGroup:
def test__filter_by_group__when_exclude_groups_is_none(self):
# given
injectables = [MagicMock(group="A"), MagicMock(group="A"), MagicMock(group="B")]
# when
matches = filter_by_group({*injectables}, group="A")
# then
assert len(matches) == 2
assert all(match in injectables[:2] for match in matches)
def test__filter_by_group__when_group_is_none(self):
# given
injectables = [MagicMock(group="A"), MagicMock(group="A"), MagicMock(group="B")]
# when
matches = filter_by_group({*injectables}, exclude_groups=["B"])
# then
assert len(matches) == 2
assert all(match in injectables[:2] for match in matches)
def test__filter_by_group__when_group_and_exclude_groups_are_set(self):
# given
injectables = [MagicMock(group="A"), MagicMock(group="A"), MagicMock(group="B")]
# when
matches = filter_by_group({*injectables}, group="A", exclude_groups=["A"])
# then
assert len(matches) == 0
class TestResolveSingleInjectable:
def test__resolve_single_injectable__obvious_case(self):
# given
expected_injectable = MagicMock(spec=Injectable)()
matches = {expected_injectable}
# when
injectable = resolve_single_injectable("TEST", RegistryType.CLASS, matches)
# then
assert injectable == expected_injectable
def test__resolve_single_injectable__when_there_are_no_primary_injectables(self):
# given
matches = {MagicMock(primary=False), MagicMock(primary=False)}
# then when
with pytest.raises(InjectionError):
resolve_single_injectable("TEST", RegistryType.CLASS, matches)
def test__resolve_single_injectable__when_there_are_multiple_primary_injectables(
self,
):
# given
matches = {MagicMock(primary=True), MagicMock(primary=True)}
# then when
with pytest.raises(InjectionError):
resolve_single_injectable("TEST", RegistryType.CLASS, matches)
def test__resolve_single_injectable__when_there_are_one_primary_injectables(self):
# given
primary_injectable = MagicMock(primary=True)
non_primary_injectable = MagicMock(primary=False)
matches = {primary_injectable, non_primary_injectable}
# when
injectable = resolve_single_injectable("TEST", RegistryType.CLASS, matches)
# then
assert injectable is primary_injectable
|
420043
|
import FWCore.ParameterSet.Config as cms
from RecoLocalCalo.HGCalRecProducers.HGCalRecHit_cfi import HGCalRecHit
HEBRecHitGPUProd = cms.EDProducer('HEBRecHitGPU',
HGCHEBUncalibRecHitsTok = cms.InputTag('HGCalUncalibRecHit', 'HGCHEBUncalibRecHits'),
HGCHEB_keV2DIGI = HGCalRecHit.__dict__['HGCHEB_keV2DIGI'],
HGCHEB_noise_MIP = HGCalRecHit.__dict__['HGCHEB_noise_MIP'],
weights = HGCalRecHit.__dict__['layerWeights'] )
|
420068
|
import os
import shutil
import test_util
from behave import given, when, then
@given(u'I create a dir "{dirPath}"')
def step_impl(context, dirPath):
    os.makedirs(dirPath, 0o755)
@then(u'I should delete dir "{dirPath}"')
def step_impl(context, dirPath):
shutil.rmtree(dirPath)
@when(u'I execute utility with no flag')
def step_impl(context):
cmd = ["./dump_db_stats"]
context.output, context.error, context.returncode = test_util.cli_call(cmd, expect_success=False)
@when(u'I execute utility with flag "{flag}" and path "{path}"')
def step_impl(context, flag, path):
cmd = ["./dump_db_stats"]
cmd.append(flag)
cmd.append(path)
context.output, context.error, context.returncode = test_util.cli_call(cmd, expect_success=False)
@then(u'I should get a process exit code "{expectedReturncode}"')
def step_impl(context, expectedReturncode):
assert (str(context.returncode) == expectedReturncode), "Return code: expected (%s), instead found (%s)" % (expectedReturncode, context.returncode)
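# Added illustration: a hypothetical scenario wiring these steps together
# (the flag value is a placeholder, not a documented option):
#
#   Scenario: dump stats for a database directory
#     Given I create a dir "tmp_db"
#     When I execute utility with flag "--dbpath" and path "tmp_db"
#     Then I should get a process exit code "0"
#     Then I should delete dir "tmp_db"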
|
420074
|
import logging
import shutil
from pathlib import Path
from tempfile import TemporaryDirectory
from notebookstestcase import _PYGSTI_ROOT, notebooks_in_path, _make_test
# All tutorials to be tested are under this directory
_TUTORIALS_ROOT = _PYGSTI_ROOT / 'jupyter_notebooks' / 'Tutorials'
# File resources to be copied to the workdir before testing
_TUTORIAL_FILES = [
'tutorial_files/MyCircuits.txt',
'tutorial_files/timestamped_dataset.txt',
'tutorial_files/Example_GST_Data'
]
def test_tutorials():
logging.getLogger('traitlets').setLevel(logging.CRITICAL)
with TemporaryDirectory() as tmp:
tmp_path = Path(tmp)
# Copy tutorial file resources
for f_path in _TUTORIAL_FILES:
src = _TUTORIALS_ROOT / f_path
dest = tmp_path / f_path
dest.parent.mkdir(parents=True, exist_ok=True)
if src.is_dir():
shutil.copytree(src, dest)
else:
shutil.copy(src, dest)
# Emit a test for each notebook
for nb_path in notebooks_in_path(_TUTORIALS_ROOT):
yield _make_test(nb_path, tmp_path, _TUTORIALS_ROOT)
|
420075
|
from tia.bbg.v3api import *
LocalTerminal = Terminal('localhost', 8194)
from tia.bbg.datamgr import *
|
420077
|
pkgname = "bsded"
pkgver = "0.99.0"
pkgrel = 0
build_style = "makefile"
pkgdesc = "FreeBSD ed(1) utility"
maintainer = "q66 <<EMAIL>>"
license = "BSD-2-Clause"
url = "https://github.com/chimera-linux/bsded"
source = f"https://github.com/chimera-linux/bsded/archive/refs/tags/v{pkgver}.tar.gz"
sha256 = "ae351b0a03519d2ec251f2fb3210eb402e4babd17b9c1e0f3ab2aa307bb3505f"
|
420101
|
from lxml import html
import requests
from bs4 import BeautifulSoup
import sys
import os
import re
import time
REGEX = r'\s*([\d.]+)'
count = 0
# This script prints the ID, description, severity, and link for every
# vulnerability reported against a dependency passed on the command line.
def usage(code=0):
print('''Usage: {} [options] component_name version
Choose a component and version to see any/all vulnerabilities
'''.format(os.path.basename(sys.argv[0])))
sys.exit(code)
#returns parsed items with the desired tag from website passed in
def returnSoupItemsDesc(link):
results = requests.get(link)
resultsContent = results.content
    #parse the page content into a BeautifulSoup tree
    soup = BeautifulSoup(resultsContent, 'xml')
return soup
def print_info(soup_items, link):
print('Potential vulnerabilities found at ' + time.strftime("%Y-%m-%d %H:%M"))
cvss_versions = soup_items.find_all('span', attrs={'data-testid':'page-header-vuln-id'})
for version in cvss_versions:
print('vulnerability id: {}\n'.format(version.text))
descriptions = soup_items.find_all('p', attrs={'data-testid':'vuln-analysis-description'})
for description in descriptions:
print('description: {}\n'.format(description.text))
version3_severity = soup_items.find_all('span', attrs={'data-testid':'vuln-cvssv3-base-score-severity'})
if len(version3_severity):
for severity in version3_severity:
print('version 3 severity: {}\n'.format(severity.text))
version2_severity = soup_items.find_all('span', attrs={'data-testid':'vuln-cvssv2-base-score-severity'})
if len(version2_severity):
for severity in version2_severity:
print('version 2 severity: {}\n'.format(severity.text))
print ('link to full description: {}\n'.format(link))
def version_cmp(version1, version2):
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
return cmp(normalize(version1), normalize(version2))
def cmp(a, b):
return (a > b) - (a < b)
def do_it_all(link):
    count = 0  #local flag; without this, reading 'count' at the end raises UnboundLocalError
    soup_items = returnSoupItemsDesc(link)
links = soup_items.find_all('a')
#loops through all lines of html code with the <a> tag
for item in links:
if 'CVE' in item.text:
#constructs link for one of the vulnerabilities
cve_link = 'https://nvd.nist.gov{}'.format(item.get('href'))
cve_soup_items = returnSoupItemsDesc(cve_link)
rows = cve_soup_items.find_all('tr', class_='vulnerable')
if(len(rows)>0):
last_columns = rows[len(rows)-1].findChildren('td')
num_columns = len(last_columns)
if dependency in last_columns[0].text:
#no version
if(no_version):
print_info(cve_soup_items, cve_link)
count = 1
#check version from column 1 (no 'up to' or 'from' columns)
elif (num_columns<2 or num_columns>3):
version_block = last_columns[0].text
try:
                            version = re.search(r'\s*([\d.]+).*?(\s*([\d.]+))', version_block).group(2)
if(version_cmp(version,user_version)>=0):
print_info(cve_soup_items, cve_link)
count = 1
except IndexError:
print_info(cve_soup_items, cve_link)
count = 1
elif (num_columns ==2):
version_block = last_columns[1].text
#\s*([\d.]+)
version = re.search(REGEX, version_block).group(1)
inc_or_exc = re.search('(inc|exc)', version_block).group(1)
if (inc_or_exc == 'inc'):
if (version_cmp(version,user_version)>=0):
print_info(cve_soup_items, cve_link)
count = 1
elif (inc_or_exc == 'exc'):
if (version_cmp(version,user_version)>0):
print_info(cve_soup_items, cve_link)
count = 1
else:
version_block = last_columns[2].text
#\s*([\d.]+)
version_high = re.search(REGEX, version_block).group(1)
version_block_first = rows[0].findChildren('td')[1]
version_low = re.search(REGEX, version_block_first.text).group(1)
end = False
#if user_version is outside of version range
if(version_cmp(version_high,user_version)<0 or version_cmp(version_low, user_version)>0):
end = True
#not outside of range and only one row
elif(len(rows)==1):
print_info(cve_soup_items, cve_link)
count = 1
end = True
#more than 1 row
current_row = 0
current_col = 1
# print('2 +rows')
while not end:
columns = rows[current_row].findChildren('td')
#version less than up to of first row
if version_cmp(re.search(REGEX,columns[current_col+1].text).group(1),user_version)>0:
print_info(cve_soup_items, cve_link)
count = 1
end = True
#version less than from
elif version_cmp(re.search(REGEX,rows[current_row+1].findChildren('td')[current_col].text).group(1),user_version)>0:
end = True
#check next row
else:
current_row = current_row + 1
if count == 0:
print('No potential vulnerabilities found at ' + time.strftime("%Y-%m-%d %H:%M"))
if len(sys.argv[1:]) == 2:
dependency = sys.argv[1]
user_version = sys.argv[2]
link= 'https://nvd.nist.gov/vuln/search/results?form_type=Basic&results_type=overview&query={}&search_type=all'.format(dependency)
dependency=dependency.replace("+","_")
no_version = False
elif len(sys.argv[1:])==1:
dependency = sys.argv[1]
link= 'https://nvd.nist.gov/vuln/search/results?form_type=Basic&results_type=overview&query={}&search_type=all'.format(dependency)
dependency=dependency.replace("+","_")
no_version = True
else:
usage(1)
if __name__ == '__main__':
do_it_all(link)
sys.exit(0)
|
420132
|
import warnings
from ._conf import PYRAMID_PARAMS
from ._funcs import _get_crs, _verify_shape_bounds
from ._types import Bounds, Shape
class GridDefinition(object):
"""Object representing the tile pyramid source grid."""
def __init__(
self, grid=None, shape=None, bounds=None, srs=None, is_global=False, **kwargs
):
if isinstance(grid, str) and grid in PYRAMID_PARAMS:
self.type = grid
self.shape = Shape(*PYRAMID_PARAMS[grid]["shape"])
self.bounds = Bounds(*PYRAMID_PARAMS[grid]["bounds"])
self.is_global = PYRAMID_PARAMS[grid]["is_global"]
self.crs = _get_crs(PYRAMID_PARAMS[grid]["srs"])
self.left, self.bottom, self.right, self.top = self.bounds
elif grid is None or grid == "custom":
for i in ["proj", "epsg"]:
if i in kwargs:
srs = {i: kwargs[i]} if srs is None else srs
warnings.warn(
DeprecationWarning(
"'%s' should be packed into a dictionary and passed to "
"'srs'" % i
)
)
self.type = "custom"
_verify_shape_bounds(shape=shape, bounds=bounds)
self.shape = Shape(*shape)
self.bounds = Bounds(*bounds)
self.is_global = is_global
self.crs = _get_crs(srs)
self.left, self.bottom, self.right, self.top = self.bounds
# check if parameters match with default grid type
for default_grid_name in PYRAMID_PARAMS:
default_grid = GridDefinition(default_grid_name)
if self.__eq__(default_grid):
self.type = default_grid_name
elif isinstance(grid, dict):
if "type" in grid:
warnings.warn(
DeprecationWarning("'type' is deprecated and should be 'grid'")
)
if "grid" not in grid:
grid["grid"] = grid.pop("type")
self.__init__(**grid)
elif isinstance(grid, GridDefinition):
self.__init__(**grid.to_dict())
else:
raise ValueError("invalid grid definition: %s" % grid)
@property
def srid(self):
warnings.warn(DeprecationWarning("'srid' attribute is deprecated"))
return self.crs.to_epsg()
def to_dict(self):
return dict(
bounds=self.bounds,
is_global=self.is_global,
shape=self.shape,
srs=dict(wkt=self.crs.to_wkt()),
type=self.type,
)
    @staticmethod
    def from_dict(config_dict):
return GridDefinition(**config_dict)
def __eq__(self, other):
return (
isinstance(other, self.__class__)
and self.shape == other.shape
and self.bounds == other.bounds
and self.is_global == other.is_global
and self.crs == other.crs
)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
if self.type in PYRAMID_PARAMS:
return 'GridDefinition("%s")' % self.type
else:
return (
"GridDefinition("
'"%s", '
"shape=%s, "
"bounds=%s, "
"is_global=%s, "
"srs=%s"
")"
% (
self.type,
tuple(self.shape),
tuple(self.bounds),
self.is_global,
self.crs,
)
)
def __hash__(self):
return hash(repr(self))
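# Added usage sketch (illustrative values, kept commented): a custom grid is
# defined by shape, bounds, and an srs dict; this particular combination
# mirrors a global geodetic layout.
#
# custom = GridDefinition(
#     grid="custom",
#     shape=(1, 2),
#     bounds=(-180.0, -90.0, 180.0, 90.0),
#     srs={"epsg": 4326},
#     is_global=True,
# )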
|
420146
|
from queue import Queue, Empty
import threading
from asyncio import sleep
import traceback
import os
from .pull import GitPuller
class GitWrapper():
def __init__(self, repo, repobranch, repofolder):
self.repo = repo
self.finished = False
self.error = False
self.logs = []
self.gitpuller = GitPuller(repo, repobranch, repofolder)
self.repofolder = repofolder
async def start_pull(self):
print("Pulling from git repo")
# We don't need a lock on git puller since we only call it from this object.
try:
q = Queue()
def pull():
try:
for line in self.gitpuller.pull():
q.put_nowait(line)
q.put_nowait(None) # Signal we are done
except Exception as e:
q.put_nowait(e)
raise e
self.gp_thread = threading.Thread(target=pull)
self.gp_thread.start()
while True:
try:
progress = q.get_nowait()
except Empty:
await sleep(0.5)
continue
if progress is None:
os.chdir(self.repofolder)
break
if isinstance(progress, Exception):
self.logs.extend([
l.strip()
for l in traceback.format_exception(
type(progress), progress, progress.__traceback__
)
])
self.error = True
return
print(progress)
self.logs.append(progress)
finally:
self.finished = True
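# Added usage sketch (URL, branch, and folder are placeholders; start_pull is
# a coroutine, so it must be awaited from an async context):
#
# wrapper = GitWrapper("https://example.com/some/repo.git", "main", "./checkout")
# await wrapper.start_pull()
# if wrapper.error:
#     print("\n".join(wrapper.logs))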
|
420151
|
import binascii
import hashlib
import os
import random
import string
import subprocess
import sys
import time
import threading
import queue
BIN_PATH = "./build/test_random_sha1"
NTHREADS = 2
NTESTS = 10
NBYTES = 20
global still_making_input
still_making_input = True
tests = queue.Queue()
failures = list()
#
# Helper functions
#
def random_string(length):
    """ Return 'length' random bytes encoded as a lowercase hex string """
    ret = list()
    rand = random.Random()
    for i in range(length):
        ret.append("%.02x" % rand.randint(0, 255))
    return "".join(ret)
#selector = string.ascii_uppercase + string.ascii_lowercase + string.digits
#return ''.join(random.choice(selector) for _ in range(len))
def run_test(input_string, expected_output):
""" Run the C test program, comparing the Python SHA1 implementation to the one in C """
return subprocess.call([BIN_PATH, input_string, expected_output])
def run_in_thread(target_function):
t = threading.Thread(target=target_function)
t.start()
t.join()
def run_tests():
    while not tests.empty() or still_making_input:
        try:
            inp, out = tests.get_nowait()
retcode = run_test(inp, out)
if retcode != 0:
failures.append([inp, out])
sys.stdout.write("X")
else:
sys.stdout.write(".")
sys.stdout.flush()
        except queue.Empty:
time.sleep(0.1)
def make_test_input():
# Create input and expected output
    for i in range(NTESTS):
test_input = random_string(NBYTES)
#test_input = bytearray(random.getrandbits(8) for _ in xrange(8))
sha = hashlib.sha1()
sha.update(binascii.a2b_hex(test_input))
test_output = sha.hexdigest()
tests.put([test_input, test_output])
#
# Test driver
#
if __name__ == "__main__":
# Read NTESTS from stdin
if len(sys.argv) > 1:
if sys.argv[1].isdigit():
NTESTS = int(sys.argv[1])
# Read NTHREADS from stdin
if len(sys.argv) > 2:
if sys.argv[2].isdigit():
NTHREADS = int(sys.argv[2])
# Read NBYTES from stdin
if len(sys.argv) > 3:
if sys.argv[3].isdigit():
NBYTES = int(sys.argv[3])
# Tell user what is going to happen
print("")
str_threads = "thread"
if NTHREADS > 1:
str_threads += "s"
print("Running tests on %d %s SHA1-hashing %d random %d-byte strings," % (NTHREADS, str_threads, NTESTS, NBYTES))
print("comparing the results to the output of Python's hashlib.sha1().")
print("")
# Spawn thread to create test inputs in the background, instead of blocking here...
t_mk_input = threading.Thread(target=make_test_input)
t_mk_input.start()
# Create new threads
threadlist = list()
for i in range(NTHREADS):
threadlist.append(threading.Thread(target=run_tests))
# Run all threads
for i, thread in enumerate(threadlist):
thread.start()
# Wait for input-creation to complete
t_mk_input.join()
still_making_input = False
# Wait for threads to complete
for i, thread in enumerate(threadlist):
thread.join()
print(" ")
print(" ")
print("%d/%d tests succeeded." % (NTESTS - len(failures), NTESTS))
print(" ")
if len(failures) > 0:
error_log = open("error_log.txt", "a")
for fail_input, fail_output in failures:
            error_log.write("%s %s %s%s" % (BIN_PATH, fail_input, fail_output, os.linesep))
error_log.close()
|
420195
|
from typing import Optional, cast
from fastapi import APIRouter, Depends, HTTPException, Path, Query, Response, status
from opal_common.authentication.deps import JWTAuthenticator
from opal_common.authentication.types import JWTClaims
from opal_common.schemas.policy import PolicyBundle
from opal_common.schemas.policy_source import GitPolicyScopeSource
from opal_common.schemas.scopes import Scope
from opal_server.config import opal_server_config
from opal_server.git_fetcher import GitPolicyFetcher
from opal_server.scopes.scope_repository import ScopeNotFoundError, ScopeRepository
def init_scope_router(scopes: ScopeRepository, authenticator: JWTAuthenticator):
router = APIRouter(dependencies=[Depends(authenticator)])
def _allowed_scoped_authenticator(
claims: JWTClaims = Depends(authenticator), scope_id: str = Path(...)
):
allowed_scopes = claims.get("allowed_scopes")
if not allowed_scopes or scope_id not in allowed_scopes:
raise HTTPException(status.HTTP_403_FORBIDDEN)
@router.put("", status_code=status.HTTP_201_CREATED)
async def put_scope(*, scope_in: Scope):
await scopes.put(scope_in)
from opal_server.worker import sync_scope
sync_scope.delay(scope_in.scope_id)
return Response(status_code=status.HTTP_201_CREATED)
@router.get(
"/{scope_id}", response_model=Scope, response_model_exclude={"policy": {"auth"}}
)
async def get_scope(*, scope_id: str):
try:
scope = await scopes.get(scope_id)
return scope
except ScopeNotFoundError:
raise HTTPException(
status.HTTP_404_NOT_FOUND, detail=f"No such scope: {scope_id}"
)
@router.delete("/{scope_id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_scope(*, scope_id: str):
await scopes.delete(scope_id)
from opal_server.worker import delete_scope
delete_scope.delay(scope_id)
return Response(status_code=status.HTTP_204_NO_CONTENT)
@router.post("/{scope_id}", status_code=status.HTTP_200_OK)
async def refresh_scope(scope_id: str):
try:
_ = await scopes.get(scope_id)
from opal_server.worker import sync_scope
sync_scope.delay(scope_id)
return Response(status_code=status.HTTP_200_OK)
except ScopeNotFoundError:
raise HTTPException(
status.HTTP_404_NOT_FOUND, detail=f"No such scope: {scope_id}"
)
@router.get(
"/{scope_id}/policy",
response_model=PolicyBundle,
status_code=status.HTTP_200_OK,
dependencies=[Depends(_allowed_scoped_authenticator)],
)
async def get_scope_policy(
*,
scope_id: str = Path(..., title="Scope ID"),
base_hash: Optional[str] = Query(
None,
description="hash of previous bundle already downloaded, server will return a diff bundle.",
),
):
scope = await scopes.get(scope_id)
if isinstance(scope.policy, GitPolicyScopeSource):
fetcher = GitPolicyFetcher(
opal_server_config.BASE_DIR,
scope.scope_id,
cast(GitPolicyScopeSource, scope.policy),
)
bundle = fetcher.make_bundle(base_hash)
return bundle
return router
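# Added wiring sketch: mounting the router on a FastAPI app. The prefix and
# the way the repository/authenticator are obtained are illustrative, not
# taken from the OPAL server setup itself.
def create_app(scopes: ScopeRepository, authenticator: JWTAuthenticator):
    from fastapi import FastAPI

    app = FastAPI()
    app.include_router(init_scope_router(scopes, authenticator), prefix="/scopes")
    return app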
|
420202
|
from .map import Map
from .layer import Layer
from .source import Source
from .layout import Layout
from .themes import Themes as themes
from .basemaps import Basemaps as basemaps
from .palettes import Palettes as palettes
from .styles import animation_style
from .styles import basic_style
from .styles import color_bins_style
from .styles import color_category_style
from .styles import color_continuous_style
from .styles import cluster_size_style
from .styles import isolines_style
from .styles import size_bins_style
from .styles import size_category_style
from .styles import size_continuous_style
from .legends import basic_legend
from .legends import color_bins_legend
from .legends import color_category_legend
from .legends import color_continuous_legend
from .legends import size_bins_legend
from .legends import size_category_legend
from .legends import size_continuous_legend
from .legends import default_legend
from .widgets import basic_widget
from .widgets import animation_widget
from .widgets import category_widget
from .widgets import formula_widget
from .widgets import histogram_widget
from .widgets import time_series_widget
from .widgets import default_widget
from .popups import popup_element
from .popups import default_popup_element
from .kuviz import all_publications
from .kuviz import delete_publication
__all__ = [
'Map',
'Layer',
'Source',
'Layout',
'basemaps',
'themes',
'palettes',
'animation_style',
'basic_style',
'color_bins_style',
'color_category_style',
'color_continuous_style',
'cluster_size_style',
'isolines_style',
'size_bins_style',
'size_category_style',
'size_continuous_style',
'basic_legend',
'color_bins_legend',
'color_category_legend',
'color_continuous_legend',
'size_bins_legend',
'size_category_legend',
'size_continuous_legend',
'default_legend',
'animation_widget',
'basic_widget',
'category_widget',
'formula_widget',
'histogram_widget',
'time_series_widget',
'default_widget',
'popup_element',
'default_popup_element',
'all_publications',
'delete_publication'
]
|
420214
|
from typing import List

class Solution:
def candy(self, ratings: List[int]) -> int:
ratingsLength = len(ratings)
if ratingsLength == 0:
return 0
candies = [1] * ratingsLength
for i in range(1, ratingsLength):
if ratings[i] > ratings[i - 1]:
candies[i] = candies[i - 1] + 1
totalCandies = candies[ratingsLength - 1]
for i in range(ratingsLength - 2, -1, -1):
if ratings[i] > ratings[i + 1]:
candies[i] = max(candies[i], candies[i + 1] + 1)
totalCandies += candies[i]
return totalCandies
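# Added sanity check (standard example): ratings [1, 0, 2] admit the
# distribution [2, 1, 2], i.e. 5 candies in total.
if __name__ == "__main__":
    print(Solution().candy([1, 0, 2]))  # expected: 5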
|
420226
|
import pyb
from pyb import UART
from pyb import Pin
import time
class DHT11:
def __init__(self,pin_):
self.PinName=pin_
time.sleep(1)
self.gpio_pin = Pin(pin_, Pin.OUT_PP)
# pyb.delay(10)
def read_temp_hum(self):
data=[]
j=0
        # Reconfigure the pin as push-pull output before each read (cannot be skipped).
        gpio_pin = Pin(self.PinName, Pin.OUT_PP)
gpio_pin.low()
time.sleep(0.018)
gpio_pin.high()
#wait to response
gpio_pin = Pin(self.PinName,Pin.IN)
while gpio_pin.value()==1:
continue
while gpio_pin.value()==0:
continue
while gpio_pin.value()==1:
continue
#get data
while j<40:
k=0
while gpio_pin.value()==0:
continue
while gpio_pin.value()==1:
k+=1
if k>100:break
if k<3:
data.append(0)
else:
data.append(1)
j=j+1
j=0
humidity_bit=data[0:8]
humidity_point_bit=data[8:16]
temperature_bit=data[16:24]
temperature_point_bit=data[24:32]
check_bit=data[32:40]
humidity=0
humidity_point=0
temperature=0
temperature_point=0
check=0
temp_negative=0
        # If the sign bit (bit 24) is set, the temperature is negative;
        # clear it and remember the sign for the final conversion.
        if data[24] == 1:
            data[24] = 0
            temp_negative = 1
for i in range(8):
humidity+=humidity_bit[i]*2**(7-i)
humidity_point+=humidity_point_bit[i]*2**(7-i)
temperature+=temperature_bit[i]*2**(7-i)
temperature_point+=temperature_point_bit[i]*2**(7-i)
check+=check_bit[i]*2**(7-i)
        # DHT11 checksum is the low byte of the sum of the four data bytes.
        tmp = (humidity + humidity_point + temperature + temperature_point) & 0xFF
        if check == tmp:
            if temp_negative == 1:
                return -(temperature + temperature_point / 10), humidity + humidity_point / 10
            else:
                return temperature + temperature_point / 10, humidity + humidity_point / 10
        else:
            print('checksum ERROR')
            return 0, 0
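# Added usage sketch (pin name 'Y1' is a placeholder; use the pin your
# sensor's data line is wired to):
#
# sensor = DHT11('Y1')
# temperature, humidity = sensor.read_temp_hum()
# print(temperature, humidity)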
|
420235
|
from starry_process import calibrate
import numpy as np
import os
import shutil
# Utility funcs to move figures to this directory
abspath = lambda *args: os.path.join(
os.path.dirname(os.path.abspath(__file__)), *args
)
copy = lambda name, src, dest: shutil.copyfile(
abspath("data", name, src), abspath(dest)
)
# Run
calibrate.run(
path=abspath("data/hicontrast"),
generate=dict(nspots=dict(mu=2), contrast=dict(mu=0.5)),
sample=dict(compute_inclination_pdf=False),
)
# Copy output to this directory
copy(
"hicontrast", "corner_transformed.pdf", "calibration_hicontrast_corner.pdf"
)
copy("hicontrast", "latitude.pdf", "calibration_hicontrast_latitude.pdf")
|
420244
|
import multiprocessing
def square_mp(in_queue, out_queue):
    while True:
n = in_queue.get()
n_squared = n**2
out_queue.put(n_squared)
if __name__ == '__main__':
in_queue = multiprocessing.Queue()
out_queue = multiprocessing.Queue()
process = multiprocessing.Process(target=square_mp, args=(in_queue, out_queue))
process.start()
for i in range(10):
in_queue.put(i)
i_squared = out_queue.get()
print(f"{i} squared is {i_squared}")
process.terminate()
|
420248
|
import random as rnd
p = 0.3 # Probability of success
n = 10 # Number of trials
count = 0 # Count number of successes
def Bernoulli(p): # Bernoulli RVG Function
u = rnd.random()
if 0 <= u < p:
return 1
else:
return 0
for i in range(n):
count = count + Bernoulli(p)
print('v =', count)
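# Added sanity sketch: the success count over n trials is Binomial(n, p),
# so its sample mean over many repetitions should approach n * p = 3.0.
counts = [sum(Bernoulli(p) for _ in range(n)) for _ in range(10000)]
print('mean =', sum(counts) / len(counts))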
|
420273
|
from sklearn.metrics import average_precision_score, accuracy_score, f1_score
def acc_f1(output, labels, average='binary'):
preds = output.max(1)[1].type_as(labels)
if preds.is_cuda:
preds = preds.cpu()
labels = labels.cpu()
    # sklearn metrics expect (y_true, y_pred); pass ground-truth labels first
    accuracy = accuracy_score(labels, preds)
    f1 = f1_score(labels, preds, average=average)
return accuracy, f1
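# Added sanity sketch (toy tensors): a perfect prediction should give
# accuracy == f1 == 1.0.
if __name__ == "__main__":
    import torch
    logits = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
    labels = torch.tensor([1, 0])
    print(acc_f1(logits, labels))  # (1.0, 1.0)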
|
420306
|
from __future__ import annotations
import typing
from ctc import rpc
from ctc import spec
async def async_is_contract_address(
address: spec.Address,
block: spec.BlockNumberReference = 'latest',
provider: spec.ProviderSpec = None,
) -> bool:
code = await rpc.async_eth_get_code(
address=address,
block_number=block,
provider=provider,
)
return len(code) >= 3
async def async_are_contract_addresses(
addresses: typing.Sequence[spec.Address],
block: spec.BlockNumberReference = 'latest',
provider: spec.ProviderSpec = None,
) -> dict[spec.Address, bool]:
codes = await rpc.async_batch_eth_get_code(
addresses=addresses,
block_number=block,
provider=provider,
)
    return {address: len(code) >= 3 for address, code in zip(addresses, codes)}  # match async_is_contract_address
|
420323
|
import argparse
import logging
from abc import ABC
from typing import Dict, List
import numpy as np
from data.dataset import Dataset, TestUnit, TestUnits
from simulation.outcome_generators import OutcomeGenerator
from simulation.treatment_assignment import TreatmentAssignmentPolicy
def get_treatment_ids(treatment_assignments: List[Dict]):
return [
unit_treatments["treatment_ids"] for unit_treatments in treatment_assignments
]
class AbstractDataGenerator(ABC):
def __init__(
self,
id_to_graph_dict,
treatment_assignment_policy: TreatmentAssignmentPolicy,
outcome_generator: OutcomeGenerator,
in_sample_dataset: Dataset,
out_sample_dataset: Dataset,
args: argparse.Namespace,
) -> None:
self.id_to_graph_dict = id_to_graph_dict
self.treatment_assignment_policy = treatment_assignment_policy
self.outcome_generator = outcome_generator
self.in_sample_dataset = in_sample_dataset
self.out_sample_dataset = out_sample_dataset
self.args = args
def get_train_assignments(self, units: np.ndarray) -> list:
return [
self.treatment_assignment_policy.assign_treatment(unit) for unit in units
]
def get_test_assignments(
self, units: np.ndarray, mode: str, num_test_treatments_per_unit: int
) -> list:
return [
self.treatment_assignment_policy.get_assignments_for_unit(
unit=unit,
num_test_treatments_per_unit=num_test_treatments_per_unit,
mode=mode,
)
for unit in units
]
class DataGenerator(AbstractDataGenerator):
def __init__(
self,
id_to_graph_dict,
treatment_assignment_policy: TreatmentAssignmentPolicy,
outcome_generator: OutcomeGenerator,
in_sample_dataset: Dataset,
out_sample_dataset: Dataset,
args: argparse.Namespace,
):
super().__init__(
id_to_graph_dict,
treatment_assignment_policy,
outcome_generator,
in_sample_dataset,
out_sample_dataset,
args,
)
def generate_train_data(self) -> None:
treatment_ids = [
self.treatment_assignment_policy.assign_treatment(unit)
for unit in self.in_sample_dataset.get_units()
]
        logging.info("Unique assigned treatments: %s", np.unique(treatment_ids))
outcomes = self.outcome_generator.generate_outcomes_for_units(
units=self.in_sample_dataset.get_units(), treatment_ids=treatment_ids
)
self.in_sample_dataset.add_assigned_treatments(treatment_ids=treatment_ids)
self.in_sample_dataset.add_outcomes(outcomes=outcomes)
def get_unseen_treatments(
self,
in_sample_treatment_assignments: List[Dict],
out_sample_treatment_assignments: List[Dict],
) -> List:
in_sample_ids = get_treatment_ids(in_sample_treatment_assignments)
out_sample_ids = get_treatment_ids(out_sample_treatment_assignments)
all_test_ids = np.concatenate((in_sample_ids, out_sample_ids)).flatten()
set_test_ids = set(np.unique(all_test_ids))
set_train_ids = set(self.in_sample_dataset.get_unique_treatment_ids())
set_unseen_test_ids = set_test_ids - set_train_ids
return list(set_unseen_test_ids)
def generate_test_units(
self, test_units: np.ndarray, test_assignments: List[Dict]
) -> List[TestUnit]:
test_data = []
test_assignments_ids = get_treatment_ids(test_assignments)
treatment_propensities = [
unit_treatments["propensities"] for unit_treatments in test_assignments
]
for i in range(len(test_units)):
true_outcomes = self.outcome_generator.generate_outcomes_for_unit(
unit=test_units[i], treatment_ids=test_assignments_ids[i]
)
test_unit = TestUnit(
covariates=test_units[i],
treatment_ids=test_assignments_ids[i],
treatment_propensities=treatment_propensities[i],
true_outcomes=true_outcomes,
)
test_data.append(test_unit)
return test_data
def generate_test_data(self) -> TestUnits:
in_sample_units, out_sample_units = (
self.in_sample_dataset.get_units(),
self.out_sample_dataset.get_units(),
)
logging.info(f"Num in-sample units: {len(in_sample_units)}")
logging.info(f"Num out-sample units: {len(out_sample_units)}")
in_sample_treatment_assignments = self.get_test_assignments(
units=in_sample_units,
mode="most",
num_test_treatments_per_unit=self.args.max_test_assignments,
)
out_sample_treatment_assignments = self.get_test_assignments(
units=out_sample_units,
mode="most",
num_test_treatments_per_unit=self.args.max_test_assignments,
)
in_sample_test_units = self.generate_test_units(
test_units=in_sample_units, test_assignments=in_sample_treatment_assignments
)
out_sample_test_units = self.generate_test_units(
test_units=out_sample_units,
test_assignments=out_sample_treatment_assignments,
)
test_units_dict = {
"in_sample": in_sample_test_units,
"out_sample": out_sample_test_units,
}
unseen_treatment_ids = self.get_unseen_treatments(
in_sample_treatment_assignments=in_sample_treatment_assignments,
out_sample_treatment_assignments=out_sample_treatment_assignments,
)
return TestUnits(
test_units_dict=test_units_dict,
id_to_graph_dict=self.id_to_graph_dict,
unseen_treatment_ids=unseen_treatment_ids,
)
|
420380
|
import multiprocessing
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from .SurvivalTree import SurvivalTree
from .scoring import concordance_index
class RandomSurvivalForest:
def __init__(self, n_estimators=100, min_leaf=3, unique_deaths=3,
n_jobs=None, parallelization_backend="multiprocessing", oob_score=False):
"""
A Random Survival Forest is a prediction model especially designed for survival analysis.
:param n_estimators: The numbers of trees in the forest.
:param min_leaf: The minimum number of samples required to be at a leaf node. A split point at any depth will
only be considered if it leaves at least min_leaf training samples in each of the left and right branches.
:param unique_deaths: The minimum number of unique deaths required to be at a leaf node.
:param n_jobs: The number of jobs to run in parallel for fit. None means 1.
"""
self.n_estimators = n_estimators
self.min_leaf = min_leaf
self.unique_deaths = unique_deaths
self.n_jobs = n_jobs
self.parallelization_backend = parallelization_backend
self.bootstrap_idxs = None
self.bootstraps = []
self.oob_idxs = None
self.oob_score = oob_score
self.trees = []
self.timeline = None
    def fit(self, x, y):
        """
        Build a forest of trees from the training set (X, y).
        :param x: The input samples. Should be a Dataframe with the shape [n_samples, n_features].
        :param y: The target values as a Dataframe with the shape [n_samples, 2], holding the event
        indicator in the first column and the survival time in the second (the implementation
        reads the survival time from column index 1).
        :return: self: object
        """
self.timeline = range(y.iloc[:, 1].min(), y.iloc[:, 1].max(), 1)
if self.n_jobs == -1:
self.n_jobs = multiprocessing.cpu_count()
elif self.n_jobs is None:
self.n_jobs = 1
self.bootstrap_idxs = self.draw_bootstrap_samples(x)
trees = Parallel(n_jobs=self.n_jobs, backend=self.parallelization_backend)(delayed(self.create_tree)(x, y, i)
for i in range(self.n_estimators))
for i in range(len(trees)):
if trees[i].prediction_possible:
self.trees.append(trees[i])
self.bootstraps.append(self.bootstrap_idxs[i])
if self.oob_score:
self.oob_score = self.compute_oob_score(x, y)
return self
def create_tree(self, x, y, i):
"""
Grows a survival tree for the bootstrap samples.
:param y: label data frame y with survival time as the first column and event as second
:param x: feature data frame x
:param i: Indices
:return: SurvivalTree
"""
n_features = int(round(np.sqrt(x.shape[1]), 0))
f_idxs = np.random.permutation(x.shape[1])[:n_features]
tree = SurvivalTree(x=x.iloc[self.bootstrap_idxs[i], :], y=y.iloc[self.bootstrap_idxs[i], :],
f_idxs=f_idxs, n_features=n_features,
unique_deaths=self.unique_deaths, min_leaf=self.min_leaf,
timeline=self.timeline)
return tree
def compute_oob_ensembles(self, xs):
"""
Compute OOB ensembles.
:return: List of oob ensemble for each sample.
"""
results = [compute_oob_ensemble_chf(sample_idx=sample_idx, xs=xs, trees=self.trees,
bootstraps=self.bootstraps) for sample_idx in range(xs.shape[0])]
oob_ensemble_chfs = [i for i in results if not i.empty]
return oob_ensemble_chfs
def compute_oob_score(self, x, y):
"""
Compute the oob score (concordance-index).
:return: c-index of oob samples
"""
oob_ensembles = self.compute_oob_ensembles(x)
c = concordance_index(y_time=y.iloc[:, 1], y_pred=oob_ensembles, y_event=y.iloc[:, 0])
return c
def predict(self, xs):
"""
Predict survival for xs.
:param xs: The input samples
:return: List of the predicted cumulative hazard functions.
"""
ensemble_chfs = [compute_ensemble_chf(sample_idx=sample_idx, xs=xs, trees=self.trees)
for sample_idx in range(xs.shape[0])]
return ensemble_chfs
def draw_bootstrap_samples(self, data):
"""
Draw bootstrap samples
:param data: Data to draw bootstrap samples of.
:return: Bootstrap indices for each of the trees
"""
bootstrap_idxs = []
for i in range(self.n_estimators):
no_samples = len(data)
data_rows = range(no_samples)
bootstrap_idx = np.random.choice(data_rows, no_samples)
bootstrap_idxs.append(bootstrap_idx)
return bootstrap_idxs
def compute_ensemble_chf(sample_idx, xs, trees):
denominator = 0
numerator = 0
for b in range(len(trees)):
sample = xs.iloc[sample_idx].to_list()
chf = trees[b].predict(sample)
denominator = denominator + 1
numerator = numerator + 1 * chf
ensemble_chf = numerator / denominator
return ensemble_chf
def compute_oob_ensemble_chf(sample_idx, xs, trees, bootstraps):
denominator = 0
numerator = 0
for b in range(len(trees)):
if sample_idx not in bootstraps[b]:
sample = xs.iloc[sample_idx].to_list()
chf = trees[b].predict(sample)
denominator = denominator + 1
numerator = numerator + 1 * chf
if denominator != 0:
oob_ensemble_chf = numerator / denominator
else:
        oob_ensemble_chf = pd.Series(dtype=float)
return oob_ensemble_chf
|
420407
|
from torch import nn
import torch
class MultiClassLogisticRegression(nn.Module):
def __init__(self,
theta_params: int,
num_of_classes: int):
super(MultiClassLogisticRegression, self).__init__()
self.__linear = nn.Linear(theta_params, num_of_classes)
def forward(self,
x_input: torch.tensor) -> torch.tensor:
return self.__linear(x_input)
|
420408
|
import nuke
def shuffle():
for selected_node in nuke.selectedNodes():
if selected_node.Class() == 'Read':
all_channels = selected_node.channels()
all_channels = list(set([i.split('.')[0] for i in all_channels]))
for channel in all_channels:
shuffle_node = nuke.createNode('Shuffle', inpanel=False)
shuffle_node['name'].setValue(channel+'_'+selected_node['name'].getValue())
shuffle_node['in'].setValue(channel)
shuffle_node.setInput(0, selected_node)
shuffle_node['postage_stamp'].setValue(1)
def main():
shuffle()
|
420432
|
import numpy as np
from tqdm import trange, tqdm
import tensorflow as tf
from .fedbase import BaseFedarated
from flearn.optimizer.pgd import PerturbedGradientDescent
from flearn.utils.tf_utils import process_grad, process_sparse_grad
class Server(BaseFedarated):
def __init__(self, params, learner, dataset):
print('Using Federated prox to Train')
self.inner_opt = PerturbedGradientDescent(params['learning_rate'], params['mu'])
super(Server, self).__init__(params, learner, dataset)
def train(self):
'''Train using Federated Proximal'''
print('Training with {} workers ---'.format(self.clients_per_round))
for i in range(self.num_rounds):
# test model
if i % self.eval_every == 0:
stats = self.test() # have set the latest model for all clients
stats_train = self.train_error_and_loss()
tqdm.write('At round {} accuracy: {}'.format(i, np.sum(stats[3])*1.0/np.sum(stats[2]))) # testing accuracy
tqdm.write('At round {} training accuracy: {}'.format(i, np.sum(stats_train[3])*1.0/np.sum(stats_train[2])))
tqdm.write('At round {} training loss: {}'.format(i, np.dot(stats_train[4], stats_train[2])*1.0/np.sum(stats_train[2])))
model_len = process_grad(self.latest_model).size
global_grads = np.zeros(model_len)
client_grads = np.zeros(model_len)
num_samples = []
local_grads = []
for c in self.clients:
num, client_grad = c.get_grads(model_len)
local_grads.append(client_grad)
num_samples.append(num)
global_grads = np.add(global_grads, client_grad * num)
global_grads = global_grads * 1.0 / np.sum(np.asarray(num_samples))
difference = 0
for idx in range(len(self.clients)):
difference += np.sum(np.square(global_grads - local_grads[idx]))
difference = difference * 1.0 / len(self.clients)
tqdm.write('gradient difference: {}'.format(difference))
indices, selected_clients = self.select_clients(i, num_clients=self.clients_per_round) # uniform sampling
np.random.seed(i) # make sure that the stragglers are the same for FedProx and FedAvg
active_clients = np.random.choice(selected_clients, round(self.clients_per_round * (1 - self.drop_percent)), replace=False)
csolns = [] # buffer for receiving client solutions
self.inner_opt.set_params(self.latest_model, self.client_model)
for idx, c in enumerate(selected_clients.tolist()):
# communicate the latest model
c.set_params(self.latest_model)
total_iters = int(self.num_epochs * c.num_samples / self.batch_size)+2 # randint(low,high)=[low,high)
# solve minimization locally
if c in active_clients:
soln, stats = c.solve_inner(num_epochs=self.num_epochs, batch_size=self.batch_size)
else:
#soln, stats = c.solve_iters(num_iters=np.random.randint(low=1, high=total_iters), batch_size=self.batch_size)
soln, stats = c.solve_inner(num_epochs=np.random.randint(low=1, high=self.num_epochs), batch_size=self.batch_size)
# gather solutions from client
csolns.append(soln)
# track communication cost
self.metrics.update(rnd=i, cid=c.id, stats=stats)
# update models
self.latest_model = self.aggregate(csolns)
self.client_model.set_params(self.latest_model)
# final test model
stats = self.test()
stats_train = self.train_error_and_loss()
self.metrics.accuracies.append(stats)
self.metrics.train_accuracies.append(stats_train)
tqdm.write('At round {} accuracy: {}'.format(self.num_rounds, np.sum(stats[3])*1.0/np.sum(stats[2])))
tqdm.write('At round {} training accuracy: {}'.format(self.num_rounds, np.sum(stats_train[3])*1.0/np.sum(stats_train[2])))
|
420475
|
import pytest
import torch.nn as nn
from kornia.metrics import AverageMeter
from kornia.x import EarlyStopping, ModelCheckpoint
from kornia.x.utils import TrainerState
@pytest.fixture
def model():
return nn.Conv2d(3, 10, kernel_size=1)
def test_callback_modelcheckpoint(tmp_path, model):
cb = ModelCheckpoint(tmp_path, 'test_monitor')
assert cb is not None
metric = {'test_monitor': AverageMeter()}
metric['test_monitor'].avg = 1.
cb(model, epoch=0, valid_metric=metric)
assert cb.best_metric == 1.0
assert (tmp_path / "model_0.pt").is_file()
def test_callback_earlystopping(model):
cb = EarlyStopping('test_monitor', patience=2)
assert cb is not None
assert cb.counter == 0
metric = {'test_monitor': AverageMeter()}
metric['test_monitor'].avg = 1
state = cb(model, epoch=0, valid_metric=metric)
assert state == TrainerState.TRAINING
assert cb.best_score == -1
assert cb.counter == 0
metric['test_monitor'].avg = 2
state = cb(model, epoch=0, valid_metric=metric)
assert state == TrainerState.TRAINING
assert cb.best_score == -1
assert cb.counter == 1
state = cb(model, epoch=0, valid_metric=metric)
assert state == TrainerState.TERMINATE
|
420492
|
import os
from src.domain.ErrorTypes import ErrorTypes
from src.utils.code_generation import CodeGenerationUtils
from src.validity import CVValiditiyChecker
from src.validity import IncomingEdgeValidityChecker
def generate_code(args):
node = args["node"]
requireds_info = args["requireds_info"]
edges = args["edges"]
checklist={"df_count": {1}, "model_count": {0}}
error, extra= IncomingEdgeValidityChecker.check_validity(node["id"], requireds_info, edges, checklist)
final_code=[]
shared_function_set = set()
additional_local_code = []
errors = []
if(error == ErrorTypes.NO_ERROR):
error, extra2= CVValiditiyChecker.check_validity(node["nodes"], node["edges"])
if ("portion" in extra["dfs"][0]):
df_name = "df_" + extra["dfs"][0]["source_id"] + "[" + str(extra["dfs"][0]["portion"]) + "]"
else:
df_name = "df_" + extra["dfs"][0]["source_id"]
my_args = {"node_id": node["id"], "input_dfs": [df_name], "shared_function_set": shared_function_set, "additional_local_code": additional_local_code, "errors": errors}
gen_code=[]
gen_code.extend(__generate_code_for_estimator_instantination(node["nodes"][extra2["estimator_node_id"]], my_args))
gen_code.extend(__generate_code_for_evaluator_instantination(node["nodes"][extra2["evaluator_node_id"]], my_args))
gen_code.extend(__generate_code_for_param_grid(node, 'estimator_' + extra2["estimator_node_id"], my_args))
gen_code.extend(__generate_code_for_cv_instantination(node, extra2["estimator_node_id"], extra2["evaluator_node_id"]))
gen_code.extend(['model_' + node["id"] + "=" + 'cv_' + node["id"] + ".fit(" + df_name + ")", os.linesep])
# Following might not be logical unless you aim to predict on training data for some specific needs.
gen_code.extend(['df_' + node["id"] + "=" + 'model_' + node["id"] + '.transform(' + df_name + ')', os.linesep])
final_code = CodeGenerationUtils.merge_with_additional_code(gen_code, additional_local_code)
return final_code, shared_function_set, error
def __generate_code_for_cv_instantination(cv_node, estimator_node_id, evaluator_node_id):
return ['cv_'+cv_node["id"] + "=CrossValidator(estimator=", 'estimator_' + estimator_node_id + ", estimatorParamMaps=param_grid_" + cv_node["id"] + ", evaluator=" + 'evaluator_' + evaluator_node_id +")", os.linesep]
def __generate_code_for_estimator_instantination(node, args):
return CodeGenerationUtils.handle_instantination_or_call(node["parameters"], 'estimator_' + node["id"] + ' = ' + node["estimator_name"] + '(', args)
def __generate_code_for_evaluator_instantination(node, args):
return CodeGenerationUtils.handle_instantination_or_call(node["parameters"], 'evaluator_' + node["id"] + ' = ' + node["evaluator_name"] + '(', args)
def __generate_code_for_param_grid(node, cur_estimator_name, args):
# In the future handle this in special requirement handler for parameters
code=["param_grid_" + node["id"] + "=", "None", os.linesep]
# Assuming that fix parameters are given in the estimator itself.
# Maybe reconsider this part.
grid_params = node["parameters"]["parameter_grid"]
if(bool(grid_params)):
code.pop()
code.pop()
code.extend(["ParamGridBuilder()"])
for param in grid_params:
code.extend([".addGrid(" + cur_estimator_name + "." + param + ", " + CodeGenerationUtils.handle_parameter(grid_params[param], args) + ")"])
code.extend([".build()", os.linesep])
return code
|
420495
|
import torch
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
matplotlib.use('Agg')
def last_layer_analysis(heads, task, taskcla, y_lim=False, sort_weights=False):
"""Plot last layer weight and bias analysis"""
print('Plotting last layer analysis...')
num_classes = sum([x for (_, x) in taskcla])
weights, biases, indexes = [], [], []
class_id = 0
with torch.no_grad():
for t in range(task + 1):
n_classes_t = taskcla[t][1]
indexes.append(np.arange(class_id, class_id + n_classes_t))
if type(heads) == torch.nn.Linear: # Single head
biases.append(heads.bias[class_id: class_id + n_classes_t].detach().cpu().numpy())
weights.append((heads.weight[class_id: class_id + n_classes_t] ** 2).sum(1).sqrt().detach().cpu().numpy())
else: # Multi-head
weights.append((heads[t].weight ** 2).sum(1).sqrt().detach().cpu().numpy())
if type(heads[t]) == torch.nn.Linear:
biases.append(heads[t].bias.detach().cpu().numpy())
else:
biases.append(np.zeros(weights[-1].shape)) # For LUCIR
class_id += n_classes_t
# Figure weights
f_weights = plt.figure(dpi=300)
ax = f_weights.subplots(nrows=1, ncols=1)
for i, (x, y) in enumerate(zip(indexes, weights), 0):
if sort_weights:
ax.bar(x, sorted(y, reverse=True), label="Task {}".format(i))
else:
ax.bar(x, y, label="Task {}".format(i))
ax.set_xlabel("Classes", fontsize=11, fontfamily='serif')
ax.set_ylabel("Weights L2-norm", fontsize=11, fontfamily='serif')
if num_classes is not None:
ax.set_xlim(0, num_classes)
if y_lim:
ax.set_ylim(0, 5)
ax.legend(loc='upper left', fontsize='11') #, fontfamily='serif')
# Figure biases
f_biases = plt.figure(dpi=300)
ax = f_biases.subplots(nrows=1, ncols=1)
for i, (x, y) in enumerate(zip(indexes, biases), 0):
if sort_weights:
ax.bar(x, sorted(y, reverse=True), label="Task {}".format(i))
else:
ax.bar(x, y, label="Task {}".format(i))
ax.set_xlabel("Classes", fontsize=11, fontfamily='serif')
ax.set_ylabel("Bias values", fontsize=11, fontfamily='serif')
if num_classes is not None:
ax.set_xlim(0, num_classes)
if y_lim:
ax.set_ylim(-1.0, 1.0)
ax.legend(loc='upper left', fontsize='11') #, fontfamily='serif')
return f_weights, f_biases
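# Added usage sketch (toy single-head setup; taskcla pairs are assumed to be
# (task_id, num_classes) tuples, matching how they are indexed above):
if __name__ == "__main__":
    head = torch.nn.Linear(8, 5)
    f_w, f_b = last_layer_analysis(head, task=1, taskcla=[(0, 2), (1, 3)], y_lim=True)
    f_w.savefig("last_layer_weights.png")
    f_b.savefig("last_layer_biases.png")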
|
420529
|
from secml.testing import CUnitTest
from secml.data.loader import CDataLoaderMNIST
class TestCDataLoaderMNIST(CUnitTest):
"""Unittests for CDataLoaderMNIST."""
def test_load(self):
digits = (1, 5, 9)
tr = CDataLoaderMNIST().load('training', digits=digits)
self.logger.info(
"Loading {:} training set samples".format(tr.num_samples))
self.assertEqual(tr.num_samples, 18112)
ts = CDataLoaderMNIST().load('testing', digits=digits)
self.logger.info(
"Loading {:} test set samples".format(ts.num_samples))
self.assertEqual(ts.num_samples, 3036)
n_tr = 1000
n_ts = 1000
tr = CDataLoaderMNIST().load(
'training', digits=digits, num_samples=n_tr)
self.logger.info(
"Loading {:} training set samples".format(tr.num_samples))
self.assertEqual(tr.num_samples, n_tr)
ts = CDataLoaderMNIST().load(
'testing', digits=digits, num_samples=n_ts)
self.logger.info(
"Loading {:} test set samples".format(ts.num_samples))
self.assertEqual(ts.num_samples, n_ts)
# Not enough number of samples (1666) for each desired digit
# in the test set. ValueError should be raised
with self.assertRaises(ValueError):
CDataLoaderMNIST().load('testing', digits=digits, num_samples=5000)
|
420576
|
from django.contrib import admin
from .models import PatientRegister, DoctorRegister, Emergency, Appointment
# Register your models here.
admin.site.register(PatientRegister)
admin.site.register(DoctorRegister)
admin.site.register(Emergency)
admin.site.register(Appointment)
|
420597
|
from sklearn.model_selection import KFold as R_KFold
from horch.datasets import Subset
def k_fold(ds, n_splits=5, shuffle=True, transform=None, test_transform=None, random_state=None):
    n = len(ds)
    # Use sklearn's KFold here: its split() yields index arrays, which the
    # Subset wrappers below expect (the custom KFold below yields Subsets).
    kf = R_KFold(n_splits=n_splits, shuffle=shuffle, random_state=random_state)
for train_indices, test_indices in kf.split(list(range(n))):
ds_train = Subset(ds, train_indices, transform)
ds_test = Subset(ds, test_indices, test_transform)
yield ds_train, ds_test
class KFold(R_KFold):
    def __init__(self, n_splits=5, shuffle=True, transform=None, test_transform=None, random_state=None):
        super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state)
self.transform = transform
self.test_transform = test_transform
def split(self, ds, y=None, groups=None):
n = len(ds)
for train_indices, test_indices in super().split(list(range(n))):
ds_train = Subset(ds, train_indices, self.transform)
ds_test = Subset(ds, test_indices, self.test_transform)
yield ds_train, ds_test
def cross_val_score(fit_fn, ds, cv: KFold, verbose=0):
scores = []
for i, (ds_train, ds_val) in enumerate(cv.split(ds)):
if verbose == 1:
print(f"Round {i+1}")
score = fit_fn(ds_train, ds_val, verbose)
print(score)
scores.append(score)
return scores
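# Added usage sketch (fit_fn, ds, and the transforms are user-supplied
# placeholders):
#
# cv = KFold(n_splits=5, transform=train_transform, test_transform=eval_transform)
# scores = cross_val_score(fit_fn, ds, cv, verbose=1)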
|
420598
|
from matplotlib.backends.backend_pdf import PdfPages
my_pdf = PdfPages('reports.pdf')
my_pdf.close()
|
420628
|
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from builtins import str
from builtins import int
from builtins import open
from future import standard_library
standard_library.install_aliases()
import os
import re
import json
import requests
import math
import backoff
import hashlib
import copy
import errno
import shutil
import traceback
from glob import glob
from datetime import datetime
from subprocess import check_output
from urllib.request import urlopen
from io import StringIO
from lxml.etree import XMLParser, parse, tostring
from importlib import import_module
from celery.result import AsyncResult
from atomicwrites import atomic_write
from bisect import insort
import hysds
from hysds.log_utils import logger, log_prov_es, payload_hash_exists
from hysds.celery import app
from hysds.es_util import get_grq_es
import osaka.main
grq_es = get_grq_es()
# disk usage setting converter
DU_CALC = {"GB": 1024 ** 3, "MB": 1024 ** 2, "KB": 1024}
class NoDedupJobFoundException(Exception):
def __init__(self, message):
self.message = message
super(NoDedupJobFoundException, self).__init__(message)
def get_module(m):
"""Import module and return."""
try:
return import_module(m)
except ImportError:
logger.error('Failed to import module "%s".' % m)
raise
def get_func(f):
"""Import function and return."""
if "." in f:
mod_name, func_name = f.rsplit(".", 1)
mod = get_module(mod_name)
try:
return getattr(mod, func_name)
except AttributeError:
logger.error(
'Failed to get function "%s" from module "%s".' % (func_name, mod_name)
)
raise
else:
try:
return eval(f)
except NameError:
logger.error('Failed to get function "%s".' % (f))
raise
@app.task
def error_handler(uuid):
"""Error handler function."""
result = AsyncResult(uuid)
exc = result.get(propagate=False)
logger.info("Task %s raised exception: %s\n%s" % (uuid, exc, result.traceback))
def get_download_params(url):
"""Set osaka download params."""
params = {}
# set profile
for prof in app.conf.get("BUCKET_PROFILES", []):
if "profile_name" in params:
break
if prof.get("bucket_patterns", None) is None:
params["profile_name"] = prof["profile"]
break
else:
if isinstance(prof["bucket_patterns"], list):
bucket_patterns = prof["bucket_patterns"]
else:
bucket_patterns = [prof["bucket_patterns"]]
            for bucket_pattern in bucket_patterns:
regex = re.compile(bucket_pattern)
match = regex.search(url)
if match:
logger.info(
"{} matched '{}' for profile {}.".format(
url, bucket_pattern, prof["profile"]
)
)
params["profile_name"] = prof["profile"]
break
return params
def download_file(url, path, cache=False):
"""Download file/dir for input."""
params = get_download_params(url)
if cache:
url_hash = hashlib.md5(url.encode()).hexdigest()
hash_dir = os.path.join(app.conf.ROOT_WORK_DIR, "cache", *url_hash[0:4])
cache_dir = os.path.join(hash_dir, url_hash)
makedirs(cache_dir)
signal_file = os.path.join(cache_dir, ".localized")
if os.path.exists(signal_file):
logger.info("cache hit for {} at {}".format(url, cache_dir))
else:
logger.info("cache miss for {}".format(url))
try:
osaka.main.get(url, cache_dir, params=params)
except Exception as e:
shutil.rmtree(cache_dir)
tb = traceback.format_exc()
raise RuntimeError(
"Failed to download {} to cache {}: {}\n{}".format(
url, cache_dir, str(e), tb
)
)
with atomic_write(signal_file, overwrite=True) as f:
f.write("%sZ\n" % datetime.utcnow().isoformat())
for i in os.listdir(cache_dir):
if i == ".localized":
continue
cached_obj = os.path.join(cache_dir, i)
if os.path.isdir(cached_obj):
dst = os.path.join(path, i) if os.path.isdir(path) else path
try:
os.symlink(cached_obj, dst)
except:
logger.error("Failed to soft link {} to {}".format(cached_obj, dst))
raise
else:
try:
os.symlink(cached_obj, path)
except:
logger.error(
"Failed to soft link {} to {}".format(cached_obj, path)
)
raise
else:
return osaka.main.get(url, path, params=params)
def find_cache_dir(cache_dir):
"""Search for *.localized files."""
cache_dirs = []
for root, dirs, files in os.walk(cache_dir, followlinks=True):
files.sort()
dirs.sort()
for file in files:
if file == ".localized":
signal_file = os.path.join(root, file)
with open(signal_file) as f:
timestamp = f.read()
insort(cache_dirs, (timestamp, signal_file, root))
return cache_dirs[::-1]
def disk_space_info(path):
"""Return disk usage info."""
disk = os.statvfs(path)
capacity = disk.f_frsize * disk.f_blocks
free = disk.f_frsize * disk.f_bavail
used = disk.f_frsize * (disk.f_blocks - disk.f_bavail)
percent_free = math.ceil(float(100) / float(capacity) * free)
return capacity, free, used, percent_free
def get_threshold(path, disk_usage):
"""Return required threshold based on disk usage of a job type."""
capacity, free, used, percent_free = disk_space_info(path)
du_bytes = None
for unit in DU_CALC:
if disk_usage.endswith(unit):
du_bytes = int(disk_usage[0:-2]) * DU_CALC[unit]
break
if du_bytes is None:
raise RuntimeError(
"Failed to determine disk usage requirements from verdi config: {}".format(
disk_usage
)
)
return math.ceil(float(100) / float(capacity) * du_bytes)
def get_disk_usage(path, follow_symlinks=True):
"""Return disk usage size in bytes."""
opts = "-sbL" if follow_symlinks else "-sb"
size = 0
try:
size = int(check_output(["du", opts, path]).split()[0])
    except Exception:
        pass  # fall back to 0 if `du` fails (e.g. permission errors)
return size
def makedirs(dir, mode=0o777):
"""Make directory along with any parent directory that may be needed."""
try:
os.makedirs(dir, mode)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(dir):
pass
else:
raise
def validateDirectory(dir, mode=0o755, noExceptionRaise=False):
"""Validate that a directory can be written to by the current process and return 1.
Otherwise, try to create it. If successful, return 1. Otherwise return None.
"""
if os.path.isdir(dir):
    if os.access(dir, os.R_OK | os.W_OK | os.X_OK):
return 1
else:
return None
else:
try:
makedirs(dir, mode)
os.chmod(dir, mode)
except:
if noExceptionRaise:
pass
else:
raise
return 1
def getXmlEtree(xml):
"""Return a tuple of [lxml etree element, prefix->namespace dict]."""
parser = XMLParser(remove_blank_text=True)
if xml.startswith("<?xml") or xml.startswith("<"):
return (parse(StringIO(xml), parser).getroot(), getNamespacePrefixDict(xml))
else:
if os.path.isfile(xml):
xmlStr = open(xml).read()
else:
xmlStr = urlopen(xml).read()
return (
parse(StringIO(xmlStr), parser).getroot(),
getNamespacePrefixDict(xmlStr),
)
def getNamespacePrefixDict(xmlString):
"""Take an xml string and return a dict of namespace prefixes to
namespaces mapping."""
nss = {}
defCnt = 0
matches = re.findall(r'\s+xmlns:?(\w*?)\s*=\s*[\'"](.*?)[\'"]', xmlString)
for match in matches:
prefix = match[0]
ns = match[1]
if prefix == "":
defCnt += 1
prefix = "_" * defCnt
nss[prefix] = ns
return nss
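# Example (added): getNamespacePrefixDict('<r xmlns="urn:a" xmlns:x="urn:b"/>')
# returns {'_': 'urn:a', 'x': 'urn:b'}; default namespaces get '_' prefixes.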
def xpath(elt, xp, ns, default=None):
"""
Run an xpath on an element and return the first result. If no results
were returned then return the default value.
"""
res = elt.xpath(xp, namespaces=ns)
if len(res) == 0:
return default
else:
return res[0]
def pprintXml(et):
"""Return pretty printed string of xml element."""
return tostring(et, pretty_print=True)
def parse_iso8601(t):
"""Return datetime from ISO8601 string."""
try:
return datetime.strptime(t, "%Y-%m-%dT%H:%M:%S.%fZ")
except ValueError:
return datetime.strptime(t, "%Y-%m-%dT%H:%M:%SZ")
def get_short_error(e):
"""Return shortened version of error message."""
e_str = str(e)
if len(e_str) > 35:
return "%s.....%s" % (e_str[:20], e_str[-10:])
else:
return e_str
def get_payload_hash(payload):
"""Return unique hash of HySDS job JSON payload."""
clean_payload = copy.deepcopy(payload)
for k in ("_disk_usage", "_sciflo_job_num", "_sciflo_wuid"):
if k in clean_payload:
del clean_payload[k]
    return hashlib.md5(
        json.dumps(clean_payload, sort_keys=True, ensure_ascii=True).encode()
    ).hexdigest()
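# Example (sketch): two payloads that differ only in the bookkeeping keys
# stripped above hash identically, so they dedup to the same job:
#   get_payload_hash({"job": "x", "_sciflo_wuid": 1}) == \
#       get_payload_hash({"job": "x", "_sciflo_wuid": 2})  # -> True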
def no_dedup_job(details):
logger.info("Giving up querying for dedup jobs with args {args} and kwargs {kwargs}".format(**details))
return None
@backoff.on_exception(
backoff.expo, requests.exceptions.RequestException, max_tries=8, max_value=32
)
@backoff.on_exception(
backoff.expo, NoDedupJobFoundException, max_tries=8, max_value=32, on_giveup=no_dedup_job
)
def query_dedup_job(dedup_key, filter_id=None, states=None, is_worker=False):
"""
    Return the job with a matching dedup key that is in one of the given
    states ('job-queued', 'job-started' and 'job-completed' by default).
"""
hash_exists_in_redis = payload_hash_exists(dedup_key)
if hash_exists_in_redis is True:
logger.info("Payload hash already exists in REDIS: {}".format(dedup_key))
elif hash_exists_in_redis is False:
logger.info("Payload hash does not exist in REDIS: {}".format(dedup_key))
# get states
if states is None:
states = ["job-queued", "job-started", "job-completed"]
# build query
query = {
"sort": [{"job.job_info.time_queued": {"order": "asc"}}],
"size": 1,
"_source": ["_id", "status"],
"query": {
"bool": {
"must": [
{"term": {"payload_hash": dedup_key}},
{
"bool": {
"should": [
{"terms": {"status": states}} # should be an list
]
}
},
]
}
},
}
if filter_id is not None:
query["query"]["bool"]["must_not"] = {"term": {"uuid": filter_id}}
logger.info("constructed query: %s" % json.dumps(query, indent=2))
es_url = "%s/job_status-current/_search" % app.conf["JOBS_ES_URL"]
headers = {"Content-Type": "application/json"}
r = requests.post(es_url, data=json.dumps(query), headers=headers)
if r.status_code != 200:
if r.status_code == 404:
logger.info(
"status_code 404, job_status-current index probably does not exist, returning None"
)
return None
else:
r.raise_for_status()
j = r.json()
logger.info("result: %s" % r.text)
if j["hits"]["total"]["value"] == 0:
if hash_exists_in_redis is True:
if is_worker:
return None
else:
raise NoDedupJobFoundException("Could not find any dedup jobs with the following query: {}".format(
json.dumps(query, indent=2)))
elif hash_exists_in_redis is False:
return None
else:
raise RuntimeError("Could not determine if payload hash already exists in REDIS: {}".format(dedup_key))
else:
hit = j["hits"]["hits"][0]
logger.info(
"Found duplicate job: %s" % json.dumps(hit, indent=2, sort_keys=True)
)
return {
"_id": hit["_id"],
"status": hit["_source"]["status"],
"query_timestamp": datetime.utcnow().isoformat(),
}
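# Usage sketch (payload_hash and task_id are hypothetical values): look up an
# existing job for a payload hash, excluding the current task's own uuid:
#   dedup = query_dedup_job(payload_hash, filter_id=task_id)
#   if dedup is not None:
#       logger.info("duplicate of %s (%s)" % (dedup["_id"], dedup["status"]))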
@backoff.on_exception(
backoff.expo, requests.exceptions.RequestException, max_tries=8, max_value=32
)
def get_job_status(_id):
"""Get job status."""
es_url = "%s/job_status-current/_doc/%s" % (app.conf["JOBS_ES_URL"], _id)
r = requests.get(es_url, params={"_source": "status"})
logger.info("get_job_status status: %s" % r.status_code)
result = r.json()
logger.info("get_job_status result: %s" % json.dumps(result, indent=2))
return result["_source"]["status"] if result["found"] else None
@backoff.on_exception(
backoff.expo, requests.exceptions.RequestException, max_tries=8, max_value=32
)
def check_dataset(_id, es_index="grq"):
"""Query for dataset with specified input ID."""
query = {
"query": {
"bool": {
"must": [
{"term": {"_id": _id}},
]
}
}
}
count = grq_es.get_count(index=es_index, body=query)
return count
def dataset_exists(_id, es_index="grq"):
"""Return true if dataset id exists."""
return True if check_dataset(_id, es_index) > 0 else False
def localize_urls(job, ctx):
"""Localize urls for job inputs. Track metrics."""
# get job info
job_dir = job["job_info"]["job_dir"]
# localize urls
for i in job["localize_urls"]:
url = i["url"]
path = i.get("local_path", None)
cache = i.get("cache", True)
if path is None:
path = "%s/" % job_dir
else:
if path.startswith("/"):
pass
else:
path = os.path.join(job_dir, path)
if os.path.isdir(path) or path.endswith("/"):
path = os.path.join(path, os.path.basename(url))
dir_path = os.path.dirname(path)
makedirs(dir_path)
loc_t1 = datetime.utcnow()
try:
download_file(url, path, cache=cache)
except Exception as e:
tb = traceback.format_exc()
raise RuntimeError("Failed to download {}: {}\n{}".format(url, str(e), tb))
loc_t2 = datetime.utcnow()
loc_dur = (loc_t2 - loc_t1).total_seconds()
path_disk_usage = get_disk_usage(path)
job["job_info"]["metrics"]["inputs_localized"].append(
{
"url": url,
"path": path,
"disk_usage": path_disk_usage,
"time_start": loc_t1.isoformat() + "Z",
"time_end": loc_t2.isoformat() + "Z",
"duration": loc_dur,
"transfer_rate": path_disk_usage / loc_dur,
}
)
# signal run_job() to continue
return True
def find_dataset_json(work_dir):
"""Search for *.dataset.json files."""
dataset_re = re.compile(r"^(.*)\.dataset\.json$")
for root, dirs, files in os.walk(work_dir, followlinks=True):
files.sort()
dirs.sort()
for file in files:
match = dataset_re.search(file)
if match:
dataset_file = os.path.join(root, file)
prod_dir = os.path.join(os.path.dirname(root), match.group(1))
if prod_dir != root:
logger.info(
"%s exists in directory %s. Should be in %s. Not uploading."
% (dataset_file, root, prod_dir)
)
elif not os.path.exists(prod_dir):
logger.info(
"Couldn't find product directory %s for dataset.json %s. Not uploading."
% (prod_dir, dataset_file)
)
else:
yield (dataset_file, prod_dir)
def publish_dataset(prod_dir, dataset_file, job, ctx):
"""Publish a dataset. Track metrics."""
# get job info
job_dir = job["job_info"]["job_dir"]
time_start_iso = job["job_info"]["time_start"]
context_file = job["job_info"]["context_file"]
datasets_cfg_file = job["job_info"]["datasets_cfg_file"]
# time start
time_start = parse_iso8601(time_start_iso)
# check for PROV-ES JSON from PGE; if exists, append related PROV-ES info;
# also overwrite merged PROV-ES JSON file
prod_id = os.path.basename(prod_dir)
prov_es_file = os.path.join(prod_dir, "%s.prov_es.json" % prod_id)
prov_es_info = {}
if os.path.exists(prov_es_file):
with open(prov_es_file) as f:
try:
prov_es_info = json.load(f)
except Exception as e:
tb = traceback.format_exc()
raise RuntimeError(
"Failed to log PROV-ES from {}: {}\n{}".format(
prov_es_file, str(e), tb
)
)
log_prov_es(job, prov_es_info, prov_es_file)
# copy _context.json
prod_context_file = os.path.join(prod_dir, "%s.context.json" % prod_id)
shutil.copy(context_file, prod_context_file)
# force ingest? (i.e. disable no-clobber)
ingest_kwargs = { "force": False }
if ctx.get("_force_ingest", False):
logger.info("Flag _force_ingest set to True.")
ingest_kwargs["force"] = True
# upload
tx_t1 = datetime.utcnow()
metrics, prod_json = get_func("hysds.dataset_ingest.ingest")(
*(
prod_id,
datasets_cfg_file,
app.conf.GRQ_UPDATE_URL,
app.conf.DATASET_PROCESSED_QUEUE,
prod_dir,
job_dir,
),
**ingest_kwargs
)
tx_t2 = datetime.utcnow()
tx_dur = (tx_t2 - tx_t1).total_seconds()
prod_dir_usage = get_disk_usage(prod_dir)
# set product provenance
prod_prov = {
"product_type": metrics["ipath"],
"processing_start_time": time_start.isoformat() + "Z",
"availability_time": tx_t2.isoformat() + "Z",
"processing_latency": (tx_t2 - time_start).total_seconds() / 60.0,
"total_latency": (tx_t2 - time_start).total_seconds() / 60.0,
}
prod_prov_file = os.path.join(prod_dir, "%s.prod_prov.json" % prod_id)
if os.path.exists(prod_prov_file):
with open(prod_prov_file) as f:
prod_prov.update(json.load(f))
if "acquisition_start_time" in prod_prov:
if "source_production_time" in prod_prov:
prod_prov["ground_system_latency"] = (
parse_iso8601(prod_prov["source_production_time"])
- parse_iso8601(prod_prov["acquisition_start_time"])
).total_seconds() / 60.0
prod_prov["total_latency"] += prod_prov["ground_system_latency"]
prod_prov["access_latency"] = (
tx_t2 - parse_iso8601(prod_prov["source_production_time"])
).total_seconds() / 60.0
prod_prov["total_latency"] += prod_prov["access_latency"]
# write product provenance of the last product; not writing to an array under the
# product because kibana table panel won't show them correctly:
# https://github.com/elasticsearch/kibana/issues/998
job["job_info"]["metrics"]["product_provenance"] = prod_prov
job["job_info"]["metrics"]["products_staged"].append(
{
"path": prod_dir,
"disk_usage": prod_dir_usage,
"time_start": tx_t1.isoformat() + "Z",
"time_end": tx_t2.isoformat() + "Z",
"duration": tx_dur,
"transfer_rate": prod_dir_usage / tx_dur,
"id": prod_json["id"],
"urls": prod_json["urls"],
"browse_urls": prod_json["browse_urls"],
"dataset": prod_json["dataset"],
"ipath": prod_json["ipath"],
"system_version": prod_json["system_version"],
"dataset_level": prod_json["dataset_level"],
"dataset_type": prod_json["dataset_type"],
"index": prod_json["grq_index_result"]["index"],
}
)
return prod_json
def publish_datasets(job, ctx):
"""Perform dataset publishing if job exited with zero status code."""
# if exit code of job command is non-zero, don't publish anything
exit_code = job["job_info"]["status"]
if exit_code != 0:
logger.info(
"Job exited with exit code %s. Bypassing dataset publishing." % exit_code
)
return True
# if job command never ran, don't publish anything
pid = job["job_info"]["pid"]
if pid == 0:
logger.info("Job command never ran. Bypassing dataset publishing.")
return True
# get job info
job_dir = job["job_info"]["job_dir"]
# find and publish
published_prods = []
for dataset_file, prod_dir in find_dataset_json(job_dir):
# skip if marked as localized input
signal_file = os.path.join(prod_dir, ".localized")
if os.path.exists(signal_file):
logger.info("Skipping publish of %s. Marked as localized input." % prod_dir)
continue
# publish
prod_json = publish_dataset(prod_dir, dataset_file, job, ctx)
# save json for published product
published_prods.append(prod_json)
# write published products to file
pub_prods_file = os.path.join(job_dir, "_datasets.json")
with open(pub_prods_file, "w") as f:
json.dump(published_prods, f, indent=2, sort_keys=True)
# signal run_job() to continue
return True
def triage(job, ctx):
"""Triage failed job's context and job json as well as _run.sh."""
# set time_start if not defined (job failed prior to setting it)
if "time_start" not in job["job_info"]:
job["job_info"]["time_start"] = "{}Z".format(datetime.utcnow().isoformat("T"))
# default triage id
default_triage_id_format = "triaged_job-{job_id}_task-{job[task_id]}"
    default_triage_id_regex = "triaged_job-(?P<job_id>.+)_task-(?P<task_id>[-\\w]+)"
# if exit code of job command is zero, don't triage anything
exit_code = job["job_info"]["status"]
if exit_code == 0:
logger.info("Job exited with exit code %s. No need to triage." % exit_code)
return True
# disable triage
if ctx.get("_triage_disabled", False):
logger.info("Flag _triage_disabled set to True. Not performing triage.")
return True
# Check if custom triage id format was provided
if "_triage_id_format" in ctx:
triage_id_format = ctx["_triage_id_format"]
else:
triage_id_format = default_triage_id_format
# get job info
job_dir = job["job_info"]["job_dir"]
job_id = job["job_info"]["id"]
logger.info("job id: {}".format(job_id))
# Check if the job_id is a triaged dataset. If so, let's parse out the job_id
logger.info("Checking to see if the job_id matches the regex: {}".format(default_triage_id_regex))
match = re.search(default_triage_id_regex, job_id)
if match:
logger.info("job_id matches the triage dataset regex. Parsing out job_id")
parsed_job_id = match.groupdict()["job_id"]
logger.info("extracted job_id: {}".format(parsed_job_id))
else:
logger.info("job_id does not match the triage dataset regex: {}".format(default_triage_id_regex))
parsed_job_id = job_id
# create triage dataset
# Attempt to first use triage id format from user, but if there is any problem use the default id format instead
try:
triage_id = triage_id_format.format(job_id=parsed_job_id, job=job, job_context=ctx)
except Exception as e:
logger.warning(
"Failed to apply custom triage id format because of {}: {}. Falling back to default triage id".format(
e.__class__.__name__, e
)
)
triage_id = default_triage_id_format.format(job_id=parsed_job_id, job=job, job_context=ctx)
triage_dir = os.path.join(job_dir, triage_id)
makedirs(triage_dir)
# create dataset json
ds_file = os.path.join(triage_dir, "{}.dataset.json".format(triage_id))
ds = {
"version": "v{}".format(hysds.__version__),
"label": "triage for job {}".format(parsed_job_id),
}
if "cmd_start" in job["job_info"]:
ds["starttime"] = job["job_info"]["cmd_start"]
if "cmd_end" in job["job_info"]:
ds["endtime"] = job["job_info"]["cmd_end"]
with open(ds_file, "w") as f:
json.dump(ds, f, sort_keys=True, indent=2)
# create met json
met_file = os.path.join(triage_dir, "{}.met.json".format(triage_id))
with open(met_file, "w") as f:
json.dump(job["job_info"], f, sort_keys=True, indent=2)
# triage job-related files
for f in glob(os.path.join(job_dir, "_*")):
if os.path.isdir(f):
shutil.copytree(f, os.path.join(triage_dir, os.path.basename(f)))
else:
shutil.copy(f, triage_dir)
# triage log files
for f in glob(os.path.join(job_dir, "*.log")):
if os.path.isdir(f):
shutil.copytree(f, os.path.join(triage_dir, os.path.basename(f)))
else:
shutil.copy(f, triage_dir)
# triage additional globs
for g in ctx.get("_triage_additional_globs", []):
for f in glob(os.path.join(job_dir, g)):
f = os.path.normpath(f)
dst = os.path.join(triage_dir, os.path.basename(f))
if os.path.exists(dst):
dst = "{}.{}Z".format(dst, datetime.utcnow().isoformat("T"))
try:
if os.path.isdir(f):
shutil.copytree(f, dst)
else:
shutil.copy(f, dst)
except Exception as e:
tb = traceback.format_exc()
logger.error(
"Skipping copying of {}. Got exception: {}\n{}".format(
f, str(e), tb
)
)
# publish
prod_json = publish_dataset(triage_dir, ds_file, job, ctx)
# write published triage to file
pub_triage_file = os.path.join(job_dir, "_triaged.json")
with open(pub_triage_file, "w") as f:
json.dump(prod_json, f, indent=2, sort_keys=True)
# signal run_job() to continue
return True
def mark_localized_datasets(job, ctx):
"""Mark localized datasets to prevent republishing."""
# get job info
job_dir = job["job_info"]["job_dir"]
# find localized datasets and mark
for dataset_file, prod_dir in find_dataset_json(job_dir):
signal_file = os.path.join(prod_dir, ".localized")
with atomic_write(signal_file, overwrite=True) as f:
f.write("%sZ\n" % datetime.utcnow().isoformat())
# signal run_job() to continue
return True
def hashlib_mapper(algo):
"""
:param algo: string
:return: hashlib library for specified algorithm
algorithms available in python3 but not in python2:
sha3_224 sha3_256, sha3_384, blake2b, blake2s, sha3_512, shake_256, shake_128
"""
algo = algo.lower()
if algo == "md5":
return hashlib.md5()
elif algo == "sha1":
return hashlib.sha1()
elif algo == "sha224":
return hashlib.sha224()
elif algo == "sha256":
return hashlib.sha256()
elif algo == "sha384":
return hashlib.sha384()
elif algo == "sha3_224":
return hashlib.sha3_224()
elif algo == "sha3_256":
return hashlib.sha3_256()
elif algo == "sha3_384":
return hashlib.sha3_384()
elif algo == "sha3_512":
return hashlib.sha3_512()
elif algo == "sha512":
return hashlib.sha512()
elif algo == "blake2b":
return hashlib.blake2b()
elif algo == "blake2s":
return hashlib.blake2s()
elif algo == "shake_128":
return hashlib.shake_128()
elif algo == "shake_256":
return hashlib.shake_256()
else:
raise Exception("Unsupported hashing algorithm: %s" % algo)
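# Equivalent sketch using the standard library directly: on Python 3,
# hashlib.new(algo) constructs the same hash objects, e.g.
#   hash_tool = hashlib.new("sha3_256")  # same object as hashlib_mapper("sha3_256")
# The explicit mapping above is kept for clarity and a friendlier error message.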
def calculate_checksum_from_localized_file(file_name, hash_algo):
"""
:param file_name: file path to the localized file after download
:param hash_algo: string, hashing algorithm (md5, sha256, etc.)
:return: string, ex. 8e15beebbbb3de0a7dbed50a39b6e41b ALL LOWER CASE
******** IF USING SHAKE_256 OR SHAKE_128, I DEFAULT THE HEXDIGEST LENGTH TO 255 ********
"""
hash_tool = hashlib_mapper(hash_algo)
with open(file_name, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_tool.update(chunk)
if hash_tool.name in ("shake_256", "shake_128"):
return hash_tool.hexdigest(255)
else:
return hash_tool.hexdigest()
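# Usage sketch (file names are hypothetical; read_checksum_file is defined below):
#   calculated = calculate_checksum_from_localized_file("data_set_1.zip", "md5")
#   expected = read_checksum_file("data_set_1.zip.md5")
#   assert calculated.lower() == expected.lower()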
def check_file_is_checksum(file_path):
"""
checks if the file has a .hash extension
hashlib.algorithms_guaranteed is a list of all checksum file extensions
return algorithm type (md5, sha256, etc) if it file has a .<algorithm> appended
"""
for algo in hashlib.algorithms_guaranteed:
checksum_file_extension = ".%s" % algo # ex. S1W_SLC_843290304820.zip.md5
if file_path.endswith(checksum_file_extension):
return algo
return None
def read_checksum_file(file_path):
    with open(file_path, "r") as f:
        # the checksum file contains a single line; strip the trailing newline
        checksum = f.readline().rstrip("\n")
    return checksum
def generate_list_checksum_files(job):
"""
    :param job: job JSON (_job.json)
    :return: list of all checksum files, so each can be compared one by one,
             e.g. [{'file_path': '/home/ops/hysds/...', 'algo': 'md5'}, ...]
"""
# reusing directory code from the localize_urls() function
job_dir = job["job_info"]["job_dir"] # get job info
files_with_checksum = []
for i in job["localize_urls"]:
url = i["url"]
path = i.get("local_path", None)
cache = i.get("cache", True)
if path is None:
path = "%s/" % job_dir
else:
if path.startswith("/"):
pass
else:
path = os.path.join(job_dir, path)
if os.path.isdir(path) or path.endswith("/"):
path = os.path.join(path, os.path.basename(url))
dir_path = os.path.dirname(path)
        if os.path.isdir(path):  # if path is a directory, loop through each file in it
for file in os.listdir(path):
full_file_path = os.path.join(path, file)
hash_algo = check_file_is_checksum(full_file_path)
if hash_algo:
files_with_checksum.append(
{"file_path": full_file_path, "algo": hash_algo}
)
        else:  # if path is actually a file
hash_algo = check_file_is_checksum(path)
if hash_algo:
files_with_checksum.append({"file_path": path, "algo": hash_algo})
return files_with_checksum
def validate_checksum_files(job, cxt):
"""
:param job: _job.json
:param cxt: _context.json
    :return: True on success; raises an Exception if any localized file has a
             mismatched checksum value
"""
# list of dictionaries: ex. [ {'file_path': '/home/ops/hysds/...', 'algo': 'md5'}, { ... } ]
logger.info("validating checksum files:")
files_to_validate = generate_list_checksum_files(job)
logger.info(files_to_validate)
mismatched_checksums = []
exception_string = "Files with mismatched checksum:\n"
for file_info in files_to_validate:
algo = file_info["algo"]
file_path_checksum = file_info["file_path"]
# this has the hash extension to the file, we need to remove it
file_path = file_path_checksum.replace("." + algo, "")
if not os.path.isfile(file_path):
# if checksum file exists but original file does not exist, we should skip it
# ex. data_set_1.zip.md5 vs data_set_1.zip
logger.info("%s does not exist, skipping" % file_path)
continue
calculated_checksum = calculate_checksum_from_localized_file(file_path, algo)
pre_computed_checksum = read_checksum_file(file_path_checksum)
logger.info(
"calculated_checksum: %s pre_computed_checksum: %s"
% (calculated_checksum, pre_computed_checksum)
)
if calculated_checksum.lower() != pre_computed_checksum.lower():
mismatched_checksums.append(file_path)
exception_string += (
"%s: calculated checksum: %s, pre-computed checksum: %s\n"
% (file_path, calculated_checksum, pre_computed_checksum)
)
if len(mismatched_checksums) > 0:
logger.info(exception_string)
raise Exception(exception_string)
else:
logger.info("checksum preprocessing completed successfully")
return True
|
420633
|
import numpy
from .abstracts import ClassScoresStrategy
class CommonNeighborsStrategy(ClassScoresStrategy):
__name__ = 'CN'
def find_score(self, class_node, test_node):
return self.leg.count_common_neighbors(class_node, test_node)
class AdamicAdarStrategy(ClassScoresStrategy):
__name__ = 'AA'
def find_score(self, class_node, test_node):
score = 0
        for n in self.leg.common_neighbors(class_node, test_node):
            # the small epsilon keeps degree-1 neighbors (log(1) == 0) from dividing by zero
            score += 1 / (numpy.log(self.leg.degree(n)) + 10e-10)
return score
class ResourceAllocationStrategy(ClassScoresStrategy):
__name__ = 'RA'
def find_score(self, class_node, test_node):
score = 0
for n in self.leg.common_neighbors(class_node, test_node):
score += 1 / self.leg.degree(n)
return score
class CompatibilityScoreStrategy(ClassScoresStrategy):
__name__ = 'CS'
def find_score(self, class_node, test_node):
score = 0
for neighbor_node in self.leg.common_neighbors(class_node, test_node):
deg1 = self.leg.degree(neighbor_node) - self.leg.count_common_neighbors(neighbor_node, test_node)
deg2 = self.leg.degree(neighbor_node) - self.leg.count_common_neighbors(neighbor_node, class_node)
score += (1 / deg1 + 1 / deg2)
return score
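# Usage sketch: each strategy is constructed around the label-expanded graph
# `leg` exposed by ClassScoresStrategy in .abstracts (the exact constructor
# signature is assumed here) and scores a (class_node, test_node) pair:
#   strategy = AdamicAdarStrategy(leg)
#   score = strategy.find_score(class_node, test_node)
# Higher scores indicate a stronger predicted link between the two nodes.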
|
420645
|
import os
import logging
from pathlib import Path
class LoggerHandler():
def __init__(self, level):
self.level = getattr(logging, level)
self.logger = logging.getLogger("OnionScraper")
self.logger.setLevel(self.level)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(self.level)
        # create file logging in the package's parent directory
        log_dir = Path(__file__).parents[1]
        logging_path = os.path.join(log_dir, "info.log")
fh = logging.FileHandler(logging_path)
# create formatter
formatter = logging.Formatter('[%(asctime)s] - %(name)s - %(levelname)s - %(message)s',datefmt='%a, %d %b %Y %H:%M:%S')
formatter_console = logging.Formatter('[%(asctime)s] - %(levelname)s - %(message)s',datefmt='%d %b %Y %H:%M:%S')
# add formatter to ch
ch.setFormatter(formatter_console)
fh.setFormatter(formatter)
        # add handlers to the logger
        self.logger.addHandler(ch)  # console logging
        self.logger.addHandler(fh)  # file logging
def start_logging(self):
self.logger.info('Starting OnionScraper')
return self.logger
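# Usage sketch ("INFO" may be any level name understood by the logging module,
# e.g. "DEBUG" or "WARNING"):
#   logger = LoggerHandler("INFO").start_logging()
#   logger.info("scraper ready")  # goes to both the console and info.log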
|
420696
|
def test_no_snippets_on_call():
import requests
url = "www.google.fr"
'''TEST
requests.get$
@. ... get
@! get([url])
status: ok
'''
|
420719
|
from .base import *
class TextureMap:
_wrap_modes = {
"repeat": SamplerState.WM_repeat,
"clamp": SamplerState.WM_clamp,
"border_color": SamplerState.WM_border_color,
"mirror": SamplerState.WM_mirror,
"mirror_once": SamplerState.WM_mirror_once
}
_filter_types = {
"nearest": SamplerState.FT_nearest,
"linear": SamplerState.FT_linear,
"nearest_mipmap_nearest": SamplerState.FT_nearest_mipmap_nearest,
"nearest_mipmap_linear": SamplerState.FT_nearest_mipmap_linear,
"linear_mipmap_nearest": SamplerState.FT_linear_mipmap_nearest,
"linear_mipmap_linear": SamplerState.FT_linear_mipmap_linear,
"shadow": SamplerState.FT_shadow
}
def __getstate__(self):
state = self.__dict__.copy()
state["_texture"] = None
return state
def __setstate__(self, state):
self.__dict__ = state
if self.type != "layer":
self.tex_stage = Mgr.get("tex_stage", self.type)
self.set_texture(self._rgb_filename, self._alpha_filename)
def __init__(self, map_type, layer_name=None):
self.type = map_type
if map_type == "layer":
self.tex_stage = TextureStage(layer_name)
else:
self.tex_stage = Mgr.get("tex_stage", map_type)
self._uv_set_id = 0
self.active = False
self._texture = None
self._rgb_filename = ""
self._alpha_filename = ""
self._border_color = (0., 0., 0., 1.)
self._wrap_modes_locked = True
self._wrap_mode_ids = {"u": "repeat", "v": "repeat"}
self._filter_ids = {"min": "linear", "mag": "linear"}
self._anisotropic_degree = 1
self._transform = {"offset": [0., 0.], "rotate": [0.], "scale": [1., 1.]}
def copy(self):
tex_map = TextureMap(self.type)
tex_map.border_color = self._border_color
tex_map.wrap_u = self._wrap_mode_ids["u"]
tex_map.wrap_v = self._wrap_mode_ids["v"]
tex_map.lock_wrap_modes(self._wrap_modes_locked)
tex_map.minfilter = self._filter_ids["min"]
tex_map.magfilter = self._filter_ids["mag"]
tex_map.anisotropic_degree = self._anisotropic_degree
tex_map.copy_transform(self._transform)
texture = self._texture.make_copy() if self._texture else None
tex_map.set_texture(self._rgb_filename, self._alpha_filename, texture)
tex_map.active = self.active
return tex_map
def equals(self, other):
if self.active != other.active:
return False
if not self.active:
return True
if self.get_tex_filenames() != other.get_tex_filenames():
return False
if not self._texture:
return True
other_tex_stage = TextureStage(other.tex_stage)
other_tex_stage.name = self.tex_stage.name
if self.tex_stage != other_tex_stage:
return False
if self._border_color != other.border_color:
return False
if self._wrap_mode_ids["u"] != other.wrap_u:
return False
if self._wrap_mode_ids["v"] != other.wrap_v:
return False
if self._wrap_modes_locked != other.are_wrap_modes_locked():
return False
if self._filter_ids["min"] != other.minfilter:
return False
if self._filter_ids["mag"] != other.magfilter:
return False
if self._anisotropic_degree != other.anisotropic_degree:
return False
for transf_type in ("offset", "rotate", "scale"):
if self._transform[transf_type] != other.get_transform(transf_type):
return False
return True
@property
def anisotropic_degree(self):
return self._anisotropic_degree
@anisotropic_degree.setter
def anisotropic_degree(self, anisotropic_degree):
self._anisotropic_degree = anisotropic_degree
texture = self._texture
if texture:
texture.anisotropic_degree = anisotropic_degree
@property
def border_color(self):
return self._border_color
@border_color.setter
def border_color(self, color_values):
self._border_color = color_values
texture = self._texture
if texture:
texture.border_color = VBase4(*color_values)
@property
def sort(self):
return self.tex_stage.sort
@sort.setter
def sort(self, sort):
self.tex_stage.sort = sort
@property
def priority(self):
return self.tex_stage.priority
@priority.setter
def priority(self, priority):
self.tex_stage.priority = priority
def __set_wrap_mode(self, axis, wrap_mode_id):
self._wrap_mode_ids[axis] = wrap_mode_id
if self._wrap_modes_locked:
self._wrap_mode_ids["v" if axis == "u" else "u"] = wrap_mode_id
texture = self._texture
if texture:
wrap_mode = self._wrap_modes[wrap_mode_id]
if axis == "u":
texture.wrap_u = wrap_mode
if self._wrap_modes_locked:
texture.wrap_v = wrap_mode
else:
texture.wrap_v = wrap_mode
if self._wrap_modes_locked:
texture.wrap_u = wrap_mode
@property
def wrap_u(self):
return self._wrap_mode_ids["u"]
@wrap_u.setter
def wrap_u(self, wrap_mode_id):
self.__set_wrap_mode("u", wrap_mode_id)
@property
def wrap_v(self):
return self._wrap_mode_ids["v"]
@wrap_v.setter
def wrap_v(self, wrap_mode_id):
self.__set_wrap_mode("v", wrap_mode_id)
def __set_filter_type(self, minmag, filter_id):
self._filter_ids[minmag] = filter_id
texture = self._texture
if texture:
if minmag == "min":
texture.minfilter = self._filter_types[filter_id]
else:
texture.magfilter = self._filter_types[filter_id]
@property
def minfilter(self):
return self._filter_ids["min"]
@minfilter.setter
def minfilter(self, filter_id):
self.__set_filter_type("min", filter_id)
@property
def magfilter(self):
return self._filter_ids["mag"]
@magfilter.setter
def magfilter(self, filter_id):
self.__set_filter_type("mag", filter_id)
@property
def uv_set_name(self):
return self.tex_stage.texcoord_name.name
def set_uv_set_id(self, uv_set_id):
if self._uv_set_id == uv_set_id:
return False
name = str(uv_set_id) if uv_set_id else InternalName.get_texcoord()
self.tex_stage.set_texcoord_name(name)
self._uv_set_id = uv_set_id
return True
@property
def uv_set_id(self):
return self._uv_set_id
@uv_set_id.setter
def uv_set_id(self, uv_set_id):
self.set_uv_set_id(uv_set_id)
def set_texture(self, rgb_filename="", alpha_filename="", texture=None):
if texture is None:
if rgb_filename:
paths = ",".join(GD["config"]["texfile_paths"])
rgb_fname = Filename.from_os_specific(rgb_filename)
if rgb_fname.exists():
rgb_fullpath = rgb_filename
else:
rgb_basename = rgb_fname.get_basename()
rgb_fname = Filename.from_os_specific(rgb_basename)
rgb_fname = DSearchPath.search_path(rgb_fname, paths, ",")
rgb_fullpath = rgb_fname.to_os_specific()
if rgb_fname:
texture = Mgr.load_tex(rgb_fname)
alpha_fullpath = ""
if not texture.is_of_type(MovieTexture):
texture = Texture(self.type)
if alpha_filename:
a_fname = Filename.from_os_specific(alpha_filename)
if a_fname.exists():
alpha_fullpath = alpha_filename
else:
alpha_basename = a_fname.get_basename()
a_fname = Filename.from_os_specific(alpha_basename)
a_fname = DSearchPath.search_path(a_fname, paths, ",")
alpha_fullpath = a_fname.to_os_specific()
if a_fname:
texture.read(rgb_fname, a_fname, 0, 0)
else:
texture = None
else:
alpha_fullpath = ""
texture.read(rgb_fname)
else:
texture = None
else:
texture = None
else:
rgb_fullpath = rgb_filename
alpha_fullpath = alpha_filename
if texture:
texture.border_color = VBase4(*self._border_color)
texture.wrap_u = self._wrap_modes[self._wrap_mode_ids["u"]]
texture.wrap_v = self._wrap_modes[self._wrap_mode_ids["v"]]
texture.minfilter = self._filter_types[self._filter_ids["min"]]
texture.magfilter = self._filter_types[self._filter_ids["mag"]]
texture.anisotropic_degree = self._anisotropic_degree
self._rgb_filename = rgb_fullpath
self._alpha_filename = alpha_fullpath
else:
self._rgb_filename = ""
self._alpha_filename = ""
self._texture = texture
return texture
def get_texture(self):
return self._texture
def get_tex_filenames(self):
return self._rgb_filename, self._alpha_filename
def has_texture(self, rgb_filename, alpha_filename):
return self._rgb_filename == rgb_filename and self._alpha_filename == alpha_filename
def lock_wrap_modes(self, lock):
self._wrap_modes_locked = lock
ids = self._wrap_mode_ids
wrap_mode_id = ids["u"]
if not lock or ids["v"] == wrap_mode_id:
return
ids["v"] = wrap_mode_id
texture = self._texture
if texture:
texture.wrap_v = self._wrap_modes[wrap_mode_id]
def are_wrap_modes_locked(self):
return self._wrap_modes_locked
def set_transform(self, transf_type, comp_index, value):
self._transform[transf_type][comp_index] = value
def get_transform(self, transf_type=None):
return self._transform if transf_type is None else self._transform[transf_type]
def copy_transform(self, transform):
self._transform = {k: v[:] for k, v in transform.items()}
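# Usage sketch (assumes the Mgr framework from .base is set up, so that
# TexMapManager.setup() has registered the "create_tex_map" handler; the
# file name is hypothetical):
#   tex_map = Mgr.do("create_tex_map", "color")
#   tex_map.wrap_u = "clamp"  # wrap modes are locked by default, so wrap_v follows
#   tex_map.set_texture("diffuse.png")
#   tex_map.active = True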
class Layer(TextureMap):
blend_modes = {
"modulate": TextureStage.M_modulate,
"combine": TextureStage.M_combine,
"replace": TextureStage.M_replace,
"decal": TextureStage.M_decal,
"add": TextureStage.M_add,
"blend": TextureStage.M_blend,
"blend_color_scale": TextureStage.M_blend_color_scale,
"selector": TextureStage.M_selector
}
combine_modes = {
"modulate": TextureStage.CM_modulate,
"interpolate": TextureStage.CM_interpolate,
"replace": TextureStage.CM_replace,
"subtract": TextureStage.CM_subtract,
"add": TextureStage.CM_add,
"add_signed": TextureStage.CM_add_signed,
"dot3rgb": TextureStage.CM_dot3_rgb,
"dot3rgba": TextureStage.CM_dot3_rgba
}
combine_mode_sources = {
"texture": TextureStage.CS_texture,
"previous_layer": TextureStage.CS_previous,
"last_stored_layer": TextureStage.CS_last_saved_result,
"primary_color": TextureStage.CS_primary_color,
"constant_color": TextureStage.CS_constant,
"const_color_scale": TextureStage.CS_constant_color_scale
}
combine_mode_src_channels = {
"rgb": TextureStage.CO_src_color,
"1-rgb": TextureStage.CO_one_minus_src_color,
"alpha": TextureStage.CO_src_alpha,
"1-alpha": TextureStage.CO_one_minus_src_alpha
}
def __init__(self, layer_id, name):
TextureMap.__init__(self, "layer", name)
self.id = layer_id
self._name = name
self._blend_mode = "modulate"
self._uses_combine_mode = False
cmbmode_data = {}
cmbmode_data["channels"] = "rgb"
mode_ids = list(self.combine_modes.keys())
for channels in ("rgb", "alpha"):
cmbmode_data[channels] = data = {}
data["on"] = False
data["mode"] = "modulate"
data["source_index"] = 0
data["sources"] = sources = {}
for mode_id in mode_ids:
sources[mode_id] = [["texture", channels]]
mode_ids.remove("replace")
for channels in ("rgb", "alpha"):
sources = cmbmode_data[channels]["sources"]
for mode_id in mode_ids:
sources[mode_id].append(["previous_layer", channels])
sources["interpolate"].append(["last_stored_layer", channels])
self._combine_mode_data = cmbmode_data
self.active = True
def __copy_combine_mode_data(self):
orig_data = self._combine_mode_data
copy_data = orig_data.copy()
copy_data["channels"]
mode_ids = list(self.combine_modes.keys())
for channels in ("rgb", "alpha"):
copy_data[channels] = orig_data[channels].copy()
old_sources = orig_data[channels]["sources"]
copy_data[channels]["sources"] = new_sources = old_sources.copy()
for mode_id, source_data in old_sources.items():
new_sources[mode_id] = [source_ids[:] for source_ids in source_data]
return copy_data
def copy(self, copy_name=False):
layer = Mgr.do("create_tex_layer")
if copy_name:
layer.name = self._name
layer.border_color = self.border_color
layer.wrap_u = self.wrap_u
layer.wrap_v = self.wrap_v
layer.lock_wrap_modes(self.are_wrap_modes_locked())
layer.minfilter = self.minfilter
layer.magfilter = self.magfilter
layer.anisotropic_degree = self.anisotropic_degree
layer.uv_set_id = self.uv_set_id
layer.copy_transform(self.get_transform())
rgb_filename, alpha_filename = self.get_tex_filenames()
tex = self.get_texture()
tex_copy = tex.make_copy() if tex else None
layer.set_texture(rgb_filename, alpha_filename, tex_copy)
layer.color = self.color
layer.rgb_scale = self.rgb_scale
layer.alpha_scale = self.alpha_scale
layer.sort = self.sort
layer.priority = self.priority
layer.stored = self.stored
layer.set_combine_mode_data(self.__copy_combine_mode_data())
layer.blend_mode = self._blend_mode
layer.active = self.active
return layer
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self.tex_stage.name = name
self._name = name
@property
def color(self):
r, g, b, a = self.tex_stage.color
return r, g, b, a
@color.setter
def color(self, color):
self.tex_stage.color = color
@property
def rgb_scale(self):
return self.tex_stage.rgb_scale
@rgb_scale.setter
def rgb_scale(self, scale):
self.tex_stage.rgb_scale = scale
@property
def alpha_scale(self):
return self.tex_stage.alpha_scale
@alpha_scale.setter
def alpha_scale(self, scale):
self.tex_stage.alpha_scale = scale
@property
def stored(self):
return self.tex_stage.saved_result
@stored.setter
def stored(self, stored):
self.tex_stage.saved_result = stored
@property
def blend_mode(self):
return self._blend_mode
@blend_mode.setter
def blend_mode(self, mode_id):
self._blend_mode = mode_id
if not self._uses_combine_mode:
self.tex_stage.set_mode(self.blend_modes[mode_id])
def __apply_combine_mode(self, combine_channels=None):
data = self._combine_mode_data
channels = data["channels"] if combine_channels is None else combine_channels
if not data[channels]["on"]:
return
mode_id = data[channels]["mode"]
mode = self.combine_modes[mode_id]
source_ids = data[channels]["sources"][mode_id]
sources = (self.combine_mode_sources, self.combine_mode_src_channels)
used_sources = [sources[j][source_ids[i][j]] for i in range(len(source_ids))
for j in range(2)]
if channels == "rgb":
self.tex_stage.set_combine_rgb(mode, *used_sources)
else:
self.tex_stage.set_combine_alpha(mode, *used_sources)
def set_combine_mode_data(self, data):
self._combine_mode_data = data
self._uses_combine_mode = data["rgb"]["on"] or data["alpha"]["on"]
if data["rgb"]["on"]:
self.__apply_combine_mode("rgb")
if data["alpha"]["on"]:
self.__apply_combine_mode("alpha")
def set_combine_channels(self, channels):
data = self._combine_mode_data
data["channels"] = channels
on = data[channels]["on"]
mode_id = data[channels]["mode"]
source_ids = data[channels]["sources"][mode_id]
count = len(source_ids)
index = data[channels]["source_index"]
source, src_channels = source_ids[index]
layer_id = self.id
Mgr.update_remotely("tex_layer_prop", layer_id, "combine_channels_use", on)
Mgr.update_remotely("tex_layer_prop", layer_id, "combine_mode", mode_id)
Mgr.update_remotely("tex_layer_prop", layer_id, "combine_source_count", count)
Mgr.update_remotely("tex_layer_prop", layer_id, "combine_source_index", index)
Mgr.update_remotely("tex_layer_prop", layer_id, "combine_source", source)
Mgr.update_remotely("tex_layer_prop", layer_id, "combine_source_channels", src_channels)
def get_selected_combine_channels(self):
return self._combine_mode_data["channels"]
def use_combine_channels(self, uses_channels):
data = self._combine_mode_data
channels = data["channels"]
data[channels]["on"] = uses_channels
self._uses_combine_mode = data["rgb"]["on"] or data["alpha"]["on"]
if uses_channels:
self.__apply_combine_mode()
else:
mode = self.combine_modes["modulate"]
sources = self.combine_mode_sources
tex = sources["texture"]
prev = sources["previous_layer"]
if channels == "rgb":
rgb = self.combine_mode_src_channels["rgb"]
self.tex_stage.set_combine_rgb(mode, tex, rgb, prev, rgb)
else:
alpha = self.combine_mode_src_channels["alpha"]
self.tex_stage.set_combine_alpha(mode, tex, alpha, prev, alpha)
other_channels = "alpha" if channels == "rgb" else "rgb"
if not data[other_channels]["on"]:
self.tex_stage.set_mode(self.blend_modes[self._blend_mode])
def uses_combine_mode(self):
return self._uses_combine_mode
def set_combine_mode(self, mode_id):
data = self._combine_mode_data
channels = data["channels"]
data[channels]["mode"] = mode_id
if self._uses_combine_mode:
self.__apply_combine_mode()
source_ids = data[channels]["sources"][mode_id]
count = len(source_ids)
data[channels]["source_index"] = 0
source, src_channels = source_ids[0]
layer_id = self.id
Mgr.update_remotely("tex_layer_prop", layer_id, "combine_source_count", count)
Mgr.update_remotely("tex_layer_prop", layer_id, "combine_source_index", 0)
Mgr.update_remotely("tex_layer_prop", layer_id, "combine_source", source)
Mgr.update_remotely("tex_layer_prop", layer_id, "combine_source_channels", src_channels)
def set_combine_source_index(self, index):
data = self._combine_mode_data
channels = data["channels"]
data[channels]["source_index"] = index
mode_id = data[channels]["mode"]
source_ids = data[channels]["sources"][mode_id]
source, src_channels = source_ids[index]
layer_id = self.id
Mgr.update_remotely("tex_layer_prop", layer_id, "combine_source", source)
Mgr.update_remotely("tex_layer_prop", layer_id, "combine_source_channels", src_channels)
def set_combine_source(self, source_id):
data = self._combine_mode_data
channels = data["channels"]
index = data[channels]["source_index"]
mode_id = data[channels]["mode"]
source_ids = data[channels]["sources"][mode_id]
source_ids[index][0] = source_id
if self._uses_combine_mode:
self.__apply_combine_mode()
def set_combine_source_channels(self, src_channels):
data = self._combine_mode_data
channels = data["channels"]
index = data[channels]["source_index"]
mode_id = data[channels]["mode"]
source_ids = data[channels]["sources"][mode_id]
source_ids[index][1] = src_channels
if self._uses_combine_mode:
self.__apply_combine_mode()
def set_property(self, prop_id, value):
if prop_id == "name":
self.name = value
elif prop_id == "color":
self.color = value
elif prop_id == "rgb_scale":
self.rgb_scale = value
elif prop_id == "alpha_scale":
self.alpha_scale = value
elif prop_id == "uv_set_id":
self.uv_set_id = value
elif prop_id == "sort":
self.sort = value
elif prop_id == "priority":
self.priority = value
elif prop_id == "border_color":
self.border_color = value
elif prop_id == "wrap_lock":
self.lock_wrap_modes(value)
elif prop_id == "wrap_u":
self.wrap_u = value
elif prop_id == "wrap_v":
self.wrap_v = value
elif prop_id == "filter_min":
self.minfilter = value
elif prop_id == "filter_mag":
self.magfilter = value
elif prop_id == "anisotropic_degree":
self.anisotropic_degree = value
elif prop_id == "stored":
self.stored = value
elif prop_id == "blend_mode":
self.blend_mode = value
elif prop_id == "combine_mode":
self.set_combine_mode(value)
elif prop_id == "combine_channels":
self.set_combine_channels(value)
elif prop_id == "combine_channels_use":
self.use_combine_channels(value)
elif prop_id == "combine_source_index":
self.set_combine_source_index(value)
elif prop_id == "combine_source":
self.set_combine_source(value)
elif prop_id == "combine_source_channels":
self.set_combine_source_channels(value)
class TexMapManager:
def __init__(self):
self._layers = {}
self._tex_stages = {}
self._id_generator = id_generator()
def setup(self):
TS = TextureStage
stages = self._tex_stages
map_types = ("color", "normal", "height", "normal+height", "gloss",
"color+gloss", "normal+gloss", "glow", "color+glow")
modes = (TS.M_modulate, TS.M_normal, TS.M_height, TS.M_normal_height, TS.M_gloss,
TS.M_modulate_gloss, TS.M_normal_gloss, TS.M_glow, TS.M_modulate_glow)
for map_type, mode in zip(map_types, modes):
stage = TS(f"tex_stage_{map_type}")
stage.set_mode(mode)
stages[map_type] = stage
stages["vertex_colors"] = TS.default
Mgr.accept("create_tex_map", self.__create_tex_map)
Mgr.accept("create_tex_layer", self.__create_layer)
Mgr.accept("register_tex_layer", self.__register_layer)
Mgr.accept("unregister_tex_layer", self.__unregister_layer)
Mgr.expose("tex_layer", lambda layer_id: self._layers.get(layer_id))
Mgr.expose("tex_stage", lambda map_type: self._tex_stages.get(map_type))
Mgr.expose("unique_tex_layer_name", self.__get_unique_layer_name)
Mgr.expose("next_tex_layer_id", lambda: ("tex_layer",) + next(self._id_generator))
Mgr.add_app_updater("new_tex_layer", self.__update_new_layer)
Mgr.add_app_updater("tex_layer_selection", self.__select_layer)
Mgr.add_app_updater("removed_tex_layer", self.__remove_layer)
Mgr.add_app_updater("tex_layer_prop", self.__set_layer_property)
return "texture_maps_ok"
def __get_unique_layer_name(self, material, requested_name="", layer=None):
layers = material.get_layers()
if layer and layer in layers:
layers.remove(layer)
namelist = [l.name for l in layers]
search_pattern = r"^Layer\s*(\d+)$"
naming_pattern = "Layer {:02d}"
return get_unique_name(requested_name, namelist, search_pattern, naming_pattern)
def __create_tex_map(self, map_type):
return TextureMap(map_type)
def __create_layer(self, material=None):
layer_id = ("tex_layer",) + next(self._id_generator)
if material:
name = self.__get_unique_layer_name(material)
else:
name = ""
layer = Layer(layer_id, name)
return layer
def __register_layer(self, layer):
self._layers[layer.id] = layer
def __unregister_layer(self, layer):
del self._layers[layer.id]
def __update_new_layer(self, material_id, source_layer_id):
material = Mgr.get("material", material_id)
if source_layer_id is None:
layer_id = ("tex_layer",) + next(self._id_generator)
name = self.__get_unique_layer_name(material)
layer = Layer(layer_id, name)
material.add_layer(layer)
self._layers[layer_id] = layer
Mgr.update_remotely("new_tex_layer", layer_id, name)
return
source_layer = self._layers[source_layer_id]
source_name = source_layer.name
        original_name = re.sub(r" - copy$| - copy \(\d+\)$", "", source_name, count=1)
copy_name = original_name + " - copy"
copy_name = self.__get_unique_layer_name(material, copy_name)
layer = source_layer.copy()
layer.name = copy_name
layer_id = layer.id
self._layers[layer_id] = layer
material.add_layer(layer)
Mgr.update_remotely("new_tex_layer", layer_id, copy_name)
def __remove_layer(self, material_id, layer_id):
material = Mgr.get("material", material_id)
layer = self._layers[layer_id]
material.remove_layer(layer)
del self._layers[layer_id]
if not material.get_layers():
layer_id = ("tex_layer",) + next(self._id_generator)
name = self.__get_unique_layer_name(material)
layer = Layer(layer_id, name)
material.add_layer(layer)
self._layers[layer_id] = layer
Mgr.update_remotely("new_tex_layer", layer_id, name)
def __select_layer(self, material_id, layer_id):
material = Mgr.get("material", material_id)
material.set_selected_layer_id(layer_id)
layer = self._layers[layer_id]
prop_id = "on"
on = layer.active
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, on)
rgb_filename, alpha_filename = layer.get_tex_filenames()
prop_id = "color"
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, layer.color)
prop_id = "rgb_scale"
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, layer.rgb_scale)
prop_id = "alpha_scale"
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, layer.alpha_scale)
prop_id = "file_main"
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, rgb_filename)
prop_id = "file_alpha"
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, alpha_filename)
prop_id = "sort"
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, layer.sort)
prop_id = "priority"
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, layer.priority)
prop_id = "border_color"
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, layer.border_color)
prop_id = "wrap_u"
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, layer.wrap_u)
prop_id = "wrap_v"
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, layer.wrap_v)
prop_id = "wrap_lock"
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, layer.are_wrap_modes_locked())
prop_id = "filter_min"
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, layer.minfilter)
prop_id = "filter_mag"
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, layer.magfilter)
prop_id = "anisotropic_degree"
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, layer.anisotropic_degree)
prop_id = "transform"
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, layer.get_transform())
prop_id = "uv_set"
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, layer.uv_set_id)
prop_id = "stored"
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, layer.stored)
prop_id = "blend_mode"
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, layer.blend_mode)
channels = layer.get_selected_combine_channels()
prop_id = "combine_channels"
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, channels)
layer.set_combine_channels(channels)
def __set_layer_property(self, material_id, layer_id, prop_id, value):
material = Mgr.get("material", material_id)
layer = self._layers[layer_id]
reapply_layer = False
if prop_id == "name":
value = self.__get_unique_layer_name(material, value, layer)
elif prop_id == "on":
material.set_map_active("layer", layer_id, value)
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, value)
return
elif prop_id == "sort":
layers = material.get_layers()
value = max(0, min(value, len(layers) - 1))
layers.remove(layer)
layers.insert(value, layer)
for i, l in enumerate(layers):
if l is not layer:
l.sort = i
reapply_layer = True
elif prop_id == "wrap_u":
if layer.are_wrap_modes_locked():
Mgr.update_remotely("tex_layer_prop", layer_id, "wrap_v", value)
elif prop_id == "wrap_v":
if layer.are_wrap_modes_locked():
Mgr.update_remotely("tex_layer_prop", layer_id, "wrap_u", value)
elif prop_id == "wrap_lock":
if value:
mode_id = layer.wrap_u
Mgr.update_remotely("tex_layer_prop", layer_id, "wrap_v", mode_id)
elif prop_id == "anisotropic_degree":
value = max(1, min(value, 16))
elif prop_id == "offset_u":
material.set_map_transform("layer", layer_id, "offset", 0, value)
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, value)
return
elif prop_id == "offset_v":
material.set_map_transform("layer", layer_id, "offset", 1, value)
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, value)
return
elif prop_id == "rotate":
material.set_map_transform("layer", layer_id, "rotate", 0, value)
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, value)
return
elif prop_id == "scale_u":
material.set_map_transform("layer", layer_id, "scale", 0, value)
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, value)
return
elif prop_id == "scale_v":
material.set_map_transform("layer", layer_id, "scale", 1, value)
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, value)
return
elif prop_id == "uv_set":
value = max(0, min(value, 7))
material.set_layer_uv_set_id(layer, value)
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, value)
return
elif prop_id in ("color", "rgb_scale", "alpha_scale", "blend_mode",
"combine_mode", "combine_channels_use", "combine_source",
"combine_source_channels", "stored"):
reapply_layer = True
layer.set_property(prop_id, value)
Mgr.update_remotely("tex_layer_prop", layer_id, prop_id, value)
if reapply_layer:
material.reapply_layer(layer)
MainObjects.add_class(TexMapManager)
|
420729
|
import logging
import unittest
from unittest import TestCase
import toml
from fiery_snap.impl.util.service import GenericService
from fiery_snap.impl.util.page import Page, TestPage
import time
import web
import requests
EXAMPLE_TOML = """
name = "test_service"
listening_address = "0.0.0.0"
listening_port = 7878
"""
REQ = {'pong':'test', 'target': 'test_service'}
class MyService(GenericService):
pass
class MockFrontend(object):
NAME = 'mock-frontend'
def __init__(self, name=NAME):
self.name = name
@classmethod
def handle(cls, pagename, json_data):
result = {'pagename': pagename,
'pong': json_data.get('pong', 'failed')}
return result
class TestBasicService(TestCase):
def test_parseblock(self):
config_dict = toml.loads(EXAMPLE_TOML)
svc_config = config_dict
pages = [TestPage,]
config_dict['page_objs'] = pages
TestPage.BACKEND[MockFrontend.NAME] = MockFrontend
svc = MyService(back_end=MockFrontend(), **config_dict)
try:
started = svc.start()
self.assertTrue(started)
self.assertTrue(svc.is_alive())
time.sleep(2.0)
        finally:
svc.stop()
self.assertFalse(svc.is_alive())
def test_getpage(self):
config_dict = toml.loads(EXAMPLE_TOML)
svc_config = config_dict
pages = [TestPage,]
config_dict['page_objs'] = pages
svc = MyService(back_end=MockFrontend(), **config_dict)
started = svc.start()
self.assertTrue(started)
try:
self.assertTrue(svc.is_alive())
time.sleep(4.0)
location = svc.get_base_url(TestPage.NAME)
logging.debug("Sending request to: %s" %location)
r = requests.post(location, json=REQ)
self.assertTrue(r.status_code == 200)
jd = r.json()
logging.debug("Testpage returned: %s" %r.text)
self.assertTrue('pong' in jd and jd['pong'] == REQ['pong'])
        finally:
svc.stop()
self.assertFalse(svc.is_alive())
if __name__ == '__main__':
unittest.main()
|
420746
|
import datetime
import uuid as uuid_object
from django.conf import settings
from django.db import models
from django.db.models import Q
from django.urls import reverse
from django.utils import timezone
from projectroles.models import Project
from projectroles.plugins import get_backend_api

#: Shortcut to Django User model class.
AUTH_USER_MODEL = getattr(settings, "AUTH_USER_MODEL", "auth.User")
class FileBoxManager(models.Manager):
"""Manager for custom table-level FileBox queries"""
def find(self, search_term, keywords=None):
"""Return objects matching the query.
:param search_term: Search term (string)
:param keywords: Optional search keywords as key/value pairs (dict)
:return: Python list of FileBox objects
"""
objects = super().get_queryset()
objects = objects.filter(
Q(title__icontains=search_term) | Q(description__icontains=search_term)
)
return objects
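# Usage sketch: the manager is installed on FileBox below, so a search is
#   FileBox.objects.find("exchange")  # matches title or description, case-insensitively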
#: Choices for the state of the meta data in the management.
CHOICES_STATE_META = (
("ACTIVE", "active (created)"),
("INACTIVE", "inactive (access blocked)"),
("DELETED", "deleted"),
)
#: Choices for the state of the data on the disk.
CHOICES_STATE_DATA = (
("ACTIVE", "active (created)"),
("INACTIVE", "inactive (access blocked)"),
("DELETING", "deleting"),
("DELETED", "deleted"),
)
def fourteen_days_in_the_future():
return timezone.now() + datetime.timedelta(days=14)
def twenty_one_days_in_the_future():
return timezone.now() + datetime.timedelta(days=21)
class FileBox(models.Model):
"""Information stored for each file exchange box."""
#: Search-enabled manager.
objects = FileBoxManager()
#: DateTime of creation
date_created = models.DateTimeField(auto_now_add=True, help_text="DateTime of creation")
#: DateTime of last modification
date_modified = models.DateTimeField(auto_now=True, help_text="DateTime of last modification")
#: UUID used for identification throughout SODAR.
sodar_uuid = models.UUIDField(default=uuid_object.uuid4, unique=True, help_text="SODAR UUID")
#: The project containing this file box.
    project = models.ForeignKey(
        Project,
        on_delete=models.CASCADE,  # on_delete is required as of Django 2.0; CASCADE assumed here
        help_text="Project that this file box belongs to",
    )
#: Date when write access is lost.
date_frozen = models.DateTimeField(null=False, blank=False, default=fourteen_days_in_the_future)
#: Date when data is removed.
date_expiry = models.DateTimeField(
null=False, blank=False, default=twenty_one_days_in_the_future
)
#: The current state in the database.
state_meta = models.CharField(
max_length=32,
null=False,
blank=False,
default="INITIAL",
choices=CHOICES_STATE_META,
help_text="State in the management system",
)
#: The current state in the file system.
state_data = models.CharField(
max_length=32, null=False, blank=False, default="INITIAL", choices=CHOICES_STATE_DATA
)
#: The title of the file box.
title = models.CharField(max_length=200, null=False, blank=False, help_text="File box title")
#: Additional description.
description = models.TextField(
null=True, blank=True, help_text="File box description; optional"
)
def update_state_meta(self, user, field, new_state):
# Add audit trail event.
self.audit_entries.create(
actor=user,
action="UPDATE_STATE",
message="updated %s of file box '%s' from '%s' to '%s'"
% (field, self.title, getattr(self, field), new_state),
)
# Register event with timeline.
timeline = get_backend_api("timeline_backend")
if timeline:
tl_event = timeline.add_event(
project=self.project,
app_name="fileboxes",
user=user,
event_name="filebox_update_state",
description="updating state of file box {filebox}",
status_type="OK",
)
tl_event.add_object(obj=self, label="filebox", name=self.title)
# Actually update field.
setattr(self, field, new_state)
self.save()
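    # Usage sketch (request.user and the target state are hypothetical):
    #   box.update_state_meta(request.user, "state_meta", "INACTIVE")
    # This records an audit entry, a timeline event (if the timeline backend
    # is enabled), and then persists the new state.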
def grant_list(self):
return [grant.username for grant in self.account_grants.all()]
def get_absolute_url(self):
return reverse(
"fileboxes:filebox-detail",
kwargs={"project": self.project.sodar_uuid, "filebox": self.sodar_uuid},
)
class Meta:
ordering = ("-date_created",)
#: Choices for the FileBoxAuditEntry.action field.
ACTION_CHOICES = (
("CREATE", "created"),
("ADD_MEMBER", "member added"),
("REMOVE_MEMBER", "member removed"),
("UPDATE_STATE", "state updated"),
("UPDATE_ATTRS", "attribute(s) updated"), # non-state field(s)
("FS_APPLY_STATE", "state applied to file system"),
("FS_DATA_REMOVED", "all data removed from file system"),
)
class FileBoxAuditEntry(models.Model):
"""Audit trail entry for a FileBox record."""
#: DateTime of creation
date_created = models.DateTimeField(auto_now_add=True, help_text="DateTime of creation")
#: DateTime of last modification
date_modified = models.DateTimeField(auto_now=True, help_text="DateTime of last modification")
#: UUID used for identification throughout SODAR.
sodar_uuid = models.UUIDField(default=uuid_object.uuid4, unique=True, help_text="SODAR UUID")
file_box = models.ForeignKey(
FileBox,
related_name="audit_entries",
null=False,
blank=False,
on_delete=models.PROTECT,
help_text="The file box that this audit entry belongs to",
)
actor = models.ForeignKey(
AUTH_USER_MODEL,
null=False,
blank=False,
on_delete=models.PROTECT,
help_text="The actor in this audit trail entry",
)
action = models.TextField(
null=False, blank=False, choices=ACTION_CHOICES, help_text="The action that was performed"
)
message = models.TextField(
null=False, blank=False, help_text="The user-readable audit trail message"
)
raw_log = models.TextField(null=True, blank=True, help_text="Raw text output")
def get_project(self):
return self.file_box.project
class Meta:
ordering = ("-date_created",)
class FileBoxAccountGrant(models.Model):
"""An entry granting access to an account."""
#: DateTime of creation
date_created = models.DateTimeField(auto_now_add=True, help_text="DateTime of creation")
#: DateTime of last modification
date_modified = models.DateTimeField(auto_now=True, help_text="DateTime of last modification")
#: UUID used for identification throughout SODAR.
sodar_uuid = models.UUIDField(default=uuid_object.uuid4, unique=True, help_text="SODAR UUID")
file_box = models.ForeignKey(
FileBox,
related_name="account_grants",
null=False,
blank=False,
on_delete=models.PROTECT,
help_text="The file box that this audit entry belongs to",
)
username = models.CharField(
max_length=200,
blank=False,
null=False,
help_text="User name of the account that is granted access",
)
full_name = models.CharField(
max_length=200, blank=True, null=True, help_text="Full name of the account's owner"
)
email = models.CharField(
max_length=200, blank=True, null=True, help_text="Email of the account"
)
class Meta:
ordering = ("username",)
|
420747
|
words=['Wednesday',
'a lot',
'absence',
'accept',
'acceptable',
'accessible',
'accidentally',
'accommodate',
'accompanied',
'accomplish',
'accumulate',
'accuracy',
'achievement',
'acknowledgment',
'acquaintance',
'acquire',
'acquitted',
'across',
'actually',
'address',
'admission',
'adolescent',
'advice',
'advise',
'advised',
'affected',
'affectionate',
'aggravate',
'aggressive',
'alcohol',
'all right',
'allotted',
'allusion',
'always',
'amateur',
'annual',
'argument',
'arrangement',
'beginning',
'believe',
'business',
'capital',
'capitol',
'coming',
'committee',
'complement',
'compliment',
'decide',
'definite',
'desert',
'dessert',
'divide',
'embarrass',
'exaggerate',
'existence',
'explanation',
'financially',
'forehead',
'foreign',
'forfeit',
'forty',
'forward',
'friend',
'fulfillment',
'gauge',
'generally',
'government',
'governor',
'grammar',
'grammatically',
'grief',
'guaranteed',
'guard',
'guidance',
'happened',
'harass',
'height',
'hero',
'heroes',
'humor',
'hypocrisy',
'hypocrite',
'ignorant',
'illogical',
'imaginary',
'imagine',
'imitate',
'immediately',
'immense',
'incidentally',
'incredible',
'independent',
'indispensable',
'inevitable',
'infinite',
'influential',
'initiative',
'innocence',
'intellectual',
'intelligence',
'intelligent',
'interest',
'interpret',
'interrupt',
'introduce',
'irrelevant',
'irresistible',
'irritable',
'irritated',
"it's",
'its',
'knowledge',
'laboratory',
'legitimate',
'leisure',
'liable',
'library',
'license',
'lightning',
'literature',
'lively',
'loneliness',
'lonely',
'lose',
'lying',
'magazine',
'maintenance',
'maneuver',
'manual',
'manufacture',
'marriage',
'material',
'mathematics',
'meant',
'medicine',
'mere',
'messenger',
'miniature',
'minutes',
'mischievous',
'missile',
'morning',
'mortgage',
'muscles',
'mysterious',
'naturally',
'necessary',
'nickel',
'niece',
'ninety',
'ninth',
'noticeable',
'noticing',
'nuclear',
'nuisance',
'obstacle',
'occasionally',
'occur',
'occurred',
'occurrence',
'omission',
'omitted',
'opinion',
'opponent',
'opportunity',
'opposite',
'optimism',
'organize',
'origin',
'original',
'paid',
'pamphlet',
'parallel',
'particular',
'pastime',
'peculiar',
'performance',
'perhaps',
'permanent',
'permissible',
'personal',
'physical',
'physician',
'piece',
'planned',
'pleasant',
'poison',
'possess',
'possession',
'possible',
'possibly',
'practically',
'prairie',
'precede',
'preferred',
'prejudiced',
'preparation',
'prepare',
'presence',
'prevalent',
'principal',
'principle',
'privilege',
'probably',
'procedure',
'proceed',
'profession',
'professor',
'prominent',
'pronunciation',
'propaganda',
'prophecy',
'prophesy',
'psychology',
'publicly',
'pumpkin',
'purpose',
'pursue',
'quantity',
'quiet',
'quite',
'quizzes',
'realize',
'really',
'receipt',
'receive',
'receiving',
'recognize',
'recommend',
'reference',
'referred',
'referring',
'regular',
'relieve',
'remembrance',
'repetition',
'representative',
'reproduce',
'restaurant',
'rhythm',
'ridiculous',
'roommate',
'sacrifice',
'safety',
'salary',
'schedule',
'secretary',
'seize',
'separate',
'sergeant',
'severely',
'sheriff',
'shining',
'similar',
'simply',
'since',
'sincerely',
'skiing',
'sophomore',
'specimen',
'speech',
'sponsor',
'strength',
'strict',
'stubbornness',
'studying',
'subtlety',
'succeed',
'successful',
'succession',
'sufficient',
'suicide',
'summary',
'superintendent',
'supersede',
'suppose',
'suppress',
'surely',
'surprise',
'surround',
'susceptible',
'suspicious',
'swimming',
'symbol',
'sympathize',
'technique',
'temperament',
'temperature',
'tendency',
'than',
'their',
'themselves',
'then',
'there',
'therefore',
"they're",
'thorough',
'thought',
'through',
'till',
'to',
'tobacco',
'together',
'tomorrow',
'too',
'tournament',
'traffic',
'trafficked',
'tragedy',
'transferred',
'tremendous',
'tried',
'tries',
'trouble',
'truly',
'twelfth',
'two',
'tyranny',
'unanimous',
'unconscious',
'undoubtedly',
'unmistakably',
'unnecessary',
'until',
'usage',
'useful',
'useless',
'using',
'usually',
'vacuum',
'valuable',
'varies',
'various',
'vegetable',
'vengeance',
'venomous',
'vice',
'view',
'vigilance',
'villain',
'violence',
'visible',
'vitamins',
'waive',
'warrant',
'warring',
'weather',
'weird',
'where',
'wherever',
'whether',
'whichever',
"who's",
'wholly',
'whose',
'wield',
'wintry',
'withdrawal',
'woman',
'women',
'worshiped',
'wreck',
'write',
'writing',
'written',
'yield']
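

# Illustrative usage (an addition, not part of the original file): the list of
# commonly misspelled words above can back a simple suggestion helper using
# only the standard library. ``suggest`` is a hypothetical name, and difflib's
# similarity cutoff is heuristic, so results may vary with the threshold.
import difflib


def suggest(word, n=3, cutoff=0.8):
    """Return up to ``n`` close matches for ``word`` from the list above."""
    return difflib.get_close_matches(word, words, n=n, cutoff=cutoff)


# Example: suggest('recieve') should yield ['receive'], while inputs with no
# near match simply yield an empty list.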